Example #1
    def test_array_information(self):
        ad = astrodata.open(os.path.join(TESTDATAPATH, 'GMOS',
                                         'N20110524S0358_varAdded.fits'))
        ret = gt.array_information(ad)
        assert ret == {'amps_per_array': {1: 1, 2: 1, 3: 1},
                       'amps_order': [0, 1, 2], 'array_number': [1, 2, 3],
                       'reference_extension': 2}
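For orientation, here is a minimal sketch of how the fields returned by gt.array_information() are typically consumed. The return type varies between package versions (a plain dict in this test, attribute access such as array_info.extensions in the primitives below), so the grouping logic is illustrative only.

    # Hypothetical illustration using the dict from the test above: a 3-CCD
    # GMOS frame with one amplifier per CCD.
    from collections import defaultdict

    info = {'amps_per_array': {1: 1, 2: 1, 3: 1},   # amplifiers per CCD
            'amps_order': [0, 1, 2],                # extensions in tiling order
            'array_number': [1, 2, 3],              # CCD each extension belongs to
            'reference_extension': 2}               # extension used as reference

    # Group extension indices by CCD, mirroring what the primitives below get
    # from array_info.extensions (assumed pairing, for illustration only):
    per_ccd = defaultdict(list)
    for ext_index, ccd in zip(info['amps_order'], info['array_number']):
        per_ccd[ccd].append(ext_index)
    print(dict(per_ccd))   # {1: [0], 2: [1], 3: [2]}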
Example #2
    def tileArrays(self, adinputs=None, **params):
        """
        This primitive combines extensions by tiling (no interpolation).
        The array_section() and detector_section() descriptors are used
        to derive the geometry of the tiling, so outside help (from the
        instrument's geometry_conf module) is only required if there are
        multiple arrays being tiled together, as the gaps need to be
        specified.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        tile_all: bool
            tile to a single extension, rather than one per array?
            (array=physical detector)
        sci_only: bool
            tile only the data plane?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        suffix = params['suffix']
        tile_all = params['tile_all']
        attributes = ['data'] if params["sci_only"] else None

        adoutputs = []
        for ad in adinputs:
            if len(ad) == 1:
                log.warning("{} has only one extension, so there's nothing "
                            "to tile".format(ad.filename))
                adoutputs.append(ad)
                continue

            # Get information to calculate the output geometry
            # TODO: Think about arbitrary ROIs
            array_info = gt.array_information(ad)
            detshape = array_info.detector_shape
            if not tile_all and set(array_info.array_shapes) == {(1, 1)}:
                log.warning("{} has nothing to tile, as tile_all=False but "
                            "each array has only one amplifier.")
                adoutputs.append(ad)
                continue

            blocks = [
                Block(ad[arrays], shape=shape) for arrays, shape in zip(
                    array_info.extensions, array_info.array_shapes)
            ]
            offsets = [
                ad[exts[0]].array_section() for exts in array_info.extensions
            ]

            if tile_all and detshape != (1, 1):  # We need gaps!
                geotable = import_module('.geometry_conf', self.inst_lookups)
                chip_gaps = geotable.tile_gaps[ad.detector_name()]
                try:
                    xgap, ygap = chip_gaps
                except TypeError:  # single number, applies to both
                    xgap = ygap = chip_gaps
                transforms = []
                for i, (origin,
                        offset) in enumerate(zip(array_info.origins, offsets)):
                    xshift = (origin[1] + offset.x1 + xgap *
                              (i % detshape[1])) // ad.detector_x_bin()
                    yshift = (origin[0] + offset.y1 + ygap *
                              (i // detshape[1])) // ad.detector_y_bin()
                    transforms.append(
                        Transform(models.Shift(xshift) & models.Shift(yshift)))
                adg = AstroDataGroup(blocks, transforms)
                adg.set_reference()
                ad_out = adg.transform(attributes=attributes,
                                       process_objcat=True)
            else:
                # ADG.transform() produces full AD objects so we start with
                # the first one, and then append the single extensions created
                # by later calls to it.
                for i, block in enumerate(blocks):
                    # Simply create a single tiled array
                    adg = AstroDataGroup([block])
                    adg.set_reference()
                    if i == 0:
                        ad_out = adg.transform(attributes=attributes,
                                               process_objcat=True)
                    else:
                        ad_out.append(
                            adg.transform(attributes=attributes,
                                          process_objcat=True)[0])

            gt.mark_history(ad_out,
                            primname=self.myself(),
                            keyword=timestamp_key)
            ad_out.orig_filename = ad.filename
            ad_out.update_filename(suffix=suffix, strip=True)
            adoutputs.append(ad_out)
        return adoutputs
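The shift arithmetic in the tile_all branch above can be checked in isolation. This is a standalone sketch with made-up values: a 1x3 CCD mosaic, a 10-pixel gap, unbinned data, and plain (x1, y1) tuples standing in for the array_section objects.

    # detshape is (rows, columns) of physical CCDs; origins are (y, x).
    detshape = (1, 3)
    xgap, ygap = 10, 10                          # made-up unbinned chip gaps
    xbin = ybin = 1
    origins = [(0, 0), (0, 2048), (0, 4096)]     # made-up CCD origins (y, x)
    offsets = [(0, 0), (0, 0), (0, 0)]           # (x1, y1) of each array_section

    for i, (origin, offset) in enumerate(zip(origins, offsets)):
        xshift = (origin[1] + offset[0] + xgap * (i % detshape[1])) // xbin
        yshift = (origin[0] + offset[1] + ygap * (i // detshape[1])) // ybin
        print(i, xshift, yshift)   # -> (0, 0), (2058, 0), (4116, 0)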
Example #3
    def mosaicDetectors(self, adinputs=None, **params):
        """
        This primitive does a full mosaic of all the arrays in an AD object.
        An appropriate geometry_conf.py module containing geometric information
        is required.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files.
        sci_only: bool
            mosaic only SCI image data. Default is False
        order: int (1-5)
            order of spline interpolation
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        suffix = params['suffix']
        order = params['order']
        attributes = ['data'] if params['sci_only'] else None
        geotable = import_module('.geometry_conf', self.inst_lookups)

        adoutputs = []
        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by mosaicDetectors".format(
                                ad.filename))
                adoutputs.append(ad)
                continue

            if len(ad) == 1:
                log.warning("{} has only one extension, so there's nothing "
                            "to mosaic".format(ad.filename))
                adoutputs.append(ad)
                continue

            # If there's an overscan section, we must trim it before mosaicking
            try:
                overscan_kw = ad._keyword_for('overscan_section')
            except AttributeError:  # doesn't exist for this AD, so carry on
                pass
            else:
                if overscan_kw in ad.hdr:
                    ad = gt.trim_to_data_section(ad, self.keyword_comments)

            # Create the blocks (individual physical detectors)
            array_info = gt.array_information(ad)
            blocks = [
                Block(ad[arrays], shape=shape) for arrays, shape in zip(
                    array_info.extensions, array_info.array_shapes)
            ]
            offsets = [
                ad[exts[0]].array_section() for exts in array_info.extensions
            ]

            detname = ad.detector_name()
            xbin, ybin = ad.detector_x_bin(), ad.detector_y_bin()
            geometry = geotable.geometry[detname]
            default_shape = geometry.get('default_shape')
            adg = AstroDataGroup()

            for block, origin, offset in zip(blocks, array_info.origins,
                                             offsets):
                # Origins are in (x, y) order in LUT
                block_geom = geometry[origin[::-1]]
                nx, ny = block_geom.get('shape', default_shape)
                nx /= xbin
                ny /= ybin
                shift = block_geom.get('shift', (0, 0))
                rot = block_geom.get('rotation', 0.)
                mag = block_geom.get('magnification', (1, 1))
                transform = Transform()

                # Shift the Block's coordinates based on its location within
                # the full array, to ensure any rotation takes place around
                # the true centre.
                if offset.x1 != 0 or offset.y1 != 0:
                    transform.append(
                        models.Shift(float(offset.x1) / xbin)
                        & models.Shift(float(offset.y1) / ybin))

                if rot != 0 or mag != (1, 1):
                    # Shift to centre, do whatever, and then shift back
                    transform.append(
                        models.Shift(-0.5 * (nx - 1)) & models.Shift(-0.5 *
                                                                     (ny - 1)))
                    if rot != 0:
                        # Cope with non-square pixels by scaling in one
                        # direction to make them square before applying the
                        # rotation, and then reversing that.
                        if xbin != ybin:
                            transform.append(
                                models.Identity(1) & models.Scale(ybin / xbin))
                        transform.append(models.Rotation2D(rot))
                        if xbin != ybin:
                            transform.append(
                                models.Identity(1) & models.Scale(xbin / ybin))
                    if mag != (1, 1):
                        transform.append(
                            models.Scale(mag[0]) & models.Scale(mag[1]))
                    transform.append(
                        models.Shift(0.5 * (nx - 1)) & models.Shift(0.5 *
                                                                    (ny - 1)))
                transform.append(
                    models.Shift(float(shift[0]) / xbin)
                    & models.Shift(float(shift[1]) / ybin))
                adg.append(block, transform)

            adg.set_reference()
            ad_out = adg.transform(attributes=attributes,
                                   order=order,
                                   process_objcat=False)

            ad_out.orig_filename = ad.filename
            gt.mark_history(ad_out,
                            primname=self.myself(),
                            keyword=timestamp_key)
            ad_out.update_filename(suffix=suffix, strip=True)
            adoutputs.append(ad_out)

        return adoutputs
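The shift-rotate-shift pattern in the loop above (move the array centre to the origin, rotate, move back) can be reproduced with plain astropy models, composed with | instead of the package's Transform wrapper. The CCD shape and rotation angle below are invented for illustration.

    from astropy.modeling import models

    nx, ny, rot = 2048.0, 4608.0, 0.05   # made-up binned shape and rotation (deg)
    centre = models.Shift(-0.5 * (nx - 1)) & models.Shift(-0.5 * (ny - 1))
    uncentre = models.Shift(0.5 * (nx - 1)) & models.Shift(0.5 * (ny - 1))
    transform = centre | models.Rotation2D(rot) | uncentre

    # The array centre maps to itself; a corner moves slightly.
    print(transform(0.5 * (nx - 1), 0.5 * (ny - 1)))
    print(transform(0.0, 0.0))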
Example #4
    def normalizeFlat(self, adinputs=None, **params):
        """
        This primitive normalizes a GMOS Longslit spectroscopic flatfield
        in a manner similar to that performed by gsflat in Gemini-IRAF.
        A cubic spline is fitted along the dispersion direction of each
        row, separately for each CCD.

        As this primitive is GMOS-specific, we know the dispersion direction
        will be along the rows, and there will be 3 CCDs.

        For Hamamatsu CCDs, the 21 unbinned columns at each CCD edge are
        masked out, following the procedure in gsflat.
        TODO: Should we add these in the BPM?

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        spectral_order: int/str
            order of fit in spectral direction
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        # For flexibility, the code is going to pass whatever validated
        # parameters it gets (apart from suffix and spectral_order) to
        # the spline fitter
        spline_kwargs = params.copy()
        suffix = spline_kwargs.pop("suffix")
        spectral_order = spline_kwargs.pop("spectral_order")
        threshold = spline_kwargs.pop("threshold")

        # Parameter validation should ensure we get an int or a list of 3 ints
        try:
            orders = [int(x) for x in spectral_order]
        except TypeError:
            orders = [spectral_order] * 3

        for ad in adinputs:
            xbin, ybin = ad.detector_x_bin(), ad.detector_y_bin()
            array_info = gt.array_information(ad)
            is_hamamatsu = 'Hamamatsu' in ad.detector_name(pretty=True)
            ad_tiled = self.tileArrays([ad], tile_all=False)[0]
            ad_fitted = astrodata.create(ad.phu)
            for ext, order, indices in zip(ad_tiled, orders,
                                           array_info.extensions):
                # If the entire row is unilluminated, we want to fit
                # the pixels but still keep the edges masked
                try:
                    ext.mask ^= (np.bitwise_and.reduce(ext.mask, axis=1)
                                 & DQ.unilluminated)[:, None]
                except TypeError:  # ext.mask is None
                    pass
                else:
                    if is_hamamatsu:
                        ext.mask[:, :21 // xbin] = 1
                        ext.mask[:, -21 // xbin:] = 1
                fitted_data = np.empty_like(ext.data)
                pixels = np.arange(ext.shape[1])

                for i, row in enumerate(ext.nddata):
                    masked_data = np.ma.masked_array(row.data, mask=row.mask)
                    weights = np.sqrt(
                        np.where(row.variance > 0, 1. / row.variance, 0.))
                    spline = astromodels.UnivariateSplineWithOutlierRemoval(
                        pixels,
                        masked_data,
                        order=order,
                        w=weights,
                        **spline_kwargs)
                    fitted_data[i] = spline(pixels)
                # Copy header so we have the _section() descriptors
                ad_fitted.append(fitted_data, header=ext.hdr)

            # Find the largest spline value for each row across all extensions
            # and mask pixels below the requested fraction of the peak
            row_max = np.array([
                ext_fitted.data.max(axis=1) for ext_fitted in ad_fitted
            ]).max(axis=0)

            # Prevent runtime error in division
            row_max[row_max == 0] = np.inf

            for ext_fitted in ad_fitted:
                ext_fitted.mask = np.where(
                    (ext_fitted.data.T / row_max).T < threshold,
                    DQ.unilluminated, DQ.good)

            for ext_fitted, indices in zip(ad_fitted, array_info.extensions):
                tiled_arrsec = ext_fitted.array_section()
                for i in indices:
                    ext = ad[i]
                    arrsec = ext.array_section()
                    slice_ = (slice((arrsec.y1 - tiled_arrsec.y1) // ybin,
                                    (arrsec.y2 - tiled_arrsec.y1) // ybin),
                              slice((arrsec.x1 - tiled_arrsec.x1) // xbin,
                                    (arrsec.x2 - tiled_arrsec.x1) // xbin))
                    # Suppress warnings to do with fitted_data==0
                    # (which create NaNs in variance)
                    with np.errstate(invalid='ignore', divide='ignore'):
                        ext.divide(ext_fitted.nddata[slice_])
                    np.nan_to_num(ext.data, copy=False, posinf=0, neginf=0)
                    np.nan_to_num(ext.variance, copy=False)

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)

        return adinputs
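The row-by-row fit can be approximated with scipy's UnivariateSpline, as in this sketch. The package's UnivariateSplineWithOutlierRemoval additionally performs iterative outlier rejection and interprets its order parameter differently, so this shows only the core idea: fit a smooth spline to each row with inverse-variance weights, then divide.

    import numpy as np
    from scipy.interpolate import UnivariateSpline

    rng = np.random.default_rng(0)
    pixels = np.arange(512)
    profile = 1000 * np.exp(-0.5 * ((pixels - 256) / 150) ** 2)  # fake flat row
    data = profile + rng.normal(scale=5.0, size=profile.size)
    variance = np.full_like(data, 25.0)

    # Weights as in the primitive: sqrt(1/variance) where variance > 0
    weights = np.sqrt(np.where(variance > 0, 1.0 / variance, 0.0))
    spline = UnivariateSpline(pixels, data, w=weights, k=3)
    normalized = data / spline(pixels)   # row normalized to ~1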
Example #5
    def QECorrect(self, adinputs=None, **params):
        """
        This primitive applies a wavelength-dependent QE correction to
        a 2D spectral image, based on the wavelength solution of an
        associated processed_arc.

        It is only designed to work on FLATs, and therefore unmosaicked data.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        arc : {None, AstroData, str}
            Arc(s) with distortion map.
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        sfx = params["suffix"]
        arc = params["arc"]

        # Get a suitable arc frame (with distortion map) for every science AD
        if arc is None:
            self.getProcessedArc(adinputs, refresh=False)
            arc_list = self._get_cal(adinputs, 'processed_arc')
        else:
            arc_list = arc

        for ad, arc in zip(*gt.make_lists(adinputs, arc_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by QECorrect".
                            format(ad.filename))
                continue

            if 'e2v' in ad.detector_name(pretty=True):
                log.stdinfo(f"{ad.filename} has the e2v CCDs, so no QE "
                            "correction is necessary")
                continue

            if self.timestamp_keys['mosaicDetectors'] in ad.phu:
                log.warning(f"{ad.filename} has been processed by mosaic"
                            "Detectors so QECorrect cannot be run")
                continue

            # Determines whether to multiply or divide by QE correction
            is_flat = 'FLAT' in ad.tags

            # If the arc's binning doesn't match, we may still be able to
            # fall back to the approximate solution
            xbin, ybin = ad.detector_x_bin(), ad.detector_y_bin()
            if arc is not None and (arc.detector_x_bin() != xbin or
                                    arc.detector_y_bin() != ybin):
                log.warning("Science frame {} and arc {} have different binnings,"
                            "so cannot use arc".format(ad.filename, arc.filename))
                arc = None

            # The plan here is to attach the mosaic gWCS to the science frame,
            # apply an origin shift to put it in the frame of the arc, and
            # then use the arc's WCS to get the wavelength. If there's no arc,
            # we just use the science frame's WCS.
            # Since we're going to change that WCS, store it for restoration.
            original_wcs = [ext.wcs for ext in ad]
            try:
                transform.add_mosaic_wcs(ad, geotable)
            except ValueError:
                log.warning(f"{ad.filename} already has a 'mosaic' coordinate"
                            "frame. This is unexpected but I'll continue.")

            if arc is None:
                if 'sq' in self.mode:
                    raise OSError(f"No processed arc listed for {ad.filename}")
                else:
                    log.warning(f"No arc supplied for {ad.filename}")
            else:
                # OK, we definitely want to try to do this, get a wavelength solution
                if self.timestamp_keys['determineWavelengthSolution'] not in arc.phu:
                    msg = f"Arc {arc.filename} (for {ad.filename} has not been wavelength calibrated."
                    if 'sq' in self.mode:
                        raise IOError(msg)
                    else:
                        log.warning(msg)

                # We'll be modifying this
                arc_wcs = deepcopy(arc[0].wcs)
                if 'distortion_corrected' not in arc_wcs.available_frames:
                    msg = f"Arc {arc.filename} (for {ad.filename}) has no distortion model."
                    if 'sq' in self.mode:
                        raise OSError(msg)
                    else:
                        log.warning(msg)

                # NB. At this point, we could have an arc that has no good
                # wavelength solution nor distortion correction. But we will
                # use its WCS rather than the science frame's because it must
                # have been supplied by the user.

                # This is GMOS so no need to be as generic as distortionCorrect
                ad_detsec = ad.detector_section()
                arc_detsec = arc.detector_section()[0]
                if (ad_detsec[0].x1, ad_detsec[-1].x2) != (arc_detsec.x1, arc_detsec.x2):
                    raise ValueError("I don't know how to process the "
                                     f"offsets between {ad.filename} "
                                     f"and {arc.filename}")

                yoff1 = arc_detsec.y1 - ad_detsec[0].y1
                yoff2 = arc_detsec.y2 - ad_detsec[0].y2
                arc_ext_shapes = [(ext.shape[0] - yoff1 + yoff2,
                                   ext.shape[1]) for ext in ad]
                arc_corners = np.concatenate([transform.get_output_corners(
                    ext.wcs.get_transform(ext.wcs.input_frame, 'mosaic'),
                    input_shape=arc_shape, origin=(yoff1, 0))
                    for ext, arc_shape in zip(ad, arc_ext_shapes)], axis=1)
                arc_origin = tuple(np.ceil(min(corners)) for corners in arc_corners)

                # So this is what was applied to the ARC to get the
                # mosaic frame to its pixel frame, in which the distortion
                # correction model was calculated. Convert coordinates
                # from python order to Model order.
                origin_shift = reduce(Model.__and__, [models.Shift(-origin)
                                                      for origin in arc_origin[::-1]])
                arc_wcs.insert_transform(arc_wcs.input_frame, origin_shift, after=True)

            array_info = gt.array_information(ad)
            if array_info.detector_shape == (1, 3):
                ccd2_indices = array_info.extensions[1]
            else:
                raise ValueError(f"{ad.filename} does not have 3 separate detectors")

            for index, ext in enumerate(ad):
                if index in ccd2_indices:
                    continue

                # Use the WCS in the extension if we don't have an arc,
                # otherwise use the arc's mosaic->world transformation
                if arc is None:
                    trans = ext.wcs.forward_transform
                else:
                    trans = (ext.wcs.get_transform(ext.wcs.input_frame, 'mosaic') |
                             arc_wcs.forward_transform)

                ygrid, xgrid = np.indices(ext.shape)
                # TODO: want with_units
                waves = trans(xgrid, ygrid)[0] * u.nm  # Wavelength always axis 0
                try:
                    qe_correction = qeModel(ext)((waves / u.nm).to(u.dimensionless_unscaled).value).astype(np.float32)
                except TypeError:  # qeModel() returns None
                    msg = "No QE correction found for {}:{}".format(ad.filename, ext.hdr['EXTVER'])
                    if 'sq' in self.mode:
                        raise ValueError(msg)
                    else:
                        log.warning(msg)
                        continue
                log.stdinfo("Mean relative QE of EXTVER {} is {:.5f}".
                             format(ext.hdr['EXTVER'], qe_correction.mean()))
                if not is_flat:
                    qe_correction = 1. / qe_correction
                qe_correction[qe_correction < 0] = 0
                qe_correction[qe_correction > 10] = 0
                ext.multiply(qe_correction)

            for ext, orig_wcs in zip(ad, original_wcs):
                ext.wcs = orig_wcs

            # Timestamp and update the filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)

        return adinputs
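A hedged sketch of the multiply-or-divide logic: for a flat the QE ratio is applied directly, while for other data the reciprocal is used, and unphysical values are zeroed as in the primitive. The quadratic "QE ratio" below is invented; qeModel() supplies the real wavelength-dependent model.

    import numpy as np

    waves_nm = np.linspace(400.0, 900.0, 5)
    qe_ratio = 1.0 + 1e-7 * (waves_nm - 650.0) ** 2   # stand-in for qeModel(ext)

    is_flat = False
    qe_correction = qe_ratio if is_flat else 1.0 / qe_ratio
    qe_correction[(qe_correction < 0) | (qe_correction > 10)] = 0
    print(qe_correction)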
Example #6
    def tileArrays(self, adinputs=None, **params):
        """
        This primitive combines extensions by tiling (no interpolation).
        The array_section() and detector_section() descriptors are used
        to derive the geometry of the tiling, so outside help (from the
        instrument's geometry_conf module) is only required if there are
        multiple arrays being tiled together, as the gaps need to be
        specified.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        tile_all: bool
            tile to a single extension, rather than one per array?
            (array=physical detector)
        sci_only: bool
            tile only the data plane?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        suffix = params['suffix']
        tile_all = params['tile_all']
        attributes = ['data'] if params["sci_only"] else None

        adoutputs = []
        for ad in adinputs:
            if len(ad) == 1:
                log.warning("{} has only one extension, so there's nothing "
                            "to tile".format(ad.filename))
                adoutputs.append(ad)
                continue

            # Get information to calculate the output geometry
            # TODO: Think about arbitrary ROIs
            array_info = gt.array_information(ad)
            detshape = array_info.detector_shape
            if not tile_all and set(array_info.array_shapes) == {(1, 1)}:
                log.warning("{} has nothing to tile, as tile_all=False but "
                            "each array has only one amplifier.")
                adoutputs.append(ad)
                continue

            blocks = [Block(ad[arrays], shape=shape) for arrays, shape in
                      zip(array_info.extensions, array_info.array_shapes)]
            offsets = [ad[exts[0]].array_section()
                       for exts in array_info.extensions]

            if tile_all and detshape != (1, 1):  # We need gaps!
                geotable = import_module('.geometry_conf', self.inst_lookups)
                chip_gaps = geotable.tile_gaps[ad.detector_name()]
                try:
                    xgap, ygap = chip_gaps
                except TypeError:  # single number, applies to both
                    xgap = ygap = chip_gaps
                transforms = []
                for i, (origin, offset) in enumerate(zip(array_info.origins, offsets)):
                    xshift = (origin[1] + offset.x1 + xgap * (i % detshape[1])) // ad.detector_x_bin()
                    yshift = (origin[0] + offset.y1 + ygap * (i // detshape[1])) // ad.detector_y_bin()
                    transforms.append(Transform(models.Shift(xshift) & models.Shift(yshift)))
                adg = AstroDataGroup(blocks, transforms)
                adg.set_reference()
                ad_out = adg.transform(attributes=attributes, process_objcat=True)
            else:
                # ADG.transform() produces full AD objects so we start with
                # the first one, and then append the single extensions created
                # by later calls to it.
                for i, block in enumerate(blocks):
                    # Simply create a single tiled array
                    adg = AstroDataGroup([block])
                    adg.set_reference()
                    if i == 0:
                        ad_out = adg.transform(attributes=attributes,
                                               process_objcat=True)
                    else:
                        ad_out.append(adg.transform(attributes=attributes,
                                                    process_objcat=True)[0])

            gt.mark_history(ad_out, primname=self.myself(), keyword=timestamp_key)
            ad_out.orig_filename = ad.filename
            ad_out.update_filename(suffix=suffix, strip=True)
            adoutputs.append(ad_out)
        return adoutputs
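The try/except TypeError idiom used for chip_gaps accepts either a single number or an (x, y) pair. A minimal sketch of that pattern, with made-up gap values:

    def parse_gaps(chip_gaps):
        """Return (xgap, ygap) from a scalar or a 2-sequence, as tileArrays does."""
        try:
            xgap, ygap = chip_gaps
        except TypeError:   # single number applies to both axes
            xgap = ygap = chip_gaps
        return xgap, ygap

    print(parse_gaps(61))        # -> (61, 61)
    print(parse_gaps((61, 0)))   # -> (61, 0)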
Example #7
    def QECorrect(self, adinputs=None, **params):
        """
        This primitive applies a wavelength-dependent QE correction to
        a 2D spectral image, based on the wavelength solution of an
        associated processed_arc.

        It is only designed to work on FLATs, and therefore unmosaicked data.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        arc : {None, AstroData, str}
            Arc(s) with distortion map.
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        sfx = params["suffix"]
        arc = params["arc"]
        use_iraf = params["use_iraf"]
        do_cal = params["do_cal"]

        if do_cal == 'skip':
            log.warning("QE correction has been turned off.")
            return adinputs

        # Get a suitable arc frame (with distortion map) for every science AD
        if arc is None:
            arc_list = self.caldb.get_processed_arc(adinputs)
        else:
            arc_list = (arc, None)

        # Provide an arc AD object for every science frame, and an origin
        for ad, arc, origin in zip(
                *gt.make_lists(adinputs, *arc_list, force_ad=(1, ))):
            if ad.phu.get(timestamp_key):
                log.warning(f"{ad.filename}: already processed by QECorrect. "
                            "Continuing.")
                continue

            if 'e2v' in ad.detector_name(pretty=True):
                log.stdinfo(f"{ad.filename} has the e2v CCDs, so no QE "
                            "correction is necessary")
                continue

            if self.timestamp_keys['mosaicDetectors'] in ad.phu:
                log.warning(f"{ad.filename} has been processed by mosaic"
                            "Detectors so QECorrect cannot be run")
                continue

            # Determines whether to multiply or divide by QE correction
            is_flat = 'FLAT' in ad.tags

            # If the arc's binning doesn't match, we may still be able to
            # fall back to the approximate solution
            xbin, ybin = ad.detector_x_bin(), ad.detector_y_bin()
            if arc is not None and (arc.detector_x_bin() != xbin
                                    or arc.detector_y_bin() != ybin):
                log.warning("Science frame and arc have different binnings.")
                arc = None

            # The plan here is to attach the mosaic gWCS to the science frame,
            # apply an origin shift to put it in the frame of the arc, and
            # then use the arc's WCS to get the wavelength. If there's no arc,
            # we just use the science frame's WCS.
            # Since we're going to change that WCS, store it for restoration.
            original_wcs = [ext.wcs for ext in ad]
            try:
                transform.add_mosaic_wcs(ad, geotable)
            except ValueError:
                log.warning(f"{ad.filename} already has a 'mosaic' coordinate"
                            "frame. This is unexpected but I'll continue.")

            if arc is None:
                if 'sq' in self.mode or do_cal == 'force':
                    raise OSError(f"No processed arc listed for {ad.filename}")
                else:
                    log.warning(f"{ad.filename}: no arc was specified. Using "
                                "wavelength solution in science frame.")
            else:
                # OK, we definitely want to try to do this, get a wavelength solution
                origin_str = f" (obtained from {origin})" if origin else ""
                log.stdinfo(f"{ad.filename}: using the arc {arc.filename}"
                            f"{origin_str}")
                if self.timestamp_keys[
                        'determineWavelengthSolution'] not in arc.phu:
                    msg = f"Arc {arc.filename} (for {ad.filename} has not been wavelength calibrated."
                    if 'sq' in self.mode or do_cal == 'force':
                        raise IOError(msg)
                    else:
                        log.warning(msg)

                # We'll be modifying this
                arc_wcs = deepcopy(arc[0].wcs)
                if 'distortion_corrected' not in arc_wcs.available_frames:
                    msg = f"Arc {arc.filename} (for {ad.filename}) has no distortion model."
                    if 'sq' in self.mode or do_cal == 'force':
                        raise OSError(msg)
                    else:
                        log.warning(msg)

                # NB. At this point, we could have an arc that has no good
                # wavelength solution nor distortion correction. But we will
                # use its WCS rather than the science frame's because it must
                # have been supplied by the user.

                # This is GMOS so no need to be as generic as distortionCorrect
                ad_detsec = ad.detector_section()
                arc_detsec = arc.detector_section()[0]
                if (ad_detsec[0].x1, ad_detsec[-1].x2) != (arc_detsec.x1,
                                                           arc_detsec.x2):
                    raise ValueError("Cannot process the offsets between "
                                     f"{ad.filename} and {arc.filename}")

                yoff1 = arc_detsec.y1 - ad_detsec[0].y1
                yoff2 = arc_detsec.y2 - ad_detsec[0].y2
                arc_ext_shapes = [(ext.shape[0] - yoff1 + yoff2, ext.shape[1])
                                  for ext in ad]
                arc_corners = np.concatenate([
                    transform.get_output_corners(ext.wcs.get_transform(
                        ext.wcs.input_frame, 'mosaic'),
                                                 input_shape=arc_shape,
                                                 origin=(yoff1, 0))
                    for ext, arc_shape in zip(ad, arc_ext_shapes)
                ],
                                             axis=1)
                arc_origin = tuple(
                    np.ceil(min(corners)) for corners in arc_corners)

                # So this is what was applied to the ARC to get the
                # mosaic frame to its pixel frame, in which the distortion
                # correction model was calculated. Convert coordinates
                # from python order to Model order.
                origin_shift = reduce(
                    Model.__and__,
                    [models.Shift(-origin) for origin in arc_origin[::-1]])
                arc_wcs.insert_transform(arc_wcs.input_frame,
                                         origin_shift,
                                         after=True)

            array_info = gt.array_information(ad)
            if array_info.detector_shape == (1, 3):
                ccd2_indices = array_info.extensions[1]
            else:
                raise ValueError(
                    f"{ad.filename} does not have 3 separate detectors")

            for index, ext in enumerate(ad):
                if index in ccd2_indices:
                    continue

                # Use the WCS in the extension if we don't have an arc,
                # otherwise use the arc's mosaic->world transformation
                if arc is None:
                    trans = ext.wcs.forward_transform
                else:
                    trans = (ext.wcs.get_transform(ext.wcs.input_frame,
                                                   'mosaic')
                             | arc_wcs.forward_transform)

                ygrid, xgrid = np.indices(ext.shape)
                # TODO: want with_units
                waves = trans(xgrid,
                              ygrid)[0] * u.nm  # Wavelength always axis 0

                # Tapering required to prevent QE correction from blowing up
                # at the extremes (remember, this is a ratio, not the actual QE)
                # We use half-Gaussians to taper
                taper = np.ones_like(ext.data)
                taper_locut, taper_losig = 350 * u.nm, 25 * u.nm
                taper_hicut, taper_hisig = 1200 * u.nm, 200 * u.nm
                taper[waves < taper_locut] = np.exp(-(
                    (waves[waves < taper_locut] - taper_locut) /
                    taper_losig)**2)
                taper[waves > taper_hicut] = np.exp(-(
                    (waves[waves > taper_hicut] - taper_hicut) /
                    taper_hisig)**2)
                try:
                    qe_correction = (qeModel(ext, use_iraf=use_iraf)(
                        (waves / u.nm).to(u.dimensionless_unscaled).value).
                                     astype(np.float32) - 1) * taper + 1
                except TypeError:  # qeModel() returns None
                    msg = f"No QE correction found for {ad.filename} extension {ext.id}"
                    if 'sq' in self.mode:
                        raise ValueError(msg)
                    else:
                        log.warning(msg)
                        continue
                log.stdinfo(f"Mean relative QE of extension {ext.id} is "
                            f"{qe_correction.mean():.5f}")
                if not is_flat:
                    qe_correction = 1. / qe_correction
                ext.multiply(qe_correction)

            for ext, orig_wcs in zip(ad, original_wcs):
                ext.wcs = orig_wcs

            # Timestamp and update the filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)

        return adinputs
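The half-Gaussian taper can be seen in isolation here. The cut-on/cut-off wavelengths and widths are copied from the code above; the wavelength grid and the raw ratio of 1.2 are invented for illustration.

    import numpy as np

    waves = np.linspace(300.0, 1400.0, 1101)   # nm, made-up grid
    locut, losig = 350.0, 25.0
    hicut, hisig = 1200.0, 200.0

    taper = np.ones_like(waves)
    lo, hi = waves < locut, waves > hicut
    taper[lo] = np.exp(-((waves[lo] - locut) / losig) ** 2)
    taper[hi] = np.exp(-((waves[hi] - hicut) / hisig) ** 2)

    # The correction is pulled toward 1 at the extremes:
    qe_correction = (1.2 - 1.0) * taper + 1.0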
Example #8
    def mosaicDetectors(self, adinputs=None, **params):
        """
        This primitive does a full mosaic of all the arrays in an AD object.
        An appropriate geometry_conf.py module containing geometric information
        is required.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files.
        sci_only: bool
            mosaic only SCI image data. Default is False
        order: int (1-5)
            order of spline interpolation
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        suffix = params['suffix']
        order = params['order']
        attributes = ['data'] if params['sci_only'] else None
        geotable = import_module('.geometry_conf', self.inst_lookups)

        adoutputs = []
        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by mosaicDetectors".
                            format(ad.filename))
                adoutputs.append(ad)
                continue

            if len(ad) == 1:
                log.warning("{} has only one extension, so there's nothing "
                            "to mosaic".format(ad.filename))
                adoutputs.append(ad)
                continue

            # If there's an overscan section, we must trim it before mosaicking
            try:
                overscan_kw = ad._keyword_for('overscan_section')
            except AttributeError:  # doesn't exist for this AD, so carry on
                pass
            else:
                if overscan_kw in ad.hdr:
                    ad = gt.trim_to_data_section(ad, self.keyword_comments)

            # Create the blocks (individual physical detectors)
            array_info = gt.array_information(ad)
            blocks = [Block(ad[arrays], shape=shape) for arrays, shape in
                      zip(array_info.extensions, array_info.array_shapes)]
            offsets = [ad[exts[0]].array_section()
                       for exts in array_info.extensions]

            detname = ad.detector_name()
            xbin, ybin = ad.detector_x_bin(), ad.detector_y_bin()
            geometry = geotable.geometry[detname]
            default_shape = geometry.get('default_shape')
            adg = AstroDataGroup()

            for block, origin, offset in zip(blocks, array_info.origins, offsets):
                # Origins are in (x, y) order in LUT
                block_geom = geometry[origin[::-1]]
                nx, ny = block_geom.get('shape', default_shape)
                nx /= xbin
                ny /= ybin
                shift = block_geom.get('shift', (0, 0))
                rot = block_geom.get('rotation', 0.)
                mag = block_geom.get('magnification', (1, 1))
                transform = Transform()

                # Shift the Block's coordinates based on its location within
                # the full array, to ensure any rotation takes place around
                # the true centre.
                if offset.x1 != 0 or offset.y1 != 0:
                    transform.append(models.Shift(float(offset.x1) / xbin) &
                                     models.Shift(float(offset.y1) / ybin))

                if rot != 0 or mag != (1, 1):
                    # Shift to centre, do whatever, and then shift back
                    transform.append(models.Shift(-0.5*(nx-1)) &
                                     models.Shift(-0.5*(ny-1)))
                    if rot != 0:
                        # Cope with non-square pixels by scaling in one
                        # direction to make them square before applying the
                        # rotation, and then reversing that.
                        if xbin != ybin:
                            transform.append(models.Identity(1) & models.Scale(ybin / xbin))
                        transform.append(models.Rotation2D(rot))
                        if xbin != ybin:
                            transform.append(models.Identity(1) & models.Scale(xbin / ybin))
                    if mag != (1, 1):
                        transform.append(models.Scale(mag[0]) &
                                         models.Scale(mag[1]))
                    transform.append(models.Shift(0.5*(nx-1)) &
                                     models.Shift(0.5*(ny-1)))
                transform.append(models.Shift(float(shift[0]) / xbin) &
                                 models.Shift(float(shift[1]) / ybin))
                adg.append(block, transform)

            adg.set_reference()
            ad_out = adg.transform(attributes=attributes, order=order,
                                   process_objcat=False)

            ad_out.orig_filename = ad.filename
            gt.mark_history(ad_out, primname=self.myself(), keyword=timestamp_key)
            ad_out.update_filename(suffix=suffix, strip=True)
            adoutputs.append(ad_out)

        return adoutputs
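The non-square-pixel handling above (scale one axis so pixels are square, rotate, then undo the scaling) composes into a plain model chain. This sketch assumes 2x1 binning and a zero rotation, in which case the round trip is the identity.

    from astropy.modeling import models

    xbin, ybin, rot = 2, 1, 0.0   # made-up binning and rotation
    squeeze = models.Identity(1) & models.Scale(ybin / xbin)
    unsqueeze = models.Identity(1) & models.Scale(xbin / ybin)
    transform = squeeze | models.Rotation2D(rot) | unsqueeze

    print(transform(100.0, 200.0))   # -> (100.0, 200.0) when rot == 0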
Example #9
    def display(self, adinputs=None, **params):
        """
        Displays an image on the ds9 display, using multiple frames if
        there are multiple extensions. Saturated pixels can be displayed
        in red, and overlays can also be shown.

        Parameters
        ----------
        extname: str
            'SCI', 'VAR', or 'DQ': plane to display
        frame: int
            starting frame for display
        ignore: bool
            setting to True turns off the display
        remove_bias: bool
            attempt to subtract bias before displaying?
        threshold: str='auto'/float
            level above which to flag pixels as saturated
        tile: bool
            attempt to tile arrays before displaying?
        zscale: bool
            use zscale algorithm?
        overlay: list
            list of overlays for the display
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))

        # No-op if ignore=True
        if params["ignore"]:
            log.warning("display turned off per user request")
            return adinputs

        threshold = params['threshold']
        remove_bias = params.get('remove_bias', False)
        extname = params['extname']
        tile = params['tile']
        zscale = params['zscale']
        overlays = params['overlay']
        frame = params['frame'] if params['frame'] else 1
        overlay_index = 0
        lnd = _localNumDisplay()

        if isinstance(overlays, str):
            try:
                overlays = _read_overlays_from_file(overlays)
            except OSError:
                log.warning(f"Cannot open overlays file {overlays}")
                overlays = None

        for ad in adinputs:
            # Allows elegant break from nested loops
            if frame > 16:
                log.warning("Too many images; only the first 16 are displayed")
                break

            # Threshold and bias make sense only for SCI extension
            if extname != 'SCI':
                threshold = None
                remove_bias = False
            elif threshold == 'None':
                threshold = None
            elif threshold == 'auto':
                mosaicked = ((ad.phu.get(
                    self.timestamp_keys["mosaicDetectors"]) is not None)
                             or (ad.phu.get(self.timestamp_keys["tileArrays"])
                                 is not None))
                has_dq = all([ext.mask is not None for ext in ad])
                if not has_dq:
                    if mosaicked:
                        log.warning("Cannot add DQ to mosaicked data; no "
                                    "threshold mask will be applied to "
                                    "{}".format(ad.filename))
                        threshold = None
                    else:
                        # addDQ operates in place so deepcopy to preserve input
                        ad = self.addDQ([deepcopy(ad)])[0]

            if remove_bias:
                if (ad.phu.get('BIASIM') or ad.phu.get('DARKIM') or ad.phu.get(
                        self.timestamp_keys["subtractOverscan"])):
                    log.fullinfo("Bias level has already been removed from "
                                 "data; no approximate correction will be "
                                 "performed")
                else:
                    try:
                        bias_level = get_bias_level(ad)
                    except NotImplementedError:
                        # For non-GMOS instruments
                        bias_level = None

                    if bias_level is not None:
                        ad = deepcopy(ad)  # Leave original untouched!
                        log.stdinfo("Subtracting approximate bias level from "
                                    "{} for display".format(ad.filename))
                        log.fullinfo("Bias levels used: {}".format(
                            str(bias_level)))
                        for ext, bias in zip(ad, bias_level):
                            ext.subtract(
                                np.float32(bias) if bias is not None else 0)
                    else:
                        log.warning("Bias level not found for {}; approximate "
                                    "bias will not be removed".format(
                                        ad.filename))

            # Check whether data needs to be tiled before displaying
            # Otherwise, flatten all desired extensions into a single list
            num_ext = len(ad)
            if tile and num_ext > 1:
                log.fullinfo("Tiling extensions together before displaying")
                # post-transform metadata is arranged in order of blocks, not
                # slices, so we need to ensure the correct offsets are applied
                # to each slice
                array_info = gt.array_information(ad)
                ad = self.tileArrays([ad], tile_all=True)[0]
                # Handle the case where num_ext overlays were supplied,
                # to be applied to all ADs
                if overlays and len(overlays) + overlay_index >= num_ext:
                    new_overlay = []
                    trans_data = ad.nddata[0].meta.pop("transform")
                    for ext_indices, corner, block in zip(
                            array_info.extensions, trans_data["corners"],
                            trans_data["block_corners"]):
                        xshift = int(round(corner[1][0]))
                        yshift = int(round(corner[0][0]))
                        for ext_index, b in zip(ext_indices, block):
                            dx, dy = xshift + b[1], yshift + b[0]
                            i = overlay_index + ext_index
                            if overlays[i]:
                                new_overlay.extend([(x + dx, y + dy, r)
                                                    for x, y, r in overlays[i]
                                                    ])
                    overlays = (overlays[:overlay_index] + (new_overlay, ) +
                                overlays[overlay_index + num_ext:])

            # Each extension is an individual display item (if the data have been
            # tiled, then there'll only be one extension per AD, of course)
            for ext in ad:
                if frame > 16:
                    break

                # Squeeze the data to remove any empty dimensions (eg, raw F2 data)
                ext.operate(np.squeeze)

                # Get the data we're going to display. TODO Replace extname with attr?
                data = getattr(ext, {
                    'SCI': 'data',
                    'DQ': 'mask',
                    'VAR': 'variance'
                }[extname], None)
                dqdata = ext.mask
                if data is None:
                    log.warning("No data to display in {}[{}]".format(
                        ext.filename, extname))
                    continue

                # One-dimensional data (ie, extracted spectra)
                if len(data.shape) == 1:
                    continue

                # Make threshold mask if desired
                masks = []
                mask_colors = []
                if threshold is not None:
                    if threshold != 'auto':
                        satmask = data > threshold
                    else:
                        if dqdata is None:
                            log.warning("No DQ plane found; cannot make "
                                        "threshold mask")
                            satmask = None
                        else:
                            satmask = (dqdata &
                                       (DQ.non_linear | DQ.saturated)) > 0
                    if satmask is not None:
                        masks.append(satmask)
                        mask_colors.append(204)

                if overlays:
                    # Could be single overlay, or list. Replicate behaviour of
                    # gt.make_lists (which we can't use because we haven't
                    # made a complete list of displayed extensions at the start
                    # in order to avoid memory bloat)
                    try:
                        overlay = overlays[overlay_index]
                    except TypeError:
                        overlay = overlays
                    except IndexError:
                        if len(overlays) == 1:
                            overlay = overlays[0]
                    try:
                        masks.append(make_overlay_mask(overlay, ext.shape))
                    except Exception:
                        pass
                    else:
                        mask_colors.append(206)
                    overlay_index += 1

                # Define the display name
                if tile and extname == 'SCI':
                    name = ext.filename
                elif tile:
                    name = f'{ext.filename}({extname})'
                else:
                    name = f'{ext.filename}({extname}, extension {ext.id})'

                try:
                    lnd.display(data,
                                name=name,
                                frame=frame,
                                zscale=zscale,
                                bpm=None if extname == 'DQ' else dqdata,
                                quiet=True,
                                masks=masks,
                                mask_colors=mask_colors)
                except OSError:
                    log.warning("ds9 not found; cannot display input")

                frame += 1

                # Print some statistics for flats
                if extname == 'SCI' and {'GMOS', 'IMAGE', 'FLAT'}.issubset(
                        ext.tags):
                    good_data = data[dqdata ==
                                     0] if dqdata is not None else data
                    mean = np.mean(good_data)
                    median = np.median(good_data)
                    log.stdinfo("Twilight flat counts for {}:".format(
                        ext.filename))
                    log.stdinfo("    Mean value:   {:.0f}".format(mean))
                    log.stdinfo("    Median value: {:.0f}".format(median))

        return adinputs
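The saturation mask built in display() is a plain bitwise test against the DQ plane. A standalone sketch with invented bit values (geminidr's DQ module defines the real ones):

    import numpy as np

    DQ_non_linear, DQ_saturated = 2, 4   # assumed bit values, for illustration

    dqdata = np.array([[0, 2], [4, 6]], dtype=np.uint16)
    satmask = (dqdata & (DQ_non_linear | DQ_saturated)) > 0
    print(satmask)   # [[False  True] [ True  True]]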
Example #10
    def tileArrays(self, adinputs=None, **params):
        """
        This primitive combines extensions by tiling (no interpolation).
        The array_section() and detector_section() descriptors are used
        to derive the geometry of the tiling, so outside help (from the
        instrument's geometry_conf module) is only required if there are
        multiple arrays being tiled together, as the gaps need to be
        specified.

        If the input AstroData objects still contain non-data regions,
        these will not be trimmed. In that case, the WCS of the final image
        will only be correct over part of the image, since extra space has
        been introduced.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        tile_all: bool
            tile to a single extension, rather than one per array?
            (array=physical detector)
        sci_only: bool
            tile only the data plane?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        suffix = params['suffix']
        tile_all = params['tile_all']
        attributes = ['data'] if params["sci_only"] else None

        adoutputs = []
        for ad in adinputs:
            if len(ad) == 1:
                log.warning("{} has only one extension, so there's nothing "
                            "to tile".format(ad.filename))
                adoutputs.append(ad)
                continue

            array_info = gt.array_information(ad)
            detshape = array_info.detector_shape
            if not tile_all and set(array_info.array_shapes) == {(1, 1)}:
                log.warning("{} has nothing to tile, as tile_all=False but "
                            "each array has only one amplifier.")
                adoutputs.append(ad)
                continue

            if tile_all and detshape != (1, 1):  # We need gaps!
                geotable = import_module('.geometry_conf', self.inst_lookups)
                chip_gaps = geotable.tile_gaps[ad.detector_name()]
                try:
                    xgap, ygap = chip_gaps
                except TypeError:  # single number, applies to both
                    xgap = ygap = chip_gaps

            kw = ad._keyword_for('data_section')
            xbin, ybin = ad.detector_x_bin(), ad.detector_y_bin()

            # Work out additional shifts required to cope with possible
            # overscan regions, including those in already-tiled CCDs
            if tile_all:
                yorigins, xorigins = np.rollaxis(
                    np.array(array_info.origins),
                    1).reshape((2, ) + array_info.detector_shape)
                xorigins //= xbin
                yorigins //= ybin
            else:
                yorigins, xorigins = np.zeros((2, ) +
                                              array_info.detector_shape)
            it_ccd = np.nditer(xorigins, flags=['multi_index'])
            i = 0
            while not it_ccd.finished:
                ccdy, ccdx = it_ccd.multi_index
                shp = array_info.array_shapes[i]
                exts = array_info.extensions[i]
                xshifts = np.zeros(shp, dtype=np.int32)
                yshifts = np.zeros(shp, dtype=np.int32)
                it = np.nditer(np.array(exts).reshape(shp),
                               flags=['multi_index'])
                while not it.finished:
                    iy, ix = it.multi_index
                    ext = ad[int(it[0])]
                    datsec = ext.data_section()
                    if datsec.x1 > 0:
                        xshifts[iy, ix:] += datsec.x1
                    if datsec.x2 < ext.shape[1]:
                        xshifts[iy, ix + 1:] += ext.shape[1] - datsec.x2
                    if datsec.y1 > 0:
                        yshifts[iy:, ix] += datsec.y1
                    if datsec.y2 < ext.shape[0]:
                        yshifts[iy + 1:, ix] += ext.shape[0] - datsec.y2

                    arrsec = ext.array_section()
                    ext_shift = (models.Shift(
                        (arrsec.x1 // xbin - datsec.x1)) & models.Shift(
                            (arrsec.y1 // ybin - datsec.y1)))

                    # We need to have a "tile" Frame to resample to.
                    # We also need to perform the inverse, after the "tile"
                    # frame, of any change we make beforehand.
                    if ext.wcs is None:
                        ext.wcs = gWCS([(Frame2D(name="pixels"), ext_shift),
                                        (Frame2D(name="tile"), None)])
                    elif 'tile' not in ext.wcs.available_frames:
                        ext.wcs = gWCS([(ext.wcs.input_frame, ext_shift),
                                        (Frame2D(name="tile"),
                                         ext.wcs.pipeline[0].transform)] +
                                       ext.wcs.pipeline[1:])
                        ext.wcs.insert_transform('tile',
                                                 ext_shift.inverse,
                                                 after=True)

                    dx, dy = xshifts[iy, ix], yshifts[iy, ix]
                    if tile_all:
                        dx += xorigins[ccdy, ccdx]
                        dy += yorigins[ccdy, ccdx]
                    if dx or dy:  # Don't bother if they're both zero
                        shift_model = models.Shift(dx) & models.Shift(dy)
                        ext.wcs.insert_transform('tile',
                                                 shift_model,
                                                 after=False)
                        if ext.wcs.output_frame.name != 'tile':
                            ext.wcs.insert_transform('tile',
                                                     shift_model.inverse,
                                                     after=True)

                    # Reset data_section since we're not trimming overscans
                    ext.hdr[kw] = '[1:{},1:{}]'.format(*reversed(ext.shape))
                    it.iternext()

                if tile_all:
                    # We need to shift other arrays if this one is larger than
                    # its expected size due to overscan regions. We've kept
                    # track of shifts we've introduced, but we may also have
                    # been sent a previous tile_all=False output.
                    if ccdx < detshape[1] - 1:
                        max_xshift = max(
                            xshifts.max(), ext.shape[1] -
                            (xorigins[ccdy, ccdx + 1] - xorigins[ccdy, ccdx]))
                        xorigins[ccdy, ccdx + 1:] += max_xshift + xgap // xbin
                    if ccdy < detshape[0] - 1:
                        max_yshift = max(
                            yshifts.max(), ext.shape[0] -
                            (yorigins[ccdy + 1, ccdx] - yorigins[ccdy, ccdx]))
                        yorigins[ccdy + 1:, ccdx] += max_yshift + ygap // ybin
                elif i == 0:
                    ad_out = transform.resample_from_wcs(ad[exts],
                                                         "tile",
                                                         attributes=attributes,
                                                         process_objcat=True)
                else:
                    ad_out.append(
                        transform.resample_from_wcs(ad[exts],
                                                    "tile",
                                                    attributes=attributes,
                                                    process_objcat=True)[0])
                i += 1
                it_ccd.iternext()

            if tile_all:
                ad_out = transform.resample_from_wcs(ad,
                                                     "tile",
                                                     attributes=attributes,
                                                     process_objcat=True)

            gt.mark_history(ad_out,
                            primname=self.myself(),
                            keyword=timestamp_key)
            ad_out.orig_filename = ad.filename
            ad_out.update_filename(suffix=suffix, strip=True)
            adoutputs.append(ad_out)
        return adoutputs
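
A minimal usage sketch for the primitive above (not from the source): how tileArrays might be driven from a DRAGONS-style reduction. The GMOSImage class and the filename are assumptions for illustration.

# Hedged usage sketch; GMOSImage and the filename are hypothetical.
import astrodata

ad = astrodata.open('N20010203S0123.fits')   # hypothetical raw GMOS frame
p = GMOSImage([ad])                          # hypothetical primitives instance
tiled = p.tileArrays(adinputs=[ad], suffix='_tiled',
                     tile_all=True, sci_only=False)
# tile_all=True returns a single-extension mosaic, with chip gaps taken
# from the instrument's geometry_conf lookup.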
Example #11
0
    def normalizeFlat(self, adinputs=None, **params):
        """
        This primitive normalizes a GMOS Longslit spectroscopic flatfield
        in a manner similar to that performed by gsflat in Gemini-IRAF.
        A cubic spline is fitted along the dispersion direction of each
        row, separately for each CCD.

        As this primitive is GMOS-specific, we know the dispersion direction
        will be along the rows, and there will be 3 CCDs.

        For Hamamatsu CCDs, the 21 unbinned columns at each CCD edge are
        masked out, following the procedure in gsflat.
        TODO: Should we add these to the BPM?

        Parameters
        ----------
        suffix : str/None
            suffix to be added to output files
        center : int/None
            central row/column for 1D extraction (None => use middle)
        nsum : int
            number of rows/columns around center to combine
        function : str
            type of function to fit (splineN or polynomial types)
        order : int/str
            Order of the spline fit to be performed
            (can be 3 ints, separated by commas)
        lsigma : float/None
            lower rejection limit in standard deviations
        hsigma : float/None
            upper rejection limit in standard deviations
        niter : int
            maximum number of rejection iterations
        grow : float/False
            growth radius for rejected pixels
        threshold : float
            threshold (relative to peak) for flagging unilluminated pixels
        interactive : bool
            set to activate an interactive preview to fine-tune the input parameters
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        # For flexibility, the code is going to pass whatever validated
        # parameters it gets (apart from suffix and spectral_order) to
        # the spline fitter
        suffix = params["suffix"]
        threshold = params["threshold"]
        spectral_order = params["order"]
        interactive_reduce = params["interactive"]

        for ad in adinputs:
            xbin, ybin = ad.detector_x_bin(), ad.detector_y_bin()
            array_info = gt.array_information(ad)
            is_hamamatsu = 'Hamamatsu' in ad.detector_name(pretty=True)
            ad_tiled = self.tileArrays([ad], tile_all=False)[0]
            ad_fitted = astrodata.create(ad.phu)
            all_fp_init = []

            # If the entire row is unilluminated, we want to fit
            # the pixels but still keep the edges masked
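            # (the XOR with the row-wise AND clears DQ.unilluminated from any
            # row where every pixel carries that flag, so such rows get fitted)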
            for ext in ad_tiled:
                try:
                    ext.mask ^= (np.bitwise_and.reduce(ext.mask, axis=1)
                                 & DQ.unilluminated)[:, None]
                except TypeError:  # ext.mask is None
                    pass
                else:
                    if is_hamamatsu:
                        ext.mask[:, :21 // xbin] = 1
                        ext.mask[:, -21 // xbin:] = 1

                all_fp_init.append(fit_1D.translate_params(params))

            # Parameter validation should ensure we get an int or a list of 3 ints
            try:
                orders = [int(x) for x in spectral_order]
            except TypeError:
                orders = [spectral_order] * 3
            # capture the per extension order into the fit parameters
            for order, fp_init in zip(orders, all_fp_init):
                fp_init["order"] = order

            # Interactive or not
            if interactive_reduce:
                # all_X arrays are used to track appropriate inputs for each of the N extensions
                all_pixels = []
                all_domains = []
                nrows = ad_tiled[0].shape[0]
                for ext in ad_tiled:
                    pixels = np.arange(ext.shape[1])

                    all_pixels.append(pixels)
                    dispaxis = 2 - ext.dispersion_axis()
                    all_domains.append([0, ext.shape[dispaxis] - 1])

                config = self.params[self.myself()]
                config.update(**params)

                # Create a 'row' parameter to add to the UI so the user can select the row they
                # want to fit.
                reinit_params = [
                    "row",
                ]
                reinit_extras = {
                    "row":
                    RangeField("Row of data to operate on",
                               int,
                               int(nrows / 2),
                               min=1,
                               max=nrows)
                }

                # This function is used by the interactive fitter to generate the x,y,weights to use
                # for each fit.  We only want to fit a single row of data interactively, so that we can
                # be responsive in the UI.  The 'row' extra parameter defined above will create a
                # slider for the user and we will have access to the selected value in the 'extras'
                # dictionary passed in here.
                def reconstruct_points(conf, extras):
                    # 'row' is 1-indexed in the UI; clamp to a valid 0-index
                    r = max(0, extras['row'] - 1)
                    all_coords = []
                    for rppixels, rpext in zip(all_pixels, ad_tiled):
                        masked_data = np.ma.masked_array(
                            rpext.data[r],
                            mask=None if rpext.mask is None else rpext.mask[r])
                        if rpext.variance is None:
                            weights = None
                        else:
                            weights = np.sqrt(at.divide0(
                                1., rpext.variance[r]))
                        all_coords.append([rppixels, masked_data, weights])
                    return all_coords

                visualizer = fit1d.Fit1DVisualizer(reconstruct_points,
                                                   all_fp_init,
                                                   config=config,
                                                   reinit_params=reinit_params,
                                                   reinit_extras=reinit_extras,
                                                   tab_name_fmt="CCD {}",
                                                   xlabel='x',
                                                   ylabel='y',
                                                   reinit_live=True,
                                                   domains=all_domains,
                                                   title="Normalize Flat",
                                                   enable_user_masking=False)
                geminidr.interactive.server.interactive_fitter(visualizer)

                # The fit models were done on a single row, so we need to
                # get the parameters that were used in the final fit for
                # each one, and then rerun it on the full data for that
                # extension.
                all_m_final = visualizer.results()
                for m_final, ext in zip(all_m_final, ad_tiled):
                    masked_data = np.ma.masked_array(ext.data, mask=ext.mask)
                    weights = np.sqrt(at.divide0(1., ext.variance))

                    fit1d_params = m_final.extract_params()
                    fitted_data = fit_1D(masked_data,
                                         weights=weights,
                                         **fit1d_params,
                                         axis=1).evaluate()

                    # Copy header so we have the _section() descriptors
                    ad_fitted.append(fitted_data, header=ext.hdr)
            else:
                for ext, indices, fit1d_params in zip(ad_tiled,
                                                      array_info.extensions,
                                                      all_fp_init):
                    masked_data = np.ma.masked_array(ext.data, mask=ext.mask)
                    weights = np.sqrt(at.divide0(1., ext.variance))

                    fitted_data = fit_1D(masked_data,
                                         weights=weights,
                                         **fit1d_params,
                                         axis=1).evaluate()

                    # Copy header so we have the _section() descriptors
                    ad_fitted.append(fitted_data, header=ext.hdr)

            # Find the largest spline value for each row across all extensions
            # and mask pixels below the requested fraction of the peak
            row_max = np.array([
                ext_fitted.data.max(axis=1) for ext_fitted in ad_fitted
            ]).max(axis=0)

            # Prevent runtime error in division
            row_max[row_max == 0] = np.inf

            for ext_fitted in ad_fitted:
                ext_fitted.mask = np.where(
                    (ext_fitted.data.T / row_max).T < threshold,
                    DQ.unilluminated, DQ.good).astype(DQ.datatype)

            for ext_fitted, indices in zip(ad_fitted, array_info.extensions):
                tiled_arrsec = ext_fitted.array_section()
                for i in indices:
                    ext = ad[i]
                    arrsec = ext.array_section()
                    slice_ = (slice((arrsec.y1 - tiled_arrsec.y1) // ybin,
                                    (arrsec.y2 - tiled_arrsec.y1) // ybin),
                              slice((arrsec.x1 - tiled_arrsec.x1) // xbin,
                                    (arrsec.x2 - tiled_arrsec.x1) // xbin))
                    # Suppress warnings to do with fitted_data==0
                    # (which create NaNs in variance)
                    with np.errstate(invalid='ignore', divide='ignore'):
                        ext.divide(ext_fitted.nddata[slice_])
                    np.nan_to_num(ext.data, copy=False, posinf=0, neginf=0)
                    np.nan_to_num(ext.variance, copy=False)

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)

        return adinputs
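
Before the next function, a short illustration of the row-by-row spline normalization that normalizeFlat performs, sketched with scipy in place of the pipeline's fit_1D fitter; the shapes, smoothing choice, and the absence of sigma-rejection are simplifying assumptions.

# Illustrative sketch only -- scipy stands in for fit_1D here.
import numpy as np
from scipy.interpolate import UnivariateSpline

def normalize_rows(data, smooth=None):
    # data: 2D array with the dispersion direction along axis 1
    x = np.arange(data.shape[1])
    out = np.empty(data.shape, dtype=float)
    for i, row in enumerate(data):
        spline = UnivariateSpline(x, row, k=3, s=smooth)  # cubic spline fit
        out[i] = row / spline(x)                          # normalize the row
    return out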
    def scaleFringeToScience(self, rc):
        """
        This primitive will scale the fringes to their matching science data
        The fringes should be in the stream this primitive is called on,
        and the reference science frames should be loaded into the RC,
        as, eg. rc["science"] = adinput.
        
        There are two ways to find the value to scale fringes by:
        1. If stats_scale is set to True, the equation:
        (letting science data = b (or B), and fringe = a (or A))
    
        arrayB = where({where[SCIb < (SCIb.median+2.5*SCIb.std)]} 
                          > [SCIb.median-3*SCIb.std])
        scale = arrayB.std / SCIa.std
    
        The section of the SCI arrays to use for calculating these statistics
        is the CCD2 SCI data excluding the outer 5% pixels on all 4 sides.
        Future enhancement: allow user to choose section
    
        2. If stats_scale=False, then scale will be calculated using:
        exposure time of science / exposure time of fringe

        :param stats_scale: Use statistics to calculate the scale values,
                            rather than exposure time
        :type stats_scale: Python boolean (True/False)
        """
        
        # Instantiate the log
        log = gemLog.getGeminiLog(logType=rc["logType"],
                                  logLevel=rc["logLevel"])
        
        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "scaleFringeToScience",
                                 "starting"))
        
        # Define the keyword to be used for the time stamp for this primitive
        timestamp_key = self.timestamp_keys["scaleFringeToScience"]

        # Check for user-supplied science frames
        fringe = rc.get_inputs_as_astrodata()
        science_param = rc["science"]
        fringe_dict = None
        if science_param is not None:
            # The user supplied an input to the science parameter
            if not isinstance(science_param, list):
                science_list = [science_param]
            else:
                science_list = science_param

            # If there is one fringe and multiple science frames,
            # the fringe must be deepcopied to allow it to be
            # scaled separately for each frame
            if len(fringe)==1 and len(science_list)>1:
                fringe = [deepcopy(fringe[0]) for img in science_list]

            # Convert filenames to AD instances if necessary
            tmp_list = []
            for science in science_list:
                if not isinstance(science, AstroData):
                    science = AstroData(science)
                tmp_list.append(science)
            science_list = tmp_list
            
            fringe_dict = gt.make_dict(key_list=science_list, 
                                       value_list=fringe)
            fringe_output = []
        else:
            log.warning("No science frames specified; no scaling will be done")
            science_list = []
            fringe_output = fringe

        # Loop over each AstroData object in the science list
        for ad in science_list:
            
            # Retrieve the appropriate fringe
            fringe = fringe_dict[ad]

            # Check the inputs have matching filters, binning and SCI shapes.
            try:
                gt.check_inputs_match(ad1=ad, ad2=fringe)
            except Errors.ToolboxError:
                # If not, try to clip the fringe frame to the size of the
                # science data
                # For a GMOS example, this allows a full frame fringe to
                # be used for a CCD2-only science frame. 
                fringe = gt.clip_auxiliary_data(
                    adinput=ad, aux=fringe, aux_type="cal")[0]

                # Check again, but allow it to fail if they still don't match
                gt.check_inputs_match(ad1=ad, ad2=fringe)

            # Check whether statistics should be used
            stats_scale = rc["stats_scale"]

            # Calculate the scale value
            scale = 1.0
            if not stats_scale:
                # Use the exposure times to calculate the scale
                log.fullinfo("Using exposure times to calculate the scaling"+
                             " factor")
                try:
                    scale = ad.exposure_time() / fringe.exposure_time()
                except Exception:
                    raise Errors.InputError("Could not get exposure times " +
                                            "for %s, %s. Try stats_scale=True" %
                                            (ad.filename,fringe.filename))
            else:

                # Use statistics to calculate the scaling factor
                log.fullinfo("Using statistics to calculate the " +
                             "scaling factor")

                # Deepcopy the input so it can be manipulated without
                # affecting the original
                statsad = deepcopy(ad)
                statsfringe = deepcopy(fringe)

                # Trim off any overscan region still present
                log.fullinfo("Trimming data to data section to remove "
                             "overscan region")
                statsad, statsfringe = gt.trim_to_data_section([statsad,
                                                                statsfringe])

                # Check the number of science extensions; if more than
                # one, use CCD2 data only
                nsciext = statsad.count_exts("SCI")
                if nsciext > 1:

                    # Get the CCD numbers and ordering information
                    # corresponding to each extension
                    sci_info, frng_info = gt.array_information([statsad,
                                                                statsfringe])

                    # Pull out CCD2 data
                    scidata = []
                    frngdata = []
                    dqdata = []
                    for i in range(nsciext):

                        # Get the next extension in physical order
                        sciext = statsad["SCI",sci_info["amps_order"][i]]
                        frngext = statsfringe["SCI",frng_info["amps_order"][i]]

                        # Check to see if it is on CCD2; if so, keep it
                        if sci_info[
                            "array_number"][("SCI",sciext.extver())]==2:

                            scidata.append(sciext.data)

                            dqext = statsad["DQ",sci_info["amps_order"][i]]
                            maskext = statsad["OBJMASK",
                                              sci_info["amps_order"][i]]
                            if dqext is not None and maskext is not None:
                                dqdata.append(dqext.data | maskext.data)
                            elif dqext is not None:
                                dqdata.append(dqext.data)
                            elif maskext is not None:
                                dqdata.append(maskext.data)

                        if frng_info[
                            "array_number"][("SCI",frngext.extver())]==2:
                            frngdata.append(frngext.data)
                        
                    # Stack data if necessary
                    if len(scidata)>1:
                        scidata = np.hstack(scidata)
                        frngdata = np.hstack(frngdata)
                    else:
                        scidata = scidata[0]
                        frngdata = frngdata[0]
                    if len(dqdata)>0:
                        if len(dqdata)>1:
                            dqdata = np.hstack(dqdata)
                        else:
                            dqdata = dqdata[0]
                    else:
                        dqdata = None
                else:
                    scidata = statsad["SCI"].data
                    frngdata = statsfringe["SCI"].data

                    dqext = statsad["DQ"]
                    maskext = statsad["OBJMASK"]
                    if dqext is not None and maskext is not None:
                        dqdata = dqext.data | maskext.data
                    elif dqext is not None:
                        dqdata = dqext.data
                    elif maskext is not None:
                        dqdata = maskext.data
                    else:
                        dqdata = None

                if dqdata is not None:
                    # Replace any DQ-flagged data with the median value
                    smed = np.median(scidata[dqdata==0])
                    scidata = np.where(dqdata!=0,smed,scidata)

                # Calculate the maximum and minimum in a box centered on 
                # each data point.  The local depth of the fringe is
                # max - min.  The overall fringe strength is the median
                # of the local fringe depths.

                # Width of the box is binning and
                # filter dependent, determined by experimentation
                # Results don't seem to depend heavily on the box size
                if ad.filter_name(pretty=True).as_pytype() == "i":
                    size = 20
                else:
                    size = 40
                size //= ad.detector_x_bin().as_pytype()
                
                # Use ndimage maximum_filter and minimum_filter to
                # get the local maxima and minima
                import scipy.ndimage as ndimage
                sci_max = ndimage.maximum_filter(scidata, size)
                sci_min = ndimage.minimum_filter(scidata, size)

                # Take off 5% of the width as a border (at least 20 pixels)
                xborder = max(20, int(0.05 * scidata.shape[1]))
                yborder = max(20, int(0.05 * scidata.shape[0]))
                sci_max = sci_max[yborder:-yborder, xborder:-xborder]
                sci_min = sci_min[yborder:-yborder, xborder:-xborder]

                # Take the median difference
                sci_df = np.median(sci_max - sci_min)

                # Do the same for the fringe
                frn_max = ndimage.maximum_filter(frngdata, size)
                frn_min = ndimage.minimum_filter(frngdata, size)
                frn_max = frn_max[yborder:-yborder, xborder:-xborder]
                frn_min = frn_min[yborder:-yborder, xborder:-xborder]
                frn_df = np.median(frn_max - frn_min)

                # Scale factor
                # This tends to overestimate the factor, but it is
                # at least in the right ballpark, unlike the estimation
                # used in girmfringe (masked_sci.std/fringe.std)
                scale = sci_df / frn_df

            log.fullinfo("Scale factor found = "+str(scale))
                
            # Use mult from the arith toolbox to perform the scaling of 
            # the fringe frame
            scaled_fringe = fringe.mult(scale)
            
            # Add the appropriate time stamps to the PHU
            gt.mark_history(adinput=scaled_fringe, keyword=timestamp_key)

            # Change the filename
            scaled_fringe.filename = gt.filename_updater(
                adinput=ad, suffix=rc["suffix"], strip=True)
            
            fringe_output.append(scaled_fringe)
            
        # Report the list of output AstroData objects to the reduction context
        rc.report_output(fringe_output)
        yield rc
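
To make the stats_scale branch above concrete, here is a self-contained sketch of the fringe-depth statistic it describes (median of local max-min depths with a trimmed border); the default box size and 20-pixel border floor mirror the code above, but everything else is an assumption for illustration.

# Standalone sketch of the scaling statistic used when stats_scale=True.
import numpy as np
from scipy import ndimage

def fringe_scale(sci, fringe, size=20, border_frac=0.05):
    yb = max(20, int(border_frac * sci.shape[0]))
    xb = max(20, int(border_frac * sci.shape[1]))

    def depth(img):
        # Local fringe depth = max - min in a box around each pixel;
        # overall strength = median of the local depths.
        hi = ndimage.maximum_filter(img, size)[yb:-yb, xb:-xb]
        lo = ndimage.minimum_filter(img, size)[yb:-yb, xb:-xb]
        return np.median(hi - lo)

    return depth(sci) / depth(fringe)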