Example #1
# Imports assumed for these test snippets (module paths as in DRAGONS):
import numpy as np
from numpy.testing import assert_allclose
from astrodata import NDAstroData
from geminidr.gemini.lookups import DQ_definitions as DQ
from gempy.library.nddops import NDStacker

def test_sigclip(capsys):
    # Confirm rejection of high pixel and correct output DQ
    data = np.array([1., 1., 1., 2., 2., 2., 2., 100.]).reshape(8, 1)
    ndd = NDAstroData(data)
    stackit = NDStacker(combine="mean",
                        reject="sigclip",
                        lsigma=3,
                        hsigma=3,
                        debug_pixel=0)
    result = stackit(ndd, save_rejection_map=True)
    assert_allclose(result.data, 1.5714285714285714)  # 100 is rejected
    assert result.meta['other']['REJMAP'].data[0] == 1

    out = capsys.readouterr().out
    expected = """\
Rejection: sigclip {'lsigma': 3, 'hsigma': 3}
img     data        mask    variance       immediately after rejection
  0          1.0000     0               -
  1          1.0000     0               -
  2          1.0000     0               -
  3          2.0000     0               -
  4          2.0000     0               -
  5          2.0000     0               -
  6          2.0000     0               -
  7        100.0000 32768               -
"""
    assert expected.splitlines() == out.splitlines()[13:23]

    stackit = NDStacker(combine="mean", reject="sigclip", lsigma=5, hsigma=5)
    result = stackit(ndd)
    assert_allclose(result.data, 13.875)  # 100 is not rejected
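A quick arithmetic check on the two means asserted above: with 100 rejected, the mean of the seven surviving pixels is (3*1 + 4*2)/7 = 11/7 ≈ 1.5714; unclipped, it is (3*1 + 4*2 + 100)/8 = 111/8 = 13.875.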
Example #2
def test_ndstacker(capsys):
    stacker = NDStacker(combine="foo")
    assert capsys.readouterr().out == \
        'No such combiner as foo. Using mean instead.\n'
    assert stacker._combiner is NDStacker.mean

    stacker = NDStacker(reject="foo")
    assert capsys.readouterr().out == \
        'No such rejector as foo. Using none instead.\n'
    assert stacker._rejector is NDStacker.none
Example #3
def test_varclip():
    # Confirm rejection of high pixel and correct output DQ
    data = np.array([1., 1., 2., 2., 2., 100.]).reshape(6, 1)
    ndd = NDAstroData(data)
    ndd.mask = np.zeros_like(data, dtype=DQ.datatype)
    ndd.mask[5, 0] = DQ.saturated
    ndd.variance = np.ones_like(data)
    stackit = NDStacker(combine="mean", reject="varclip")
    result = stackit(ndd)
    np.testing.assert_array_almost_equal(result.data, [1.6])
    np.testing.assert_array_equal(result.mask, [0])
Example #4
def test_varclip():
    # Confirm rejection of high pixel and correct output DQ
    data = np.array([1., 1., 2., 2., 2., 100.]).reshape(6, 1)
    ndd = NDAstroData(data,
                      mask=np.zeros_like(data, dtype=DQ.datatype),
                      variance=np.ones_like(data))
    ndd.mask[5, 0] = DQ.saturated
    stackit = NDStacker(combine="mean", reject="varclip")
    result = stackit(ndd)
    assert_allclose(result.data, 1.6)  # 100 is rejected
    assert_allclose(result.mask, 0)

    data = np.array([1., 1., 2., 2., 2., 100.]).reshape(6, 1)
    ndd = NDAstroData(data, variance=np.ones_like(data))
    ndd.variance[5] = 400
    stackit = NDStacker(combine="mean", reject="varclip", lsigma=3, hsigma=3)
    result = stackit(ndd)
    assert_allclose(result.data, 1.6)  # 100 is rejected

    stackit = NDStacker(combine="mean", reject="varclip", lsigma=5, hsigma=5)
    result = stackit(ndd)
    assert_allclose(result.data, 18)  # 100 is not rejected
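For the calls with variance 400 on the deviant pixel, a quick arithmetic check (assuming varclip scales each pixel's deviation by its own standard deviation): the initial mean is 108/6 = 18 and sigma = sqrt(400) = 20, so |100 - 18| = 82 exceeds 3*sigma = 60 but not 5*sigma = 100; hence 100 is rejected with lsigma=hsigma=3 and kept with lsigma=hsigma=5.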
Example #5
    def stackFrames(self, adinputs=None, **params):
        """
        This primitive will stack each science extension in the input dataset.
        New variance extensions are created from the stacked science extensions
        and the data quality extensions are propagated through to the final
        file.

        Parameters
        ----------
        adinputs : list of :class:`~astrodata.AstroData`
            Any set of 2D images.

        suffix : str
            Suffix to be added to output files.

        apply_dq : bool
            Apply DQ mask to data before combining?

        nlow, nhigh : int
            Number of low and high pixels to reject, for the 'minmax' method.
            The way it works is inherited from IRAF: the fraction is specified
            as the number of high and low pixels, the nhigh and nlow
            parameters, when data from all the input images are used. If
            pixels have been rejected by offsetting, masking, or
            thresholding, then a matching fraction of the remaining pixels,
            truncated to an integer, is used. Thus::

                nl = n * nlow/nimages + 0.001
                nh = n * nhigh/nimages + 0.001

            where n is the number of pixels surviving offsetting, masking,
            and thresholding, nimages is the number of input images, nlow
            and nhigh are task parameters, and nl and nh are the final
            numbers of low and high pixels rejected by the algorithm. The
            factor of 0.001 adjusts for rounding of the ratio.

        operation : str
            Combine method.

        reject_method : str
            Pixel rejection method (none, minmax, sigclip, varclip).

        zero : bool
            Apply zero-level offset to match background levels?

        scale : bool
            Scale images to the same intensity?

        memory : float or None
            Available memory (in GB) for stacking calculations.

        statsec : str
            Section for statistics.

        separate_ext : bool
            Handle extensions separately?

        Returns
        -------
        list of :class:`~astrodata.AstroData`
            Sky stacked image. This list contains only one element. The list
            format is maintained so this primitive is consistent with all the
            others.

        Raises
        ------
        OSError
            If the number of extensions in any of the `AstroData` objects is
            different.

        OSError
            If the shape of any extension in any `AstroData` object is
            different.

        AssertionError
            If any of the `.gain()` descriptors is None.

        AssertionError
            If any of the `.read_noise()` descriptors is None.
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys["stackFrames"]
        sfx = params["suffix"]
        memory = params["memory"]
        if memory is not None:
            memory = int(memory * 1000000000)

        zero = params["zero"]
        scale = params["scale"]
        apply_dq = params["apply_dq"]
        separate_ext = params["separate_ext"]
        statsec = params["statsec"]
        reject_method = params["reject_method"]
        save_rejection_map = params["save_rejection_map"]

        if statsec:
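            # Convert the 1-indexed "[x1:x2,y1:y2]" string into 0-indexed,
            # y-first slices; e.g. "[20:980,30:900]" (hypothetical) becomes
            # (slice(29, 900), slice(19, 980)).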
            statsec = tuple([
                slice(int(start) - 1, int(end))
                for x in reversed(statsec.strip('[]').split(','))
                for start, end in [x.split(':')]
            ])

        if len(adinputs) <= 1:
            log.stdinfo("No stacking will be performed, since at least two "
                        "input AstroData objects are required for stackFrames")
            return adinputs

        if (reject_method == "minmax" and self.mode == "qa"
                and params["nlow"] + params["nhigh"] >= len(adinputs)):
            log.warning(
                "Trying to reject too many images. Setting nlow=nhigh=0.")
            params["nlow"] = 0
            params["nhigh"] = 0

        if len({len(ad) for ad in adinputs}) > 1:
            raise OSError("Not all inputs have the same number of extensions")
        if len({ext.nddata.shape for ad in adinputs for ext in ad}) > 1:
            raise OSError("Not all inputs images have the same shape")

        # We will determine the average gain from the input AstroData
        # objects and add in quadrature the read noise
        gains = [ad.gain() for ad in adinputs]
        read_noises = [ad.read_noise() for ad in adinputs]

        # Determine whether we can construct these averages
        process_gain = all(g is not None for gain in gains for g in gain)
        process_rn = all(rn is not None for read_noise in read_noises
                         for rn in read_noise)

        # Compute gain and read noise of final stacked images
        num_img = len(adinputs)
        num_ext = len(adinputs[0])
        if process_gain:
            gain_list = [
                np.mean([gain[i] for gain in gains]) for i in range(num_ext)
            ]
        if process_rn:
            read_noise_list = [
                np.sqrt(np.sum([rn[i] * rn[i]
                                for rn in read_noises])) / num_img
                for i in range(num_ext)
            ]
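            # Illustrative: four inputs with read noise 5 e- each combine
            # (mean) to sqrt(4 * 5**2) / 4 = 2.5 e- in the stack.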

        zero_offsets = np.zeros((num_ext, num_img), dtype=np.float32)
        scale_factors = np.ones_like(zero_offsets)

        # Try to determine how much memory we're going to need to stack and
        # whether it's necessary to flush pixel data to disk first
        # Also determine kernel size from offered memory and bytes per pixel
        bytes_per_ext = []
        for ext in adinputs[0]:
            bytes = 0
            # Count _data twice to handle temporary arrays
            bytes += 2 * ext.data.dtype.itemsize
            if ext.variance is not None:
                bytes += ext.variance.dtype.itemsize

            bytes += 2  # mask always created
            bytes_per_ext.append(bytes * np.prod(ext.shape))
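        # Illustrative: float32 data counted twice (8 bytes) plus float32
        # variance (4) plus a 2-byte mask is 14 bytes/pixel, i.e. ~59 MB
        # for a 2048 x 2048 extension.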

        if memory is not None and (num_img * max(bytes_per_ext) > memory):
            adinputs = self.flushPixels(adinputs)

        # Compute the scale and offset values by accessing the memmapped data
        # so we can pass those to the stacking function
        # TODO: Should probably be done better to consider only the overlap
        # regions between frames
        if scale or zero:
            levels = np.empty((num_img, num_ext), dtype=np.float32)
            for i, ad in enumerate(adinputs):
                for index in range(num_ext):
                    nddata = (ad[index].nddata.window[:] if statsec is None
                              else ad[index].nddata.window[statsec])
                    # levels[i, index] = np.median(nddata.data)
                    levels[i, index] = gt.measure_bg_from_image(nddata,
                                                                value_only=True)
            if scale and zero:
                log.warning(
                    "Both scale and zero are set. Setting scale=False.")
                scale = False
            if separate_ext:
                # Target value is corresponding extension of first image
                if scale:
                    scale_factors = (levels[0] / levels).T
                else:  # zero=True
                    zero_offsets = (levels[0] - levels).T
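                    # Illustrative: measured levels [[100.], [110.], [90.]]
                    # give zero_offsets [[0., -10., 10.]], shifting each
                    # input to the level of the first image.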
            else:
                # Target value is mean of all extensions of first image
                target = np.mean(levels[0])
                if scale:
                    scale_factors = np.tile(target / np.mean(levels, axis=1),
                                            num_ext).reshape(num_ext, num_img)
                else:  # zero=True
                    zero_offsets = np.tile(target - np.mean(levels, axis=1),
                                           num_ext).reshape(num_ext, num_img)
            if scale and np.min(scale_factors) < 0:
                log.warning("Some scale factors are negative. Not scaling.")
                scale_factors = np.ones_like(scale_factors)
                scale = False
            if scale and np.any(np.isinf(scale_factors)):
                log.warning("Some scale factors are infinite. Not scaling.")
                scale_factors = np.ones_like(scale_factors)
                scale = False
            if scale and np.any(np.isnan(scale_factors)):
                log.warning("Some scale factors are undefined. Not scaling.")
                scale_factors = np.ones_like(scale_factors)
                scale = False

        if reject_method == "varclip" and any(ext.variance is None
                                              for ad in adinputs
                                              for ext in ad):
            log.warning("Rejection method 'varclip' has been chosen but some"
                        " extensions have no variance. 'sigclip' will be used"
                        " instead.")
            reject_method = "sigclip"

        log.stdinfo("Combining {} inputs with {} and {} rejection".format(
            num_img, params["operation"], reject_method))

        stack_function = NDStacker(combine=params["operation"],
                                   reject=reject_method,
                                   log=self.log,
                                   **params)

        # NDStacker uses DQ if it exists; if we don't want that, delete the DQs!
        if not apply_dq:
            for ad in adinputs:
                for ext in ad:
                    ext.mask = None

        ad_out = astrodata.create(adinputs[0].phu)
        for index, (ext, sfactors, zfactors) in enumerate(
                zip(adinputs[0], scale_factors, zero_offsets)):
            status = (f"Combining extension {ext.id}."
                      if num_ext > 1 else "Combining images.")
            if scale:
                status += " Applying scale factors."
                numbers = sfactors
            elif zero:
                status += " Applying offsets."
                numbers = zfactors
            log.stdinfo(status)
            if ((scale or zero) and (index == 0 or separate_ext)):
                for ad, value in zip(adinputs, numbers):
                    log.stdinfo("{:40s}{:10.3f}".format(ad.filename, value))

            shape = adinputs[0][index].nddata.shape
            if memory is None:
                kernel = shape
            else:
                # Chop the image horizontally into equal-sized chunks to process
                # This uses the minimum number of steps and uses minimum memory
                # per step.
                oversubscription = (bytes_per_ext[index] *
                                    num_img) // memory + 1
                kernel = ((shape[0] + oversubscription - 1) //
                          oversubscription, ) + shape[1:]
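                # e.g. ten inputs at 350 MB per extension against a 1 GB
                # budget give oversubscription = 4, so a 2048-row image is
                # processed in four 512-row chunks.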

            with_uncertainty = True  # Since all stacking methods return variance
            with_mask = apply_dq and not any(
                ad[index].nddata.window[:].mask is None for ad in adinputs)
            result = windowedOp(stack_function,
                                [ad[index].nddata for ad in adinputs],
                                scale=sfactors,
                                zero=zfactors,
                                kernel=kernel,
                                dtype=np.float32,
                                with_uncertainty=with_uncertainty,
                                with_mask=with_mask,
                                save_rejection_map=save_rejection_map)
            ad_out.append(result)
            log.stdinfo("")

        # Propagate REFCAT as the union of all input REFCATs
        refcats = [ad.REFCAT for ad in adinputs if hasattr(ad, 'REFCAT')]
        if refcats:
            try:
                out_refcat = table.unique(table.vstack(
                    refcats, metadata_conflicts='silent'),
                                          keys=('RAJ2000', 'DEJ2000'))
            except KeyError:
                pass
            else:
                out_refcat['Id'] = list(range(1, len(out_refcat) + 1))
                ad_out.REFCAT = out_refcat

        # Set AIRMASS to be the mean of the input values
        try:
            airmass_kw = ad_out._keyword_for('airmass')
            mean_airmass = np.mean([ad.airmass() for ad in adinputs])
        except Exception:  # generic implementation failure (probably non-Gemini)
            pass
        else:
            ad_out.phu.set(airmass_kw, mean_airmass,
                           "Mean airmass for the exposure")

        # Set GAIN to the average of input gains. Set the RDNOISE to the
        # sum in quadrature of the input read noises.
        if process_gain:
            for ext, gain in zip(ad_out, gain_list):
                ext.hdr.set('GAIN', gain, self.keyword_comments['GAIN'])
            ad_out.phu.set('GAIN', gain_list[0], self.keyword_comments['GAIN'])

        if process_rn:
            for ext, rn in zip(ad_out, read_noise_list):
                ext.hdr.set('RDNOISE', rn, self.keyword_comments['RDNOISE'])
            ad_out.phu.set('RDNOISE', read_noise_list[0],
                           self.keyword_comments['RDNOISE'])

        # Add suffix to datalabel to distinguish from the reference frame
        if sfx[0] == '_':
            extension = sfx.replace('_', '-', 1).upper()
        else:
            extension = '-' + sfx.upper()
        ad_out.phu.set('DATALAB', "{}{}".format(ad_out.data_label(),
                                                extension),
                       self.keyword_comments['DATALAB'])

        # Add other keywords to the PHU about the stacking inputs
        ad_out.orig_filename = ad_out.phu.get('ORIGNAME')
        ad_out.phu.set('NCOMBINE', len(adinputs),
                       self.keyword_comments['NCOMBINE'])
        for i, ad in enumerate(adinputs, start=1):
            ad_out.phu.set('IMCMB{:03d}'.format(i),
                           ad.phu.get('ORIGNAME', ad.filename))

        # Timestamp and update filename and prepare to return single output
        gt.mark_history(ad_out, primname=self.myself(), keyword=timestamp_key)
        ad_out.update_filename(suffix=sfx, strip=True)

        return [ad_out]
Example #6
    def removePatternNoise(self, adinputs=None, **params):
        """
        This attempts to remove the pattern noise in NIRI/GNIRS data. In each
        quadrant, boxes of a specified size are extracted and, for each pixel
        location in the box, the median across all the boxes is determined.
        The resultant median is then tiled to the size of the quadrant and
        subtracted. Optionally, the median of each box can be subtracted
        before performing the operation.

        Based on Andy Stephens's "cleanir"

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        force: bool
            perform operation even if standard deviation in quadrant increases?
        hsigma/lsigma: float
            sigma-clipping limits
        pattern_x_size: int
            size of pattern "box" in x direction
        pattern_y_size: int
            size of pattern "box" in y direction
        subtract_background: bool
            remove median of each "box" before calculating pattern noise?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        hsigma, lsigma = params["hsigma"], params["lsigma"]
        pxsize, pysize = params["pattern_x_size"], params["pattern_y_size"]
        bgsub = params["subtract_background"]
        force = params["force"]
        stack_function = NDStacker(combine='median', reject='sigclip',
                                   hsigma=hsigma, lsigma=lsigma)
        sigclip = partial(sigma_clip, sigma_lower=lsigma, sigma_upper=hsigma)
        zeros = None  # will remain unchanged if not subtract_background

        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by removePatternNoise".
                            format(ad.filename))
                continue

            for ext in ad:
                qysize, qxsize = [size // 2 for size in ext.data.shape]
                yticks = [(y, y + pysize) for y in range(0, qysize, pysize)]
                xticks = [(x, x + pxsize) for x in range(0, qxsize, pxsize)]
                for ystart in (0, qysize):
                    for xstart in (0, qxsize):
                        quad = ext.nddata[ystart:ystart + qysize, xstart:xstart + qxsize]
                        sigma_in = sigclip(np.ma.masked_array(quad.data, quad.mask)).std()
                        blocks = [quad[tuple(slice(start, end)
                                             for (start, end) in coords)]
                                  for coords in cart_product(yticks, xticks)]
                        if bgsub:
                            # If all pixels are masked in a box, we'll get no
                            # result from the mean. Suppress warning.
                            with warnings.catch_warnings():
                                warnings.simplefilter("ignore", category=UserWarning)
                                zeros = np.nan_to_num([-np.ma.masked_array(block.data, block.mask).mean()
                                                       for block in blocks])
                        out = stack_function(blocks, zero=zeros).data
                        out_quad = (quad.data + np.mean(out) -
                                    np.tile(out, (len(yticks), len(xticks))))
                        sigma_out = sigclip(np.ma.masked_array(out_quad, quad.mask)).std()
                        if sigma_out > sigma_in:
                            qstr = (f"{ad.filename} extension {ext.id} "
                                    f"quadrant ({xstart},{ystart})")
                            if force:
                                log.stdinfo("Forcing cleaning on " + qstr)
                            else:
                                log.stdinfo("No improvement for "+qstr)
                                continue
                        ext.data[ystart:ystart + qysize, xstart:xstart + qxsize] = out_quad

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
        return adinputs
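For reference, a minimal NumPy sketch of the box-median scheme described in the docstring above (an illustrative toy, not the DRAGONS implementation; the 8x8 quadrant, 2x2 box size, and the synthetic pattern are invented for the example). It mirrors the `quad.data + np.mean(out) - np.tile(out, ...)` step in the method:

import numpy as np

rng = np.random.default_rng(0)
quad = rng.normal(size=(8, 8))                     # "clean" quadrant
pattern = np.tile([[0., 1.], [1., 0.]], (4, 4))    # repeating 2x2 pattern
noisy = quad + pattern

# Cut the quadrant into 2x2 boxes, take the per-pixel median across all
# boxes, then tile the median back to quadrant size and subtract it while
# preserving the quadrant's mean level.
boxes = noisy.reshape(4, 2, 4, 2).transpose(0, 2, 1, 3).reshape(-1, 2, 2)
med = np.median(boxes, axis=0)
cleaned = noisy + med.mean() - np.tile(med, (4, 4))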
Example #7
    def stackFrames(self, adinputs=None, **params):
        """
        This primitive will stack each science extension in the input dataset.
        New variance extensions are created from the stacked science extensions
        and the data quality extensions are propagated through to the final
        file.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        apply_dq: bool
            apply DQ mask to data before combining?
        nhigh: int
            number of high pixels to reject
        nlow: int
            number of low pixels to reject
        operation: str
            combine method
        reject_method: str
            type of pixel rejection (passed to gemcombine)
        zero: bool
            apply zero-level offset to match background levels?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys["stackFrames"]
        sfx = params["suffix"]
        memory = params["memory"]
        if memory is not None:
            memory = int(memory * 1000000000)

        zero = params["zero"]
        scale = params["scale"]
        apply_dq = params["apply_dq"]
        separate_ext = params["separate_ext"]
        statsec = params["statsec"]
        reject_method = params["reject_method"]
        if statsec:
            statsec = tuple([
                slice(int(start) - 1, int(end))
                for x in reversed(statsec.strip('[]').split(','))
                for start, end in [x.split(':')]
            ])

        if len(adinputs) <= 1:
            log.stdinfo("No stacking will be performed, since at least two "
                        "input AstroData objects are required for stackFrames")
            return adinputs

        if (reject_method == "minmax" and self.mode == "qa"
                and params["nlow"] + params["nhigh"] >= len(adinputs)):
            log.warning(
                "Trying to reject too many images. Setting nlow=nhigh=0.")
            params["nlow"] = 0
            params["nhigh"] = 0

        # Perform various checks on inputs
        for ad in adinputs:
            if not "PREPARED" in ad.tags:
                raise IOError("{} must be prepared".format(ad.filename))

        if len(set(len(ad) for ad in adinputs)) > 1:
            raise IOError("Not all inputs have the same number of extensions")
        if len({ext.nddata.shape for ad in adinputs for ext in ad}) > 1:
            raise IOError("Not all input images have the same shape")

        # Determine the average gain from the input AstroData objects and
        # add in quadrature the read noise
        gains = [ad.gain() for ad in adinputs]
        read_noises = [ad.read_noise() for ad in adinputs]

        assert all(g is not None for gain in gains for g in gain), \
            "Gain problem"
        assert all(rn is not None for read_noise in read_noises
                   for rn in read_noise), "RN problem"

        # Compute gain and read noise of final stacked images
        nexts = len(gains[0])
        gain_list = [
            np.mean([gain[i] for gain in gains]) for i in range(nexts)
        ]
        read_noise_list = [
            np.sqrt(np.sum([rn[i] * rn[i] for rn in read_noises]))
            for i in range(nexts)
        ]
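        # Illustrative: four inputs with read noise 5 e- each give a stacked
        # read noise of sqrt(4 * 5**2) = 10 e- here.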

        num_img = len(adinputs)
        num_ext = len(adinputs[0])
        zero_offsets = np.zeros((num_ext, num_img), dtype=np.float32)
        scale_factors = np.ones_like(zero_offsets)

        # Try to determine how much memory we're going to need to stack and
        # whether it's necessary to flush pixel data to disk first
        # Also determine kernel size from offered memory and bytes per pixel
        bytes_per_ext = []
        for ext in adinputs[0]:
            bytes = 0
            # Count _data twice to handle temporary arrays
            for attr in ('_data', '_data', '_uncertainty'):
                item = getattr(ext.nddata, attr)
                if item is not None:
                    # A bit of numpy weirdness in the difference between normal
                    # python types ("float32") and numpy types ("np.uint16")
                    try:
                        bytes += item.dtype.itemsize
                    except TypeError:
                        bytes += item.dtype().itemsize
                    except AttributeError:  # For non-lazy VAR
                        bytes += item._array.dtype.itemsize
            bytes += 2  # mask always created
            bytes_per_ext.append(bytes * np.multiply.reduce(ext.nddata.shape))

        if memory is not None and (num_img * max(bytes_per_ext) > memory):
            adinputs = self.flushPixels(adinputs)

        # Compute the scale and offset values by accessing the memmapped data
        # so we can pass those to the stacking function
        # TODO: Should probably be done better to consider only the overlap
        # regions between frames
        if scale or zero:
            levels = np.empty((num_img, num_ext), dtype=np.float32)
            for i, ad in enumerate(adinputs):
                for index in range(num_ext):
                    nddata = (ad[index].nddata.window[:] if statsec is None
                              else ad[index].nddata.window[statsec])
                    # levels[i, index] = np.median(nddata.data)
                    levels[i, index] = gt.measure_bg_from_image(nddata,
                                                                value_only=True)
            if scale and zero:
                log.warning(
                    "Both scale and zero are set. Setting scale=False.")
                scale = False
            if separate_ext:
                # Target value is corresponding extension of first image
                if scale:
                    scale_factors = (levels[0] / levels).T
                else:  # zero=True
                    zero_offsets = (levels[0] - levels).T
            else:
                # Target value is mean of all extensions of first image
                target = np.mean(levels[0])
                if scale:
                    scale_factors = np.tile(target / np.mean(levels, axis=1),
                                            num_ext).reshape(num_ext, num_img)
                else:  # zero=True
                    zero_offsets = np.tile(target - np.mean(levels, axis=1),
                                           num_ext).reshape(num_ext, num_img)
            if scale and np.min(scale_factors) < 0:
                log.warning("Some scale factors are negative. Not scaling.")
                scale_factors = np.ones_like(scale_factors)
                scale = False
            if scale and np.any(np.isinf(scale_factors)):
                log.warning("Some scale factors are infinite. Not scaling.")
                scale_factors = np.ones_like(scale_factors)
                scale = False
            if scale and np.any(np.isnan(scale_factors)):
                log.warning("Some scale factors are undefined. Not scaling.")
                scale_factors = np.ones_like(scale_factors)
                scale = False

        if reject_method == "varclip" and any(ext.variance is None
                                              for ad in adinputs
                                              for ext in ad):
            log.warning("Rejection method 'varclip' has been chosen but some"
                        "extensions have no variance. 'sigclip' will be used"
                        "instead.")
            reject_method = "sigclip"

        stack_function = NDStacker(combine=params["operation"],
                                   reject=reject_method,
                                   log=self.log,
                                   **params)

        # NDStacker uses DQ if it exists; if we don't want that, delete the DQs!
        if not apply_dq:
            for ad in adinputs:
                for ext in ad:
                    ext.mask = None

        ad_out = astrodata.create(adinputs[0].phu)
        for index, (extver, sfactors, zfactors) in enumerate(
                zip(adinputs[0].hdr.get('EXTVER'), scale_factors,
                    zero_offsets)):
            status = ("Combining EXTVER {}.".format(extver)
                      if num_ext > 1 else "Combining images.")
            if scale:
                status += " Applying scale factors."
                numbers = sfactors
            elif zero:
                status += " Applying offsets."
                numbers = zfactors
            log.stdinfo(status)
            if ((scale or zero) and (index == 0 or separate_ext)):
                for ad, value in zip(adinputs, numbers):
                    log.stdinfo("{:40s}{:10.3f}".format(ad.filename, value))

            shape = adinputs[0][index].nddata.shape
            if memory is None:
                kernel = shape
            else:
                # Chop the image horizontally into equal-sized chunks to process
                # This uses the minimum number of steps and uses minimum memory
                # per step.
                oversubscription = (bytes_per_ext[index] *
                                    num_img) // memory + 1
                kernel = ((shape[0] + oversubscription - 1) //
                          oversubscription, ) + shape[1:]
            with_uncertainty = True  # Since all stacking methods return variance
            with_mask = apply_dq and not any(
                ad[index].nddata.window[:].mask is None for ad in adinputs)
            result = windowedOp(partial(stack_function,
                                        scale=sfactors,
                                        zero=zfactors),
                                [ad[index].nddata for ad in adinputs],
                                kernel=kernel,
                                dtype=np.float32,
                                with_uncertainty=with_uncertainty,
                                with_mask=with_mask)
            ad_out.append(result)
            log.stdinfo("")

        # Propagate REFCAT as the union of all input REFCATs
        refcats = [ad.REFCAT for ad in adinputs if hasattr(ad, 'REFCAT')]
        if refcats:
            out_refcat = table.unique(table.vstack(
                refcats, metadata_conflicts='silent'),
                                      keys='Cat_Id')
            out_refcat['Cat_Id'] = list(range(1, len(out_refcat) + 1))
            ad_out.REFCAT = out_refcat

        # Set AIRMASS to be the mean of the input values
        try:
            airmass_kw = ad_out._keyword_for('airmass')
            mean_airmass = np.mean([ad.airmass() for ad in adinputs])
        except Exception:  # generic implementation failure (probably non-Gemini)
            pass
        else:
            ad_out.phu.set(airmass_kw, mean_airmass,
                           "Mean airmass for the exposure")

        # Set GAIN to the average of input gains. Set the RDNOISE to the
        # sum in quadrature of the input read noises.
        for ext, gain, rn in zip(ad_out, gain_list, read_noise_list):
            ext.hdr.set('GAIN', gain, self.keyword_comments['GAIN'])
            ext.hdr.set('RDNOISE', rn, self.keyword_comments['RDNOISE'])
        # Stick the first extension's values in the PHU
        ad_out.phu.set('GAIN', gain_list[0], self.keyword_comments['GAIN'])
        ad_out.phu.set('RDNOISE', read_noise_list[0],
                       self.keyword_comments['RDNOISE'])

        # Add suffix to datalabel to distinguish from the reference frame
        ad_out.phu.set('DATALAB', "{}{}".format(ad_out.data_label(), sfx),
                       self.keyword_comments['DATALAB'])

        # Add other keywords to the PHU about the stacking inputs
        ad_out.orig_filename = ad_out.phu.get('ORIGNAME')
        ad_out.phu.set('NCOMBINE', len(adinputs),
                       self.keyword_comments['NCOMBINE'])
        for i, ad in enumerate(adinputs, start=1):
            ad_out.phu.set('IMCMB{:03d}'.format(i),
                           ad.phu.get('ORIGNAME', ad.filename))

        # Timestamp and update filename and prepare to return single output
        gt.mark_history(ad_out, primname=self.myself(), keyword=timestamp_key)
        ad_out.update_filename(suffix=sfx, strip=True)

        return [ad_out]