Example #1
def test_calculate_affine_matrices(angle, scale, xoffset, yoffset):
    m = ((models.Scale(scale) & models.Scale(scale)) | models.Rotation2D(angle)
         | (models.Shift(xoffset) & models.Shift(yoffset)))
    affine = adwcs.calculate_affine_matrices(m, (100, 100))
    assert_allclose(affine.offset, (yoffset, xoffset), atol=1e-10)
    angle = math.radians(angle)
    assert_allclose(affine.matrix,
                    ((scale * math.cos(angle), scale * math.sin(angle)),
                     (-scale * math.sin(angle), scale * math.cos(angle))),
                    atol=1e-10)
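
The test above pins down the convention used by adwcs.calculate_affine_matrices: the returned matrix and offset are expressed in python (y, x) axis order. A minimal numpy sketch of the relation being verified, with illustrative values that are not part of the test suite:

import math
import numpy as np

angle, scale, xoffset, yoffset = 30.0, 1.5, 10.0, -5.0
a = math.radians(angle)
# Matrix and offset in (y, x) axis order, as asserted by the test
matrix = np.array([[scale * math.cos(a), scale * math.sin(a)],
                   [-scale * math.sin(a), scale * math.cos(a)]])
offset = np.array([yoffset, xoffset])

# Apply the compound model directly in (x, y) order:
# scale both axes, rotate by `angle`, then shift
x, y = 3.0, 4.0
xt = scale * (x * math.cos(a) - y * math.sin(a)) + xoffset
yt = scale * (x * math.sin(a) + y * math.cos(a)) + yoffset

# The affine representation reproduces the same point in (y, x) order
assert np.allclose(matrix @ (y, x) + offset, (yt, xt))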
Example #2
def align_images_from_wcs(adinput,
                          adref,
                          cull_sources=False,
                          transform=None,
                          min_sources=1,
                          search_radius=10,
                          match_radius=2,
                          rotate=False,
                          scale=False,
                          full_wcs=False,
                          brute=True,
                          return_matches=False):
    """
    This function takes two images (an input image and a reference image) and
    works out the modifications needed to the WCS of the input image so that
    the world coordinates of its OBJCAT sources match the world coordinates of
    the OBJCAT sources in the reference image. This is done by modifying the
    WCS of the input image and mapping the reference image sources to pixels
    in the input image via the reference image WCS (fixed) and the input image
    WCS. As such, in the nomenclature of the fitting routines, the pixel
    positions of the input image's OBJCAT become the "reference" sources,
    while the converted positions of the reference image's OBJCAT are the
    "input" sources.

    Parameters
    ----------
    adinput: AstroData
        input AD whose pixel shift is requested
    adref: AstroData
        reference AD image
    cull_sources: bool
        limit matched sources to "good" (i.e., stellar) objects
    transform: Transform/None
        existing transformation (if None, will do brute search)
    min_sources: int
        minimum number of sources to use for cross-correlation
    search_radius: float
        size of search box (in pixels)
    match_radius: float
        matching radius for objects (in pixels)
    rotate: bool
        add a rotation to the alignment transform?
    scale: bool
        add a magnification to the alignment transform?
    full_wcs: bool
        use the two images' WCSs to reproject the reference image's coordinates
        onto the input image's pixel plane, rather than just align the OBJCAT
        coordinates?
    brute: bool
        perform brute (landscape) search first?
    return_matches: bool
        return a list of matched objects as well as the Transform?

    Returns
    -------
    matches: 2 lists (only if return_matches is True)
        OBJCAT sources in input and reference that are matched
    transform: Model
        the alignment transform for the input image (None if the alignment
        could not be performed)
    """
    log = logutils.get_logger(__name__)
    if len(adinput) * len(adref) != 1:
        log.warning('Can only match single-extension images')
        return None

    try:
        input_objcat = adinput[0].OBJCAT
        ref_objcat = adref[0].OBJCAT
    except AttributeError:
        log.warning('Both input images must have object catalogs')
        return None

    if len(input_objcat) < min_sources or len(ref_objcat) < min_sources:
        log.warning("Too few sources in one or both images. Cannot align.")
        return None

    largest_dimension = max(*adinput[0].shape, *adref[0].shape)
    # Values larger than these result in errors of >1 pixel
    mag_threshold = 1. / largest_dimension
    rot_threshold = np.degrees(mag_threshold)

    # OK, we can proceed
    incoords = (input_objcat['X_IMAGE'].data - 1,
                input_objcat['Y_IMAGE'].data - 1)
    refcoords = (ref_objcat['X_IMAGE'].data - 1,
                 ref_objcat['Y_IMAGE'].data - 1)
    if cull_sources:
        good_src1 = gt.clip_sources(adinput)[0]
        good_src2 = gt.clip_sources(adref)[0]
        if len(good_src1) < min_sources or len(good_src2) < min_sources:
            log.warning("Too few sources in culled list, using full set "
                        "of sources")
        else:
            incoords = (good_src1["x"] - 1, good_src1["y"] - 1)
            refcoords = (good_src2["x"] - 1, good_src2["y"] - 1)

    # Set up the initial model
    magnification, rotation = 1, 0  # May be overridden later
    try:
        t = adref[0].wcs.forward_transform | adinput[0].wcs.backward_transform
    except AttributeError:  # for cases with no defined WCS
        t = None
        if full_wcs:
            log.warning(
                "Cannot determine WCS information: setting full_wcs=False")
            full_wcs = False

    if full_wcs:
        refcoords = t(*refcoords)
    elif transform is None and t is not None:
        transform = t.inverse
    if transform is None:
        transform = models.Identity(2)

    # We always refactor the transform (if provided) in a prescribed way so
    # as to ensure it's fittable and not overly weird
    affine = adwcs.calculate_affine_matrices(transform, adinput[0].shape)
    m_init = models.Shift(affine.offset[1]) & models.Shift(affine.offset[0])

    # This is approximate since the affine matrix might have differential
    # scaling and a shear
    magnification = np.sqrt(abs(np.linalg.det(affine.matrix)))
    rotation = np.degrees(
        np.arctan2(affine.matrix[1, 0] - affine.matrix[0, 1],
                   affine.matrix[0, 0] + affine.matrix[1, 1]))
    m_init.offset_0.bounds = (m_init.offset_0 - search_radius,
                              m_init.offset_0 + search_radius)
    m_init.offset_1.bounds = (m_init.offset_1 - search_radius,
                              m_init.offset_1 + search_radius)

    m_rotate = Rotate2D(rotation)
    if rotate:
        m_rotate.angle.bounds = (rotation - 5, rotation + 5)
        m_init = m_rotate | m_init
    elif abs(rotation) > rot_threshold:
        m_rotate.angle.fixed = True
        m_init = m_rotate | m_init
        log.warning("A rotation of {:.3f} degrees is expected but the "
                    "rotation is fixed".format(rotation))

    m_magnify = Scale2D(magnification)
    if scale:
        m_magnify.factor.bounds = (magnification - 0.05, magnification + 0.05)
        m_init = m_magnify | m_init
    elif abs(magnification - 1) > mag_threshold:
        m_magnify.factor.fixed = True
        m_init = m_magnify | m_init
        log.warning("A magnification of {:.4f} is expected but the "
                    "magnification is fixed".format(magnification))

    # Perform the fit
    m_final = fit_model(m_init, incoords, refcoords, sigma=10, brute=brute)
    if return_matches:
        matched = match_sources(m_final(*incoords),
                                refcoords,
                                radius=match_radius)
        ind2 = np.where(matched >= 0)
        ind1 = matched[ind2]
        obj_list = [[], []] if len(ind1) < 1 else [
            np.array(list(zip(*incoords)))[ind2],
            np.array(list(zip(*refcoords)))[ind1]
        ]
        return obj_list, m_final
    return m_final
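
A hypothetical call of the function above (the filenames are invented; both inputs must be single-extension images with OBJCAT source catalogs attached):

import astrodata

adinput = astrodata.open("input_image.fits")      # assumed filename
adref = astrodata.open("reference_image.fits")    # assumed filename
result = align_images_from_wcs(adinput, adref, rotate=True,
                               return_matches=True)
if result is not None:
    (matched_in, matched_ref), model = result
    print(f"Matched {len(matched_in)} sources")
    print("Shift at origin:", model(0, 0))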
Example #3
def create_polynomial_transform(transform,
                                in_coords,
                                ref_coords,
                                order=3,
                                max_iters=5,
                                match_radius=0.1,
                                clip=True,
                                log=None):
    """
    This function maps a set of 2D input coordinates to a set of 2D reference
    coordinates using a pair of Polynomial2D objects (one for each ordinate),
    given an initial transforming model.

    Parameters
    ----------
    transform : astropy.models.Model
        the initial guess of the transform between coordinate frames
    in_coords : 2-tuple of sequences
        input coordinates being mapped to reference
    ref_coords : 2-tuple of sequences
        reference coordinates
    order : int
        order of polynomial fit in each ordinate
    max_iters : int
        maximum number of iterations to perform
    match_radius : float
        matching radius for sources (in units of the reference coords)
    clip : bool
        sigma-clip sources after matching?
    log : logging object

    Returns
    -------
    transform : Model
        a model (and its inverse) to map in_coords to ref_coords
    matched: ndarray
        index of the matched input coordinate for each reference coordinate
        (-1 where there is no match)
    """
    affine = adwcs.calculate_affine_matrices(transform, shape=(100, 100))
    num_params = [
        len(models.Polynomial2D(degree=i).parameters) for i in range(order + 1)
    ]

    orig_order = last_order = order
    xref, yref = ref_coords
    xin, yin = in_coords
    if clip:
        fit_it = fitting.FittingWithOutlierRemoval(fitting.LinearLSQFitter(),
                                                   sigma_clip,
                                                   sigma=3)
    else:
        fit_it = fitting.LinearLSQFitter()

    matched = match_sources((xref, yref),
                            transform(xin, yin),
                            radius=match_radius)
    num_matched = np.sum(matched >= 0)
    niter = 0
    while True:
        # No point trying to compute a more complex model if it will
        # be insufficiently constrained
        order = min(np.searchsorted(num_params, num_matched, side="right"),
                    orig_order)
        if order < last_order:
            log.warning(f"Downgrading fit to order {order} due to "
                        "limited number of matches.")
        elif order > last_order:
            log.stdinfo(f"Upgrading fit to order {order} due to "
                        "increased number of matches.")

        xmodel = models.Polynomial2D(degree=order,
                                     c0_0=affine.offset[1],
                                     c1_0=affine.matrix[1, 1],
                                     c0_1=affine.matrix[1, 0])
        ymodel = models.Polynomial2D(degree=order,
                                     c0_0=affine.offset[0],
                                     c1_0=affine.matrix[0, 1],
                                     c0_1=affine.matrix[0, 0])
        old_num_matched = num_matched
        xobj_matched, yobj_matched = [], []
        xref_matched, yref_matched = [], []
        for i, m in enumerate(matched):
            if m >= 0:
                xref_matched.append(xref[i])
                yref_matched.append(yref[i])
                xobj_matched.append(xin[m])
                yobj_matched.append(yin[m])
        xmodel = fit_it(xmodel, np.array(xobj_matched), np.array(yobj_matched),
                        xref_matched)
        ymodel = fit_it(ymodel, np.array(xobj_matched), np.array(yobj_matched),
                        yref_matched)
        if clip:
            xmodel, ymodel = xmodel[0], ymodel[0]
        transform = models.Mapping((0, 1, 0, 1)) | (xmodel & ymodel)
        matched = match_sources((xref, yref),
                                transform(xin, yin),
                                radius=match_radius)
        num_matched = np.sum(matched >= 0)
        last_order = order
        niter += 1
        log.debug(f"Iteration {niter}: Matched {num_matched} objects")
        if num_matched == old_num_matched or niter > max_iters:
            break
    xmodel_inv = fit_it(xmodel, np.array(xref_matched), np.array(yref_matched),
                        xobj_matched)
    ymodel_inv = fit_it(ymodel, np.array(xref_matched), np.array(yref_matched),
                        yobj_matched)
    if clip:
        xmodel_inv, ymodel_inv = xmodel_inv[0], ymodel_inv[0]
    transform.inverse = models.Mapping(
        (0, 1, 0, 1)) | (xmodel_inv & ymodel_inv)
    return transform, matched
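
The models.Mapping((0, 1, 0, 1)) composition used above is what allows each output ordinate to depend on both inputs: it duplicates (x, y) into (x, y, x, y) so that each Polynomial2D receives the full coordinate pair. A minimal sketch with illustrative coefficients:

import numpy as np
from astropy.modeling import models

# Degree-1 polynomials encoding a pure shift of (+5, -3)
xmodel = models.Polynomial2D(degree=1, c0_0=5.0, c1_0=1.0, c0_1=0.0)
ymodel = models.Polynomial2D(degree=1, c0_0=-3.0, c1_0=0.0, c0_1=1.0)
transform = models.Mapping((0, 1, 0, 1)) | (xmodel & ymodel)
print(transform(np.array([0.0, 1.0]), np.array([0.0, 2.0])))
# -> (array([5., 6.]), array([-3., -1.]))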
Example #4
def find_alignment_transform(incoords,
                             refcoords,
                             transform=None,
                             shape=None,
                             search_radius=10,
                             match_radius=2,
                             rotate=False,
                             scale=False,
                             brute=True,
                             sigma=5,
                             factor=None,
                             return_matches=False):
    """
    This function computes a transform that maps one set of coordinates to
    another. By default, only a shift is used, but a rotation and magnification
    can also be applied if requested. An initial transform may be supplied
    and, if so, its affine approximation will be used as a starting point.

    Parameters
    ----------
    incoords: tuple
        x-coords and y-coords of objects in input image
    refcoords: tuple
        x-coords and y-coords of objects in reference image
    transform: Transform/None
        existing transformation (if None, will do brute search)
    shape: 2-tuple/None
        shape (standard python order, y-first)
    search_radius: float
        size of search box (in pixels)
    match_radius: float
        matching radius for objects (in pixels)
    rotate: bool
        add a rotation to the alignment transform?
    scale: bool
        add a magnification to the alignment transform?
    brute: bool
        perform brute (landscape) search first?
    sigma: float
        scale-length for source matching
    factor: float/None
        scaling factor to convert coordinates to pixels in the BruteLandscapeFitter()
    return_matches: bool
        return a list of matched objects as well as the Transform?

    Returns
    -------
    Model: alignment transform
    matches: 2 lists (optional)
        OBJCAT sources in input and reference that are matched
    """
    log = logutils.get_logger(__name__)
    if shape is None:
        shape = tuple(max(c) - min(c) for c in incoords)

    largest_dimension = max(*shape)
    # Values larger than these result in errors of >1 pixel
    mag_threshold = 1. / largest_dimension
    rot_threshold = np.degrees(mag_threshold)

    # Set up the initial model
    if transform is None:
        transform = models.Identity(2)

    # We always refactor the transform (if provided) in a prescribed way so
    # as to ensure it's fittable and not overly weird
    affine = adwcs.calculate_affine_matrices(transform, shape)
    m_init = models.Shift(affine.offset[1]) & models.Shift(affine.offset[0])

    # This is approximate since the affine matrix might have differential
    # scaling and a shear
    magnification = np.sqrt(abs(np.linalg.det(affine.matrix)))
    rotation = np.degrees(
        np.arctan2(affine.matrix[0, 1] - affine.matrix[1, 0],
                   affine.matrix[0, 0] + affine.matrix[1, 1]))
    m_init.offset_0.bounds = (m_init.offset_0 - search_radius,
                              m_init.offset_0 + search_radius)
    m_init.offset_1.bounds = (m_init.offset_1 - search_radius,
                              m_init.offset_1 + search_radius)

    m_rotate = Rotate2D(rotation)
    if rotate:
        m_rotate.angle.bounds = (rotation - 5, rotation + 5)
        m_init = m_rotate | m_init
    elif abs(rotation) > rot_threshold:
        m_rotate.angle.fixed = True
        m_init = m_rotate | m_init
        log.warning("A rotation of {:.3f} degrees is expected but the "
                    "rotation is fixed".format(rotation))

    m_magnify = Scale2D(magnification)
    if scale:
        m_magnify.factor.bounds = (magnification - 0.05, magnification + 0.05)
        m_init = m_magnify | m_init
    elif abs(magnification - 1) > mag_threshold:
        m_magnify.factor.fixed = True
        m_init = m_magnify | m_init
        log.warning("A magnification of {:.4f} is expected but the "
                    "magnification is fixed".format(magnification))

    # Tolerance here aims to achieve <0.1 pixel differences in the tests
    m_final = fit_model(m_init,
                        incoords,
                        refcoords,
                        sigma=sigma,
                        scale=factor,
                        brute=brute,
                        tolerance=sigma * 1e-5)
    if return_matches:
        matched = match_sources(m_final(*incoords),
                                refcoords,
                                radius=match_radius)
        ind2 = np.where(matched >= 0)
        ind1 = matched[ind2]
        obj_list = [[], []] if len(ind1) < 1 else [
            np.array(list(zip(*incoords)))[ind2],
            np.array(list(zip(*refcoords)))[ind1]
        ]
        return m_final, obj_list
    return m_final
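
A hypothetical call with synthetic coordinates (positions and offset are invented for illustration); the fitted model should recover the known shift between the two lists:

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(0, 100, 20)
y = rng.uniform(0, 100, 20)
incoords = (x, y)
refcoords = (x + 3.5, y - 1.2)   # reference coords shifted by a known amount
model = find_alignment_transform(incoords, refcoords, shape=(100, 100))
print(model(0.0, 0.0))           # expect approximately (3.5, -1.2)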
Example #5
    def resampleToCommonFrame(self, adinputs=None, **params):
        """
        This primitive applies the transformation encoded in the input images'
        WCSs to align them with a reference image, in reference image pixel
        coordinates. The reference image is taken to be the first image in
        the input list if not explicitly provided as a parameter.

        By default, the transformation into the reference frame is done via
        interpolation. The variance plane, if present, is transformed in
        the same way as the science data.

        The data quality plane, if present, is handled in a bitwise manner
        with each bit of each pixel in the output image being set if it has
        >1% influence from that bit of a bad pixel. The transformed masks are
        then added back together to generate the transformed DQ plane.

        The WCS objects of the output images are updated to reflect the
        transformation.

        Parameters
        ----------
        suffix : str
            suffix to be added to output files
        order : int (0-5)
            order of interpolation (0=nearest, 1=linear, etc.)
        trim_data : bool
            trim image to size of reference image?
        clean_data : bool
            replace bad pixels with a ring median of their values to avoid
            ringing if using a high-order interpolation?
        conserve : bool
            conserve flux when resampling to a different pixel scale?
        force_affine : bool
            convert the true resampling transformation to an affine
            approximation? This speeds up the calculation and has a negligible
            effect for instruments lacking significant distortion
        reference : str/AstroData/None
            reference image for resampling (if not provided, the first image
            in the list will be used)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]
        sfx = params.pop("suffix")
        reference = params.pop("reference")
        trim_data = params.pop("trim_data")
        force_affine = params.pop("force_affine")
        # These two parameters are only for GSAOI and will help to define
        # the output WCS if there's no reference image
        pixel_scale = params.pop("pixel_scale", None)
        position_angle = params.pop("pa", None)

        # TODO: Can we make it so that we don't need to mosaic detectors
        # before doing this? That would mean we only do one interpolation,
        # not two, and that's definitely better!
        if not all(len(ad) == 1 or ad.instrument() == "GSAOI" for ad in adinputs):
            raise OSError("All input images (except GSAOI) must have only "
                          "one extension.")

        if isinstance(reference, str):
            reference = astrodata.open(reference)
        elif reference is None and pixel_scale is None:
            # Reference image will be the first AD, so we need 2+
            if len(adinputs) < 2:
                log.warning("No alignment will be performed, since at least "
                            "two input AstroData objects are required for "
                            "resampleToCommonFrame")
                return adinputs

        if reference is None and pixel_scale:
            # This must be GSAOI projecting to the requested geometry
            ad0 = adinputs[0]
            ra, dec = ad0.target_ra(), ad0.target_dec()
            # using SkyCoord facilitates formatting the log
            center = SkyCoord(ra * u.deg, dec * u.deg)
            ra_str = center.ra.to_string(u.hour, precision=3)
            dec_str = center.dec.to_string(u.deg, precision=2, alwayssign=True)
            log.stdinfo(f"Projecting with center {ra_str} {dec_str}\n"
                        f"at PA={position_angle} with pixel scale={pixel_scale}")
            pixel_scale /= 3600
            new_wcs = (models.Scale(-pixel_scale) & models.Scale(pixel_scale) |
                       models.Rotation2D(position_angle) |
                       models.Pix2Sky_TAN() |
                       models.RotateNative2Celestial(ra, dec, 180))
            ref_wcs = gWCS([(ad0[0].wcs.input_frame, new_wcs),
                            (ad0[0].wcs.output_frame, None)])
            if trim_data:
                log.warning("Setting trim_data=False as required when no "
                            "reference imagevis provided.")
                trim_data = False
        else:
            if reference is None:
                reference = adinputs[0]
            else:
                log.stdinfo(f"Using {reference.filename} as reference image")
                if not trim_data:
                    log.warning("Setting trim_data=True to trim to size of the "
                                "reference image.")
                    trim_data = True
            if len(reference) != 1:
                raise OSError("Reference image must have only one extension.")
            ref_wcs = reference[0].wcs

        if trim_data:
            params.update({'origin': (0,) * len(reference[0].shape),
                           'output_shape': reference[0].shape})

        # No transform for the reference AD
        for ad in adinputs:
            transforms = []
            if reference is ad:
                transforms.append(models.Identity(len(ad[0].shape)))
            else:
                for ext in ad:
                    t_align = ext.wcs.forward_transform | ref_wcs.backward_transform
                    if force_affine:
                        affine = adwcs.calculate_affine_matrices(t_align, ext.shape)
                        t_align = models.AffineTransformation2D(matrix=affine.matrix[::-1, ::-1],
                                                                translation=affine.offset[::-1])
                    transforms.append(t_align)

            for ext, t_align in zip(ad, transforms):
                resampled_frame = copy(ext.wcs.input_frame)
                resampled_frame.name = "resampled"
                ext.wcs = gWCS([(ext.wcs.input_frame, t_align),
                                (resampled_frame, ref_wcs.pipeline[0].transform)] +
                                 ref_wcs.pipeline[1:])

        adoutputs = self._resample_to_new_frame(adinputs, frame="resampled",
                                                process_objcat=False, **params)
        for ad in adoutputs:
            try:
                trans_data = ad.nddata[0].meta.pop('transform')
            except KeyError:
                pass
            else:
                corners = np.array(trans_data['corners'][0])
                ncorners = len(corners)
                ad.hdr["AREATYPE"] = (f"P{ncorners}",
                                      f"Region with {ncorners} vertices")
                for i, corner in enumerate(zip(*corners), start=1):
                    for axis, value in enumerate(reversed(corner), start=1):
                        key_name = f"AREA{i}_{axis}"
                        key_comment = f"Vertex {i}, dimension {axis}"
                        ad.hdr[key_name] = (value + 1, key_comment)
                jfactor = trans_data['jfactors'][0]
                ad.hdr["JFACTOR"] = (jfactor, "J-factor in resampling")

            ad.update_filename(suffix=sfx, strip=True)
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)

        return adoutputs
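
The force_affine branch above hinges on an axis-order conversion: calculate_affine_matrices works in python (y, x) order, while astropy's AffineTransformation2D expects (x, y), hence the [::-1] reversals of both the matrix and the offset. A standalone sketch of that conversion with illustrative numbers:

import numpy as np
from astropy.modeling import models

matrix_yx = np.array([[1.0, 0.0],    # identity matrix in (y, x) axis order
                      [0.0, 1.0]])
offset_yx = np.array([-5.0, 10.0])   # (dy, dx)
t = models.AffineTransformation2D(matrix=matrix_yx[::-1, ::-1],
                                  translation=offset_yx[::-1])
print(t(0.0, 0.0))   # -> (10.0, -5.0), i.e. (dx, dy) in (x, y) order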
Example #6
    def applyStackedObjectMask(self, adinputs=None, **params):
        """
        This primitive takes an image with an OBJMASK and transforms that
        OBJMASK onto the pixel planes of the input images, using their WCS
        information. If the first image is a stack, this allows us to mask
        fainter objects than can be detected in the individual input images.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        source: str
            name of stream containing single stacked image
        order: int (0-5)
            order of interpolation
        threshold: float
            threshold above which an interpolated pixel should be flagged
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        source = params["source"]
        order = params["order"]
        threshold = params["threshold"]
        sfx = params["suffix"]
        force_affine = True

        try:
            source_stream = self.streams[source]
        except KeyError:
            try:
                ad_source = astrodata.open(source)
            except Exception:
                log.warning(f"Cannot find stream or file named {source}. Continuing.")
                return adinputs
        else:
            if len(source_stream) != 1:
                log.warning(f"Stream {source} does not contain single "
                            "AstroData object. Continuing.")
                return adinputs
            ad_source = source_stream[0]

        # There's no reason why we can't handle multiple extensions
        if any(len(ad) != len(ad_source) for ad in adinputs):
            log.warning("At least one AstroData input has a different number "
                        "of extensions to the reference. Continuing.")
            return adinputs

        for ad in adinputs:
            for ext, source_ext in zip(ad, ad_source):
                if getattr(ext, 'OBJMASK', None) is not None:
                    t_align = source_ext.wcs.forward_transform | ext.wcs.backward_transform
                    if force_affine:
                        affine = adwcs.calculate_affine_matrices(t_align.inverse, ext.shape)
                        objmask = affine_transform(source_ext.OBJMASK.astype(np.float32),
                                                   affine.matrix, affine.offset,
                                                   output_shape=ext.shape, order=order,
                                                   cval=0)
                    else:
                        objmask = transform.Transform(t_align).apply(source_ext.OBJMASK.astype(np.float32),
                                                                     output_shape=ext.shape, order=order,
                                                                     cval=0)
                    ext.OBJMASK = np.where(abs(objmask) > threshold, 1, 0).astype(np.uint8)
                # We will deliberately keep the input image's OBJCAT (if it
                # exists) since this will be required for aligning the inputs.
            ad.update_filename(suffix=sfx, strip=True)

        return adinputs
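
The OBJMASK handling above transforms a float copy of the binary mask and then re-thresholds it, so output pixels with even partial coverage from a masked pixel can be flagged. A minimal scipy sketch of that step (shift and threshold values are illustrative):

import numpy as np
from scipy.ndimage import affine_transform

objmask = np.zeros((10, 10), dtype=np.uint8)
objmask[4:6, 4:6] = 1
# With an identity matrix, output[y, x] = input[y + 2, x], so the
# masked block moves to lower row indices
shifted = affine_transform(objmask.astype(np.float32), np.eye(2),
                           offset=(2, 0), output_shape=objmask.shape,
                           order=1, cval=0)
new_mask = np.where(abs(shifted) > 0.01, 1, 0).astype(np.uint8)
assert new_mask[2:4, 4:6].all()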
Example #7
    def resampleToCommonFrame(self, adinputs=None, **params):
        """
        This primitive applies the transformation encoded in the input images'
        WCSs to align them with a reference image, in reference image pixel
        coordinates. The reference image is taken to be the first image in
        the input list.

        By default, the transformation into the reference frame is done via
        interpolation. The variance plane, if present, is transformed in
        the same way as the science data.

        The data quality plane, if present, is handled in a bitwise manner
        with each bit of each pixel in the output image being set if it has
        >1% influence from that bit of a bad pixel. The transformed masks are
        then added back together to generate the transformed DQ plane.

        The WCS keywords in the headers of the output images are updated
        to reflect the transformation.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        order: int (0-5)
            order of interpolation (0=nearest, 1=linear, etc.)
        trim_data: bool
            trim image to size of reference image?
        clean_data: bool
            replace bad pixels with a ring median of their values to avoid
            ringing if using a high-order interpolation?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]
        sfx = params.pop("suffix")
        force_affine = True

        if len(adinputs) < 2:
            log.warning("No alignment will be performed, since at least two "
                        "input AstroData objects are required for "
                        "resampleToCommonFrame")
            return adinputs

        # TODO: Can we make it so that we don't need to mosaic detectors
        # before doing this? That would mean we only do one interpolation,
        # not two, and that's definitely better!
        if not all(len(ad) == 1 for ad in adinputs):
            raise OSError("All input images must have only one extension.")

        ad_ref = adinputs[0]
        ndim = len(ad_ref[0].shape)

        # No transform for the reference AD
        for i_ad, ad in enumerate(adinputs):
            if i_ad == 0:
                ref_wcs = ad[0].wcs
                t_align = models.Identity(ndim)
            else:
                t_align = ad[0].wcs.forward_transform | ref_wcs.backward_transform
                if force_affine:
                    affine = adwcs.calculate_affine_matrices(t_align, ad[0].shape)
                    t_align = models.AffineTransformation2D(matrix=affine.matrix[::-1, ::-1],
                                                            translation=affine.offset[::-1])

            resampled_frame = copy(ad[0].wcs.input_frame)
            resampled_frame.name = "resampled"
            ad[0].wcs = gWCS([(ad[0].wcs.input_frame, t_align),
                              (resampled_frame, ref_wcs.pipeline[0].transform)] +
                              ref_wcs.pipeline[1:])

        adoutputs = self._resample_to_new_frame(adinputs, frame="resampled",
                                                process_objcat=False, **params)
        for ad in adoutputs:
            try:
                trans_data = ad.nddata[0].meta.pop('transform')
            except KeyError:
                pass
            else:
                corners = np.array(trans_data['corners'][0])
                ncorners = len(corners)
                ad.hdr["AREATYPE"] = (f"P{ncorners}",
                                      f"Region with {ncorners} vertices")
                for i, corner in enumerate(zip(*corners), start=1):
                    for axis, value in enumerate(reversed(corner), start=1):
                        key_name = f"AREA{i}_{axis}"
                        key_comment = f"Vertex {i}, dimension {axis}"
                        ad.hdr[key_name] = (value + 1, key_comment)
                jfactor = trans_data['jfactors'][0]
                ad.hdr["JFACTOR"] = (jfactor, "J-factor in resampling")

            ad.update_filename(suffix=sfx, strip=True)
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)

        return adoutputs
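
The WCS update above splices an alignment step onto the front of the reference pipeline: the new first step maps each image's pixels into the shared "resampled" frame, after which the reference pipeline carries them to world coordinates. A self-contained sketch of the same pattern (frames, transforms, and sky coordinates are illustrative):

from astropy import coordinates as coord
from astropy.modeling import models
from gwcs import coordinate_frames as cf
from gwcs.wcs import WCS as gWCS

in_frame = cf.Frame2D(name="pixels")
out_frame = cf.CelestialFrame(reference_frame=coord.ICRS(), name="world")
sky_model = (models.Pix2Sky_TAN() |
             models.RotateNative2Celestial(30.0, 45.0, 180.0))
ref_wcs = gWCS([(in_frame, sky_model), (out_frame, None)])

t_align = models.Shift(2.0) & models.Shift(-1.0)  # assumed alignment model
resampled_frame = cf.Frame2D(name="resampled")
new_wcs = gWCS([(in_frame, t_align),
                (resampled_frame, ref_wcs.pipeline[0].transform)] +
               ref_wcs.pipeline[1:])
print(new_wcs(10.0, 10.0))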