Example #1
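The examples below are excerpts of `composite_images` and `_make_image` from matplotlib's image module, collected from several library versions. Run standalone they would need roughly the following imports; this is a sketch, and the private names (`_image`, `_interpd_`, `_rgb_to_rgba`) match matplotlib 2.x internals and may move or disappear in other versions.

import numpy as np
from math import ceil

from matplotlib import _image  # C extension providing resample()
from matplotlib.transforms import (Affine2D, Bbox, IdentityTransform,
                                   TransformedBbox)
from matplotlib.image import _interpd_, _rgb_to_rgba  # private helpers
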
def composite_images(images, renderer, magnification=1.0):
    """
    Composite a number of RGBA images into one.  The images are
    composited in the order in which they appear in the `images` list.

    Parameters
    ----------
    images : list of Images
        Each must have a `make_image` method.  For each image,
        `can_composite` should return `True`, though this is not
        enforced by this function.  Each image must have a purely
        affine transformation with no shear.

    renderer : RendererBase instance

    magnification : float
        The additional magnification to apply for the renderer in use.

    Returns
    -------
    tuple : image, offset_x, offset_y
        Returns the tuple:

        - image: A uint8 RGBA numpy array holding the composited result.

        - offset_x, offset_y: The offset of the image (left, bottom)
          in the output figure.
    """
    if len(images) == 0:
        return np.empty((0, 0, 4), dtype=np.uint8), 0, 0

    parts = []
    bboxes = []
    for image in images:
        data, x, y, trans = image.make_image(renderer, magnification)
        if data is not None:
            x *= magnification
            y *= magnification
            parts.append((data, x, y, image.get_alpha() or 1.0))
            bboxes.append(
                Bbox([[x, y], [x + data.shape[1], y + data.shape[0]]]))

    if len(parts) == 0:
        return np.empty((0, 0, 4), dtype=np.uint8), 0, 0

    bbox = Bbox.union(bboxes)

    output = np.zeros((int(bbox.height), int(bbox.width), 4), dtype=np.uint8)

    for data, x, y, alpha in parts:
        trans = Affine2D().translate(x - bbox.x0, y - bbox.y0)
        _image.resample(data,
                        output,
                        trans,
                        _image.NEAREST,
                        resample=False,
                        alpha=alpha)

    return output, bbox.x0 / magnification, bbox.y0 / magnification
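
A minimal usage sketch for the function above, assuming the Agg backend (so that `fig.canvas.get_renderer()` exists); the arrays and extents are illustrative only.

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.image import composite_images

fig, ax = plt.subplots()
im1 = ax.imshow(np.random.rand(8, 8), extent=(0, 1, 0, 1))
im2 = ax.imshow(np.random.rand(8, 8), extent=(0.5, 1.5, 0.5, 1.5), alpha=0.5)
fig.canvas.draw()  # make sure a renderer and up-to-date transforms exist
img, off_x, off_y = composite_images([im1, im2], fig.canvas.get_renderer())
print(img.shape, off_x, off_y)  # one RGBA array covering both images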
Example #2
    def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
                    unsampled=False, round_to_pixel_border=True):
        """
        Normalize, rescale and color the image `A` from the given
        in_bbox (in data space), to the given out_bbox (in pixel
        space) clipped to the given clip_bbox (also in pixel space),
        and magnified by the magnification factor.

        `A` may be a greyscale image (MxN) with a dtype of `float32`,
        `float64`, `uint16` or `uint8`, or an RGBA image (MxNx4) with
        a dtype of `float32`, `float64`, or `uint8`.

        If `unsampled` is True, the image will not be scaled, but an
        appropriate affine transformation will be returned instead.

        If `round_to_pixel_border` is True, the output image size will
        be rounded up to the next pixel boundary.  This makes the
        images align correctly with the axes.  However, it should not
        be used where exact scaling is needed, such as in FigureImage.

        Returns the resulting (image, x, y, trans), where (x, y) is
        the upper left corner of the result in pixel space, and
        `trans` is the affine transformation from the image to pixel
        space.
        """
        if A is None:
            raise RuntimeError('You must first set the image'
                               ' array or the image attribute')

        clipped_bbox = Bbox.intersection(out_bbox, clip_bbox)

        if clipped_bbox is None:
            return None, 0, 0, None

        out_width_base = clipped_bbox.width * magnification
        out_height_base = clipped_bbox.height * magnification

        if out_width_base == 0 or out_height_base == 0:
            return None, 0, 0, None

        if self.origin == 'upper':
            # Flip the input image using a transform.  This avoids the
            # problem with flipping the array, which results in a copy
            # when it is converted to contiguous in the C wrapper
            t0 = Affine2D().translate(0, -A.shape[0]).scale(1, -1)
        else:
            t0 = IdentityTransform()

        t0 += (
            Affine2D()
            .scale(
                in_bbox.width / A.shape[1],
                in_bbox.height / A.shape[0])
            .translate(in_bbox.x0, in_bbox.y0)
            + self.get_transform())

        t = (t0
             + Affine2D().translate(
                 -clipped_bbox.x0,
                 -clipped_bbox.y0)
             .scale(magnification, magnification))

        # So that the image is aligned with the edge of the axes, we want
        # to round up the output width to the next integer.  This also
        # means scaling the transform just slightly to account for the
        # extra subpixel.
        if (t.is_affine and round_to_pixel_border and
                (out_width_base % 1.0 != 0.0 or out_height_base % 1.0 != 0.0)):
            out_width = int(ceil(out_width_base))
            out_height = int(ceil(out_height_base))
            extra_width = (out_width - out_width_base) / out_width_base
            extra_height = (out_height - out_height_base) / out_height_base
            t += Affine2D().scale(
                1.0 + extra_width, 1.0 + extra_height)
        else:
            out_width = int(out_width_base)
            out_height = int(out_height_base)

        if not unsampled:
            created_rgba_mask = False

            if A.ndim not in (2, 3):
                raise ValueError("Invalid dimensions, got %s" % (A.shape,))

            if A.ndim == 2:
                A = self.norm(A)
                if A.dtype.kind == 'f':
                    # If the image is greyscale, convert to RGBA and
                    # use the extra channels to carry the over, under,
                    # and bad pixel flags through resizing.  This is
                    # needed because Agg's resampler clips aggressively
                    # to [0, 1], so out-of-bounds values are used to
                    # carry the over/under/bad information.
                    rgba = np.empty((A.shape[0], A.shape[1], 4), dtype=A.dtype)
                    rgba[..., 0] = A  # normalized data
                    rgba[..., 1] = A < 0  # under data
                    rgba[..., 2] = A > 1  # over data
                    rgba[..., 3] = ~A.mask  # bad data
                    A = rgba
                    output = np.zeros((out_height, out_width, 4),
                                      dtype=A.dtype)
                    alpha = 1.0
                    created_rgba_mask = True
                else:
                    # Convert data run through colormap norms that
                    # output integers (e.g. NoNorm and BoundaryNorm)
                    # to RGBA space before interpolating.  This is
                    # needed because the Agg resampler only works on
                    # floats in the range [0, 1], and because
                    # interpolating indexes into an arbitrary LUT may
                    # be problematic.
                    #
                    # This falls back to interpolating in RGBA space,
                    # which can produce its own artifacts: colors not
                    # in the map may show up in the final image.
                    A = self.cmap(A, alpha=self.get_alpha(), bytes=True)

            if not created_rgba_mask:
                # Always convert to RGBA, even if only RGB input
                if A.shape[2] == 3:
                    A = _rgb_to_rgba(A)
                elif A.shape[2] != 4:
                    raise ValueError("Invalid dimensions, got %s" % (A.shape,))

                output = np.zeros((out_height, out_width, 4), dtype=A.dtype)

                alpha = self.get_alpha()
                if alpha is None:
                    alpha = 1.0

            _image.resample(
                A, output, t, _interpd_[self.get_interpolation()],
                self.get_resample(), alpha,
                self.get_filternorm() or 0.0, self.get_filterrad() or 0.0)

            if created_rgba_mask:
                # Convert back to a masked greyscale array so
                # colormapping works correctly
                hid_output = output
                output = np.ma.masked_array(
                    hid_output[..., 0], hid_output[..., 3] < 0.5)
                # relabel under data
                output[hid_output[..., 1] > .5] = -1
                # relabel over data
                output[hid_output[..., 2] > .5] = 2

            output = self.to_rgba(output, bytes=True, norm=False)

            # Apply alpha *after* if the input was greyscale without a mask
            if A.ndim == 2 or created_rgba_mask:
                alpha = self.get_alpha()
                if alpha is not None and alpha != 1.0:
                    alpha_channel = output[:, :, 3]
                    alpha_channel[:] = np.asarray(
                        np.asarray(alpha_channel, np.float32) * alpha,
                        np.uint8)
        else:
            if self._imcache is None:
                self._imcache = self.to_rgba(A, bytes=True, norm=(A.ndim == 2))
            output = self._imcache

            # Subset the input image to only the part that will be
            # displayed
            subset = TransformedBbox(
                clip_bbox, t0.frozen().inverted()).frozen()
            output = output[
                int(max(subset.ymin, 0)):
                int(min(subset.ymax + 1, output.shape[0])),
                int(max(subset.xmin, 0)):
                int(min(subset.xmax + 1, output.shape[1]))]

            t = Affine2D().translate(
                int(max(subset.xmin, 0)), int(max(subset.ymin, 0))) + t

        return output, clipped_bbox.x0, clipped_bbox.y0, t
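
The `origin == 'upper'` branch above flips the image with a transform instead of flipping the array, avoiding a copy. A quick check of what that composed transform does, with a hypothetical row count `h`:

from matplotlib.transforms import Affine2D

h = 4  # hypothetical number of image rows
t0 = Affine2D().translate(0, -h).scale(1, -1)  # maps y to h - y
print(t0.transform([[0, 0], [0, h]]))  # [[0. 4.] [0. 0.]]: row 0 moves to the top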
Example #3
    def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
                    unsampled=False, round_to_pixel_border=True):
        """
        Normalize, rescale and color the image `A` from the given
        in_bbox (in data space), to the given out_bbox (in pixel
        space) clipped to the given clip_bbox (also in pixel space),
        and magnified by the magnification factor.

        `A` may be a greyscale image (MxN) with a dtype of `float32`,
        `float64`, `uint16` or `uint8`, or an RGBA image (MxNx4) with
        a dtype of `float32`, `float64`, or `uint8`.

        If `unsampled` is True, the image will not be scaled, but an
        appropriate affine transformation will be returned instead.

        If `round_to_pixel_border` is True, the output image size will
        be rounded up to the next pixel boundary.  This makes the
        images align correctly with the axes.  However, it should not
        be used where exact scaling is needed, such as in FigureImage.

        Returns the resulting (image, x, y, trans), where (x, y) is
        the upper left corner of the result in pixel space, and
        `trans` is the affine transformation from the image to pixel
        space.
        """
        if A is None:
            raise RuntimeError('You must first set the image'
                               ' array or the image attribute')

        clipped_bbox = Bbox.intersection(out_bbox, clip_bbox)

        if clipped_bbox is None:
            return None, 0, 0, None

        out_width_base = clipped_bbox.width * magnification
        out_height_base = clipped_bbox.height * magnification

        if out_width_base == 0 or out_height_base == 0:
            return None, 0, 0, None

        if self.origin == 'upper':
            # Flip the input image using a transform.  This avoids the
            # problem with flipping the array, which results in a copy
            # when it is converted to contiguous in the C wrapper
            t0 = Affine2D().translate(0, -A.shape[0]).scale(1, -1)
        else:
            t0 = IdentityTransform()

        t0 += (
            Affine2D()
            .scale(
                in_bbox.width / A.shape[1],
                in_bbox.height / A.shape[0])
            .translate(in_bbox.x0, in_bbox.y0)
            + self.get_transform())

        t = (t0
             + Affine2D().translate(
                 -clipped_bbox.x0,
                 -clipped_bbox.y0)
             .scale(magnification, magnification))

        # So that the image is aligned with the edge of the axes, we want
        # to round up the output width to the next integer.  This also
        # means scaling the transform just slightly to account for the
        # extra subpixel.
        if (t.is_affine and round_to_pixel_border and
            (out_width_base % 1.0 != 0.0 or
             out_height_base % 1.0 != 0.0)):
            out_width = int(ceil(out_width_base) + 1)
            out_height = int(ceil(out_height_base) + 1)
            extra_width = (out_width - out_width_base) / out_width_base
            extra_height = (out_height - out_height_base) / out_height_base
            t += Affine2D().scale(
                1.0 + extra_width, 1.0 + extra_height)
        else:
            out_width = int(out_width_base)
            out_height = int(out_height_base)

        if not unsampled:
            created_rgba_mask = False

            if A.ndim == 2:
                A = self.norm(A)
                # If the image is greyscale, convert to RGBA with the
                # correct alpha channel for resizing
                rgba = np.empty((A.shape[0], A.shape[1], 4), dtype=A.dtype)
                rgba[..., 0:3] = np.expand_dims(A, 2)
                if A.dtype.kind == 'f':
                    rgba[..., 3] = ~A.mask
                else:
                    rgba[..., 3] = np.where(A.mask, 0, np.iinfo(A.dtype).max)
                A = rgba
                output = np.zeros((out_height, out_width, 4), dtype=A.dtype)
                alpha = 1.0
                created_rgba_mask = True
            elif A.ndim == 3:
                # Always convert to RGBA, even if only RGB input
                if A.shape[2] == 3:
                    A = _rgb_to_rgba(A)
                elif A.shape[2] != 4:
                    raise ValueError("Invalid dimensions, got %s" % (A.shape,))

                output = np.zeros((out_height, out_width, 4), dtype=A.dtype)

                alpha = self.get_alpha()
                if alpha is None:
                    alpha = 1.0
            else:
                raise ValueError("Invalid dimensions, got %s" % (A.shape,))

            _image.resample(
                A, output, t, _interpd_[self.get_interpolation()],
                self.get_resample(), alpha,
                self.get_filternorm() or 0.0, self.get_filterrad() or 0.0)

            if created_rgba_mask:
                # Convert back to a masked greyscale array so
                # colormapping works correctly
                output = np.ma.masked_array(
                    output[..., 0], output[..., 3] < 0.5)

            output = self.to_rgba(output, bytes=True, norm=False)

            # Apply alpha *after* if the input was greyscale without a mask
            if A.ndim == 2 or created_rgba_mask:
                alpha = self.get_alpha()
                if alpha is not None and alpha != 1.0:
                    alpha_channel = output[:, :, 3]
                    alpha_channel[:] = np.asarray(
                        np.asarray(alpha_channel, np.float32) * alpha,
                        np.uint8)
        else:
            if self._imcache is None:
                self._imcache = self.to_rgba(A, bytes=True, norm=(A.ndim == 2))
            output = self._imcache

            # Subset the input image to only the part that will be
            # displayed
            subset = TransformedBbox(
                clip_bbox, t0.frozen().inverted()).frozen()
            output = output[
                int(max(subset.ymin, 0)):
                int(min(subset.ymax + 1, output.shape[0])),
                int(max(subset.xmin, 0)):
                int(min(subset.xmax + 1, output.shape[1]))]

            t = Affine2D().translate(
                int(max(subset.xmin, 0)), int(max(subset.ymin, 0))) + t

        return output, clipped_bbox.x0, clipped_bbox.y0, t
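
This version rounds the output up with an extra pixel (`ceil(...) + 1`) and then stretches the transform to cover the added fraction. Worked through with an illustrative width:

from math import ceil

out_width_base = 100.4                     # clipped width * magnification
out_width = int(ceil(out_width_base) + 1)  # 102 in this version's variant
extra_width = (out_width - out_width_base) / out_width_base
print(out_width, 1.0 + extra_width)        # 102 1.0159...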
Example #4
    def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
                    unsampled=False, round_to_pixel_border=True):
        """
        Normalize, rescale and color the image `A` from the given
        in_bbox (in data space), to the given out_bbox (in pixel
        space) clipped to the given clip_bbox (also in pixel space),
        and magnified by the magnification factor.

        `A` may be a greyscale image (MxN) with a dtype of `float32`,
        `float64`, `uint16` or `uint8`, or an RGBA image (MxNx4) with
        a dtype of `float32`, `float64`, or `uint8`.

        If `unsampled` is True, the image will not be scaled, but an
        appropriate affine transformation will be returned instead.

        If `round_to_pixel_border` is True, the output image size will
        be rounded up to the next pixel boundary.  This makes the
        images align correctly with the axes.  However, it should not
        be used where exact scaling is needed, such as in FigureImage.

        Returns the resulting (image, x, y, trans), where (x, y) is
        the upper left corner of the result in pixel space, and
        `trans` is the affine transformation from the image to pixel
        space.
        """
        if A is None:
            raise RuntimeError('You must first set the image'
                               ' array or the image attribute')

        clipped_bbox = Bbox.intersection(out_bbox, clip_bbox)

        if clipped_bbox is None:
            return None, 0, 0, None

        out_width_base = clipped_bbox.width * magnification
        out_height_base = clipped_bbox.height * magnification

        if out_width_base == 0 or out_height_base == 0:
            return None, 0, 0, None

        if self.origin == 'upper':
            # Flip the input image using a transform.  This avoids the
            # problem with flipping the array, which results in a copy
            # when it is converted to contiguous in the C wrapper
            t0 = Affine2D().translate(0, -A.shape[0]).scale(1, -1)
        else:
            t0 = IdentityTransform()

        t0 += (
            Affine2D()
            .scale(
                in_bbox.width / A.shape[1],
                in_bbox.height / A.shape[0])
            .translate(in_bbox.x0, in_bbox.y0)
            + self.get_transform())

        t = (t0
             + Affine2D().translate(
                 -clipped_bbox.x0,
                 -clipped_bbox.y0)
             .scale(magnification, magnification))

        # So that the image is aligned with the edge of the axes, we want
        # to round up the output width to the next integer.  This also
        # means scaling the transform just slightly to account for the
        # extra subpixel.
        if (t.is_affine and round_to_pixel_border and
            (out_width_base % 1.0 != 0.0 or
             out_height_base % 1.0 != 0.0)):
            out_width = int(ceil(out_width_base) + 1)
            out_height = int(ceil(out_height_base) + 1)
            extra_width = (out_width - out_width_base) / out_width_base
            extra_height = (out_height - out_height_base) / out_height_base
            t += Affine2D().scale(
                1.0 + extra_width, 1.0 + extra_height)
        else:
            out_width = int(out_width_base)
            out_height = int(out_height_base)

        if not unsampled:
            if A.ndim == 2:
                A = self.norm(A)
                if A.dtype.kind == 'f':
                    # For floating-point greyscale images, we treat negative
                    # numbers as transparent.

                    # TODO: Use np.full when we support Numpy 1.9 as a
                    # minimum
                    output = np.empty((out_height, out_width), dtype=A.dtype)
                    output[...] = -100.0
                else:
                    output = np.zeros((out_height, out_width), dtype=A.dtype)

                alpha = 1.0
            elif A.ndim == 3:
                # Always convert to RGBA, even if only RGB input
                if A.shape[2] == 3:
                    A = _rgb_to_rgba(A)
                elif A.shape[2] != 4:
                    raise ValueError("Invalid dimensions, got %s" % (A.shape,))

                output = np.zeros((out_height, out_width, 4), dtype=A.dtype)

                alpha = self.get_alpha()
                if alpha is None:
                    alpha = 1.0
            else:
                raise ValueError("Invalid dimensions, got %s" % (A.shape,))

            _image.resample(
                A, output, t, _interpd_[self.get_interpolation()],
                self.get_resample(), alpha,
                self.get_filternorm() or 0.0, self.get_filterrad() or 0.0)

            output = self.to_rgba(output, bytes=True, norm=False)

            # Apply alpha *after* if the input was greyscale
            if A.ndim == 2:
                alpha = self.get_alpha()
                if alpha is not None and alpha != 1.0:
                    alpha_channel = output[:, :, 3]
                    alpha_channel[:] = np.asarray(
                        np.asarray(alpha_channel, np.float32) * alpha,
                        np.uint8)
        else:
            if self._imcache is None:
                self._imcache = self.to_rgba(A, bytes=True, norm=(A.ndim == 2))
            output = self._imcache

            # Subset the input image to only the part that will be
            # displayed
            subset = TransformedBbox(
                clip_bbox, t0.frozen().inverted()).frozen()
            output = output[
                int(max(subset.ymin, 0)):
                int(min(subset.ymax + 1, output.shape[0])),
                int(max(subset.xmin, 0)):
                int(min(subset.xmax + 1, output.shape[1]))]

            t = Affine2D().translate(
                int(max(subset.xmin, 0)), int(max(subset.ymin, 0))) + t

        return output, clipped_bbox.x0, clipped_bbox.y0, t
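
Each version of `_make_image` above starts by clipping `out_bbox` against `clip_bbox`; `Bbox.intersection` returns None when the boxes do not overlap, which the code treats as "nothing to draw". Illustrative extents:

from matplotlib.transforms import Bbox

out_bbox = Bbox.from_extents(0, 0, 400, 300)
clip_bbox = Bbox.from_extents(100, 50, 500, 350)
print(Bbox.intersection(out_bbox, clip_bbox).extents)  # [100.  50. 400. 300.]
print(Bbox.intersection(out_bbox, Bbox.from_extents(600, 0, 700, 100)))  # None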
Example #5
    def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
                    unsampled=False, round_to_pixel_border=True):
        """
        Normalize, rescale and color the image `A` from the given
        in_bbox (in data space), to the given out_bbox (in pixel
        space) clipped to the given clip_bbox (also in pixel space),
        and magnified by the magnification factor.

        `A` may be a greyscale image (MxN) with a dtype of `float32`,
        `float64`, `uint16` or `uint8`, or an RGBA image (MxNx4) with
        a dtype of `float32`, `float64`, or `uint8`.

        If `unsampled` is True, the image will not be scaled, but an
        appropriate affine transformation will be returned instead.

        If `round_to_pixel_border` is True, the output image size will
        be rounded up to the next pixel boundary.  This makes the
        images align correctly with the axes.  However, it should not
        be used where exact scaling is needed, such as in FigureImage.

        Returns the resulting (image, x, y, trans), where (x, y) is
        the upper left corner of the result in pixel space, and
        `trans` is the affine transformation from the image to pixel
        space.
        """
        if A is None:
            raise RuntimeError('You must first set the image'
                               ' array or the image attribute')

        clipped_bbox = Bbox.intersection(out_bbox, clip_bbox)

        if clipped_bbox is None:
            return None, 0, 0, None

        out_width_base = clipped_bbox.width * magnification
        out_height_base = clipped_bbox.height * magnification

        if out_width_base == 0 or out_height_base == 0:
            return None, 0, 0, None

        if self.origin == 'upper':
            # Flip the input image using a transform.  This avoids the
            # problem with flipping the array, which results in a copy
            # when it is converted to contiguous in the C wrapper
            t0 = Affine2D().translate(0, -A.shape[0]).scale(1, -1)
        else:
            t0 = IdentityTransform()

        t0 += (
            Affine2D()
            .scale(
                in_bbox.width / A.shape[1],
                in_bbox.height / A.shape[0])
            .translate(in_bbox.x0, in_bbox.y0)
            + self.get_transform())

        t = (t0
             + Affine2D().translate(
                 -clipped_bbox.x0,
                 -clipped_bbox.y0)
             .scale(magnification, magnification))

        # So that the image is aligned with the edge of the axes, we want
        # to round up the output width to the next integer.  This also
        # means scaling the transform just slightly to account for the
        # extra subpixel.
        if (t.is_affine and round_to_pixel_border and
                (out_width_base % 1.0 != 0.0 or out_height_base % 1.0 != 0.0)):
            out_width = int(ceil(out_width_base))
            out_height = int(ceil(out_height_base))
            extra_width = (out_width - out_width_base) / out_width_base
            extra_height = (out_height - out_height_base) / out_height_base
            t += Affine2D().scale(
                1.0 + extra_width, 1.0 + extra_height)
        else:
            out_width = int(out_width_base)
            out_height = int(out_height_base)

        if not unsampled:
            created_rgba_mask = False

            if A.ndim not in (2, 3):
                raise ValueError("Invalid dimensions, got %s" % (A.shape,))

            if A.ndim == 2:
                A = self.norm(A)
                if A.dtype.kind == 'f':
                    # If the image is greyscale, convert to RGBA and
                    # use the extra channels to carry the over, under,
                    # and bad pixel flags through resizing.  This is
                    # needed because Agg's resampler clips aggressively
                    # to [0, 1], so out-of-bounds values are used to
                    # carry the over/under/bad information.
                    rgba = np.empty((A.shape[0], A.shape[1], 4), dtype=A.dtype)
                    rgba[..., 0] = A  # normalized data
                    # this is to work around spurious warnings coming
                    # out of masked arrays.
                    with np.errstate(invalid='ignore'):
                        rgba[..., 1] = A < 0  # under data
                        rgba[..., 2] = A > 1  # over data
                    rgba[..., 3] = ~A.mask  # bad data
                    A = rgba
                    output = np.zeros((out_height, out_width, 4),
                                      dtype=A.dtype)
                    alpha = 1.0
                    created_rgba_mask = True
                else:
                    # Convert data run through colormap norms that
                    # output integers (e.g. NoNorm and BoundaryNorm)
                    # to RGBA space before interpolating.  This is
                    # needed because the Agg resampler only works on
                    # floats in the range [0, 1], and because
                    # interpolating indexes into an arbitrary LUT may
                    # be problematic.
                    #
                    # This falls back to interpolating in RGBA space,
                    # which can produce its own artifacts: colors not
                    # in the map may show up in the final image.
                    A = self.cmap(A, alpha=self.get_alpha(), bytes=True)

            if not created_rgba_mask:
                # Always convert to RGBA, even if only RGB input
                if A.shape[2] == 3:
                    A = _rgb_to_rgba(A)
                elif A.shape[2] != 4:
                    raise ValueError("Invalid dimensions, got %s" % (A.shape,))

                output = np.zeros((out_height, out_width, 4), dtype=A.dtype)

                alpha = self.get_alpha()
                if alpha is None:
                    alpha = 1.0

            _image.resample(
                A, output, t, _interpd_[self.get_interpolation()],
                self.get_resample(), alpha,
                self.get_filternorm() or 0.0, self.get_filterrad() or 0.0)

            if created_rgba_mask:
                # Convert back to a masked greyscale array so
                # colormapping works correctly
                hid_output = output
                output = np.ma.masked_array(
                    hid_output[..., 0], hid_output[..., 3] < 0.5)
                # relabel under data
                output[hid_output[..., 1] > .5] = -1
                # relabel over data
                output[hid_output[..., 2] > .5] = 2

            output = self.to_rgba(output, bytes=True, norm=False)

            # Apply alpha *after* if the input was greyscale without a mask
            if A.ndim == 2 or created_rgba_mask:
                alpha = self.get_alpha()
                if alpha is not None and alpha != 1.0:
                    alpha_channel = output[:, :, 3]
                    alpha_channel[:] = np.asarray(
                        np.asarray(alpha_channel, np.float32) * alpha,
                        np.uint8)
        else:
            if self._imcache is None:
                self._imcache = self.to_rgba(A, bytes=True, norm=(A.ndim == 2))
            output = self._imcache

            # Subset the input image to only the part that will be
            # displayed
            subset = TransformedBbox(
                clip_bbox, t0.frozen().inverted()).frozen()
            output = output[
                int(max(subset.ymin, 0)):
                int(min(subset.ymax + 1, output.shape[0])),
                int(max(subset.xmin, 0)):
                int(min(subset.xmax + 1, output.shape[1]))]

            t = Affine2D().translate(
                int(max(subset.xmin, 0)), int(max(subset.ymin, 0))) + t

        return output, clipped_bbox.x0, clipped_bbox.y0, t
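
A sketch of the `created_rgba_mask` round trip above on a tiny, illustrative array: channel 0 carries the normalized data through Agg's resampler, channel 3 carries the "valid" flag, and the mask is rebuilt afterwards so colormapping still works.

import numpy as np

rgba = np.zeros((1, 3, 4))
rgba[..., 0] = [0.2, 0.7, 0.9]  # normalized data
rgba[..., 3] = [1, 0, 1]        # middle pixel flagged as bad
out = np.ma.masked_array(rgba[..., 0], rgba[..., 3] < 0.5)
print(out)  # [[0.2 -- 0.9]]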