def get_tick_transform(self, axes):
     return IdentityTransform()  # axes.transData
Example #2
File: image.py Project: zyh329/matplotlib
    def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
                    unsampled=False, round_to_pixel_border=True):
        """
        Normalize, rescale and color the image `A` from the given
        in_bbox (in data space), to the given out_bbox (in pixel
        space) clipped to the given clip_bbox (also in pixel space),
        and magnified by the magnification factor.

        `A` may be a greyscale image (MxN) with a dtype of `float32`,
        `float64`, `uint16` or `uint8`, or an RGBA image (MxNx4) with
        a dtype of `float32`, `float64`, or `uint8`.

        If `unsampled` is True, the image will not be scaled, but an
        appropriate affine transformation will be returned instead.

        If `round_to_pixel_border` is True, the output image size will
        be rounded to the nearest pixel boundary.  This makes the
        images align correctly with the axes.  It should not be used
        in cases where you want exact scaling, however, such as
        FigureImage.

        Returns the resulting (image, x, y, trans), where (x, y) is
        the upper left corner of the result in pixel space, and
        `trans` is the affine transformation from the image to pixel
        space.
        """
        if A is None:
            raise RuntimeError('You must first set the image'
                               ' array or the image attribute')

        clipped_bbox = Bbox.intersection(out_bbox, clip_bbox)

        if clipped_bbox is None:
            return None, 0, 0, None

        out_width_base = clipped_bbox.width * magnification
        out_height_base = clipped_bbox.height * magnification

        if out_width_base == 0 or out_height_base == 0:
            return None, 0, 0, None

        if self.origin == 'upper':
            # Flip the input image using a transform.  This avoids the
            # problem with flipping the array, which results in a copy
            # when it is converted to contiguous in the C wrapper
            t0 = Affine2D().translate(0, -A.shape[0]).scale(1, -1)
        else:
            t0 = IdentityTransform()

        t0 += (
            Affine2D()
            .scale(
                in_bbox.width / A.shape[1],
                in_bbox.height / A.shape[0])
            .translate(in_bbox.x0, in_bbox.y0)
            + self.get_transform())

        t = (t0
             + Affine2D().translate(
                 -clipped_bbox.x0,
                 -clipped_bbox.y0)
             .scale(magnification, magnification))

        # So that the image is aligned with the edge of the axes, we want
        # to round up the output width to the next integer.  This also
        # means scaling the transform just slightly to account for the
        # extra subpixel.
        if (t.is_affine and round_to_pixel_border and
                (out_width_base % 1.0 != 0.0 or out_height_base % 1.0 != 0.0)):
            out_width = int(ceil(out_width_base))
            out_height = int(ceil(out_height_base))
            extra_width = (out_width - out_width_base) / out_width_base
            extra_height = (out_height - out_height_base) / out_height_base
            t += Affine2D().scale(
                1.0 + extra_width, 1.0 + extra_height)
        else:
            out_width = int(out_width_base)
            out_height = int(out_height_base)

        if not unsampled:
            created_rgba_mask = False

            if A.ndim not in (2, 3):
                raise ValueError("Invalid dimensions, got %s" % (A.shape,))

            if A.ndim == 2:
                A = self.norm(A)
                if A.dtype.kind == 'f':
                    # If the image is greyscale, convert to RGBA and
                    # use the extra channels for resizing the over,
                    # under, and bad pixels.  This is needed because
                    # Agg's resampler is very aggressive about
                    # clipping to [0, 1] and we use out-of-bounds
                    # values to carry the over/under/bad information
                    rgba = np.empty((A.shape[0], A.shape[1], 4), dtype=A.dtype)
                    rgba[..., 0] = A  # normalized data
                    # this is to work around spurious warnings coming
                    # out of masked arrays.
                    with np.errstate(invalid='ignore'):
                        rgba[..., 1] = A < 0  # under data
                        rgba[..., 2] = A > 1  # over data
                    rgba[..., 3] = ~A.mask  # bad data
                    A = rgba
                    output = np.zeros((out_height, out_width, 4),
                                      dtype=A.dtype)
                    alpha = 1.0
                    created_rgba_mask = True
                else:
                    # For norms that output integers (e.g. NoNorm
                    # and BoundaryNorm), convert to RGBA space before
                    # interpolating.  This is needed because the
                    # Agg resampler only works on floats in the
                    # range [0, 1] and because interpolating indexes
                    # into an arbitrary LUT may be problematic.
                    #
                    # This falls back to interpolating in RGBA space, which
                    # can produce its own artifacts of colors not in the map
                    # showing up in the final image.
                    A = self.cmap(A, alpha=self.get_alpha(), bytes=True)

            if not created_rgba_mask:
                # Always convert to RGBA, even if only RGB input
                if A.shape[2] == 3:
                    A = _rgb_to_rgba(A)
                elif A.shape[2] != 4:
                    raise ValueError("Invalid dimensions, got %s" % (A.shape,))

                output = np.zeros((out_height, out_width, 4), dtype=A.dtype)

                alpha = self.get_alpha()
                if alpha is None:
                    alpha = 1.0

            _image.resample(
                A, output, t, _interpd_[self.get_interpolation()],
                self.get_resample(), alpha,
                self.get_filternorm() or 0.0, self.get_filterrad() or 0.0)

            if created_rgba_mask:
                # Convert back to a masked greyscale array so
                # colormapping works correctly
                hid_output = output
                output = np.ma.masked_array(
                    hid_output[..., 0], hid_output[..., 3] < 0.5)
                # relabel under data
                output[hid_output[..., 1] > .5] = -1
                # relabel over data
                output[hid_output[..., 2] > .5] = 2

            output = self.to_rgba(output, bytes=True, norm=False)

            # Apply alpha *after* if the input was greyscale without a mask
            if A.ndim == 2 or created_rgba_mask:
                alpha = self.get_alpha()
                if alpha is not None and alpha != 1.0:
                    alpha_channel = output[:, :, 3]
                    alpha_channel[:] = np.asarray(
                        np.asarray(alpha_channel, np.float32) * alpha,
                        np.uint8)
        else:
            if self._imcache is None:
                self._imcache = self.to_rgba(A, bytes=True, norm=(A.ndim == 2))
            output = self._imcache

            # Subset the input image to only the part that will be
            # displayed
            subset = TransformedBbox(
                clip_bbox, t0.frozen().inverted()).frozen()
            output = output[
                int(max(subset.ymin, 0)):
                int(min(subset.ymax + 1, output.shape[0])),
                int(max(subset.xmin, 0)):
                int(min(subset.xmax + 1, output.shape[1]))]

            t = Affine2D().translate(
                int(max(subset.xmin, 0)), int(max(subset.ymin, 0))) + t

        return output, clipped_bbox.x0, clipped_bbox.y0, t
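A minimal sketch of the coordinate chain built above, run outside the class (the array and bbox sizes are toy values, and self.get_transform() is omitted): t0 maps array indices into data space, flipping rows for origin='upper' with a transform instead of copying the array.

import numpy as np
from matplotlib.transforms import Affine2D, Bbox

# Toy inputs: a 4x8 array drawn into the data rectangle [0, 8] x [0, 4].
A = np.zeros((4, 8))
in_bbox = Bbox.from_extents(0, 0, 8, 4)

# origin='upper': flip the rows with a transform rather than copying A.
t0 = Affine2D().translate(0, -A.shape[0]).scale(1, -1)

# Map array indices (col, row) into data space, as _make_image does.
t0 += (Affine2D()
       .scale(in_bbox.width / A.shape[1], in_bbox.height / A.shape[0])
       .translate(in_bbox.x0, in_bbox.y0))

# Row 0 is the top of the image, so index (0, 0) lands at the upper-left
# data corner (x0, y1).
print(t0.transform([(0, 0)]))  # [[0. 4.]]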
Example #3
 def get_transform(self):
     """
     Return the transform for linear scaling, which is just the
     `~matplotlib.transforms.IdentityTransform`.
     """
     return IdentityTransform()
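For illustration, the identity transform really is a no-op round trip (a tiny sketch, independent of any scale class):

import numpy as np
from matplotlib.transforms import IdentityTransform

tr = IdentityTransform()
pts = np.array([[1.5, -2.0], [0.0, 3.25]])
# Points pass through unchanged, and so does the inverse.
assert np.array_equal(tr.transform(pts), pts)
assert np.array_equal(tr.inverted().transform(pts), pts)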
Example #4
 def get_transform(self):
     """
     The transform for linear scaling is just the
     :class:`~matplotlib.transforms.IdentityTransform`.
     """
     return IdentityTransform()
Example #5
if __name__ == "__main__":

    usetex = plt.rcParams["text.usetex"]

    fig = plt.figure()

    # EXAMPLE 1

    ax = plt.subplot(211)

    arr = plt.imread(get_sample_data("grace_hopper.png"))

    text_path = TextPath((0, 0), "!?", size=150)
    p = PathClippedImagePatch(text_path, arr, ec="k",
                              transform=IdentityTransform())

    # p.set_clip_on(False)

    # make offset box
    offsetbox = AuxTransformBox(IdentityTransform())
    offsetbox.add_artist(p)

    # make anchored offset box
    ao = AnchoredOffsetbox(loc='upper left', child=offsetbox, frameon=True,
                           borderpad=0.2)
    ax.add_artist(ao)

    # another text
    from matplotlib.patches import PathPatch
    if usetex:
Example #6
def taylor_plot(data_list, ax=None, layer_opts=None, *args, **kwargs):
    """
    Construct a Taylor diagram from the data list provided. Taylor plots are a bit special!
    Layer_opts are parsed for itemstyle and color, which are then combined and passed to the plotting routines. This
    allows reuse of familiar command line kwargs while maintaining a simple API.

    :param list data_list: List of CommonData objects
    :param ax: Optional axes - this plot does not use a standard axes, so using the default is strongly
     recommended
    :param list layer_opts: A list of dictionaries optionally containing labels, itemstyles and colors for each data object
    :param list args: Optional extra arguments
    :param dict kwargs: Optional extra keyword arguments
    :return: Taylor plot and axes instances
    """
    import matplotlib.pyplot as plt
    from cis.plotting.taylor import ArcCosTransform
    from matplotlib.projections import PolarAxes
    from matplotlib.transforms import IdentityTransform, blended_transform_factory
    import mpl_toolkits.axisartist.floating_axes as floating_axes

    _ = kwargs.pop('central_longitude',
                   None)  # In case the Plotter has added it...

    layer_opts = [{} for i in data_list] if layer_opts is None else layer_opts
    labels = [layer_opt.pop('label', None) for layer_opt in layer_opts]

    # Pull together markers from the layer_opts
    markers = [layer_opt.pop('itemstyle', None) for layer_opt in layer_opts]
    if all(m is None for m in markers):
        # If all markers are None then just set the list to None
        markers = None
    elif any(m is None for m in markers):
        # If not all are None, but some are, then we have a problem
        raise ValueError(
            "If any markers are set then a marker must be set for every dataset"
        )

    # Pull together colors from the layer_opts
    colors = [layer_opt.pop('color', None) for layer_opt in layer_opts]
    if all(c is None for c in colors):
        # If all colors are None then just set the list to None
        colors = None
    elif any(c is None for c in colors):
        # If not all are None, but some are, then we have a problem
        raise ValueError(
            "If any colors are set then a color must be set for every dataset"
        )
    kwargs['itemwidth'] = layer_opts[0].pop('itemwidth', None)
    plot = Taylor(data_list, labels, colors, markers, *args, **kwargs)

    if ax is None:
        fig = plt.figure()

        tr = blended_transform_factory(
            ArcCosTransform(),
            IdentityTransform()) + PolarAxes.PolarTransform()

        gh = floating_axes.GridHelperCurveLinear(tr,
                                                 extremes=(plot.extend, 1., 0.,
                                                           plot.gammamax),
                                                 grid_locator1=None,
                                                 grid_locator2=None,
                                                 tick_formatter1=None,
                                                 tick_formatter2=None)
        ax = floating_axes.FloatingSubplot(fig, 1, 1, 1, grid_helper=gh)
        fig.add_subplot(ax)

    ax = plot(ax)

    return plot, ax
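The transform built above blends two one-dimensional transforms before going polar. A minimal sketch of the blending step alone, using an Affine2D stand-in for the cis-specific ArcCosTransform:

from matplotlib.transforms import (Affine2D, IdentityTransform,
                                   blended_transform_factory)

# x passes through the first transform, y through the second -- the same
# pattern as blending ArcCosTransform (x) with IdentityTransform (y).
tr = blended_transform_factory(Affine2D().scale(2.0, 1.0),
                               IdentityTransform())
print(tr.transform([(1.0, 1.0)]))  # [[2. 1.]]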
Example #7
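The snippet below relies on a text_to_rgba helper that is not shown on this page. A minimal sketch of such a helper (an assumption, not the verbatim original): render the string on an off-screen figure and read the RGBA pixels back from an in-memory PNG.

from io import BytesIO
import matplotlib.pyplot as plt
from matplotlib.figure import Figure

def text_to_rgba(s, *, dpi, **kwargs):
    # Render on a throwaway figure and read the pixels back from an
    # in-memory PNG; bbox_inches="tight" crops to the text extent.
    fig = Figure(facecolor="none")
    fig.text(0, 0, s, **kwargs)
    with BytesIO() as buf:
        fig.savefig(buf, dpi=dpi, format="png", bbox_inches="tight",
                    pad_inches=0)
        buf.seek(0)
        rgba = plt.imread(buf)
    return rgba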
fig = plt.figure()
rgba1 = text_to_rgba(r"IQ: $\sigma_i=15$", color="blue", fontsize=20, dpi=200)
rgba2 = text_to_rgba(r"some other string", color="red", fontsize=20, dpi=200)
# One can then draw such text images to a Figure using `.Figure.figimage`.
fig.figimage(rgba1, 100, 50)
fig.figimage(rgba2, 100, 150)

# One can also directly draw text to a figure with positioning
# in pixel coordinates by using `.Figure.text` together with
# `.transforms.IdentityTransform`.
fig.text(100,
         250,
         r"IQ: $\sigma_i=15$",
         color="blue",
         fontsize=20,
         transform=IdentityTransform())
fig.text(100,
         350,
         r"some other string",
         color="red",
         fontsize=20,
         transform=IdentityTransform())

plt.show()

#############################################################################
#
# .. admonition:: References
#
#    The use of the following functions, methods, classes and modules is shown
#    in this example:
Example #8
File: image.py Project: Perados/matplotlib
    def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
                    unsampled=False, round_to_pixel_border=True):
        """
        Normalize, rescale and color the image `A` from the given
        in_bbox (in data space), to the given out_bbox (in pixel
        space) clipped to the given clip_bbox (also in pixel space),
        and magnified by the magnification factor.

        `A` may be a greyscale image (MxN) with a dtype of `float32`,
        `float64`, `uint16` or `uint8`, or an RGBA image (MxNx4) with
        a dtype of `float32`, `float64`, or `uint8`.

        If `unsampled` is True, the image will not be scaled, but an
        appropriate affine transformation will be returned instead.

        If `round_to_pixel_border` is True, the output image size will
        be rounded to the nearest pixel boundary.  This makes the
        images align correctly with the axes.  It should not be used
        in cases where you want exact scaling, however, such as
        FigureImage.

        Returns the resulting (image, x, y, trans), where (x, y) is
        the upper left corner of the result in pixel space, and
        `trans` is the affine transformation from the image to pixel
        space.
        """
        if A is None:
            raise RuntimeError('You must first set the image'
                               ' array or the image attribute')

        clipped_bbox = Bbox.intersection(out_bbox, clip_bbox)

        if clipped_bbox is None:
            return None, 0, 0, None

        out_width_base = clipped_bbox.width * magnification
        out_height_base = clipped_bbox.height * magnification

        if out_width_base == 0 or out_height_base == 0:
            return None, 0, 0, None

        if self.origin == 'upper':
            # Flip the input image using a transform.  This avoids the
            # problem with flipping the array, which results in a copy
            # when it is converted to contiguous in the C wrapper
            t0 = Affine2D().translate(0, -A.shape[0]).scale(1, -1)
        else:
            t0 = IdentityTransform()

        t0 += (
            Affine2D()
            .scale(
                in_bbox.width / A.shape[1],
                in_bbox.height / A.shape[0])
            .translate(in_bbox.x0, in_bbox.y0)
            + self.get_transform())

        t = (t0
             + Affine2D().translate(
                 -clipped_bbox.x0,
                 -clipped_bbox.y0)
             .scale(magnification, magnification))

        # So that the image is aligned with the edge of the axes, we want
        # to round up the output width to the next integer.  This also
        # means scaling the transform just slightly to account for the
        # extra subpixel.
        if (t.is_affine and round_to_pixel_border and
            (out_width_base % 1.0 != 0.0 or
             out_height_base % 1.0 != 0.0)):
            out_width = int(ceil(out_width_base) + 1)
            out_height = int(ceil(out_height_base) + 1)
            extra_width = (out_width - out_width_base) / out_width_base
            extra_height = (out_height - out_height_base) / out_height_base
            t += Affine2D().scale(
                1.0 + extra_width, 1.0 + extra_height)
        else:
            out_width = int(out_width_base)
            out_height = int(out_height_base)

        if not unsampled:
            created_rgba_mask = False

            if A.ndim == 2:
                A = self.norm(A)
                # If the image is greyscale, convert to RGBA with the
                # correct alpha channel for resizing
                rgba = np.empty((A.shape[0], A.shape[1], 4), dtype=A.dtype)
                rgba[..., 0:3] = np.expand_dims(A, 2)
                if A.dtype.kind == 'f':
                    rgba[..., 3] = ~A.mask
                else:
                    rgba[..., 3] = np.where(A.mask, 0, np.iinfo(A.dtype).max)
                A = rgba
                output = np.zeros((out_height, out_width, 4), dtype=A.dtype)
                alpha = 1.0
                created_rgba_mask = True
            elif A.ndim == 3:
                # Always convert to RGBA, even if only RGB input
                if A.shape[2] == 3:
                    A = _rgb_to_rgba(A)
                elif A.shape[2] != 4:
                    raise ValueError("Invalid dimensions, got %s" % (A.shape,))

                output = np.zeros((out_height, out_width, 4), dtype=A.dtype)

                alpha = self.get_alpha()
                if alpha is None:
                    alpha = 1.0
            else:
                raise ValueError("Invalid dimensions, got %s" % (A.shape,))

            _image.resample(
                A, output, t, _interpd_[self.get_interpolation()],
                self.get_resample(), alpha,
                self.get_filternorm() or 0.0, self.get_filterrad() or 0.0)

            if created_rgba_mask:
                # Convert back to a masked greyscale array so
                # colormapping works correctly
                output = np.ma.masked_array(
                    output[..., 0], output[..., 3] < 0.5)

            output = self.to_rgba(output, bytes=True, norm=False)

            # Apply alpha *after* if the input was greyscale without a mask
            if A.ndim == 2 or created_rgba_mask:
                alpha = self.get_alpha()
                if alpha is not None and alpha != 1.0:
                    alpha_channel = output[:, :, 3]
                    alpha_channel[:] = np.asarray(
                        np.asarray(alpha_channel, np.float32) * alpha,
                        np.uint8)
        else:
            if self._imcache is None:
                self._imcache = self.to_rgba(A, bytes=True, norm=(A.ndim == 2))
            output = self._imcache

            # Subset the input image to only the part that will be
            # displayed
            subset = TransformedBbox(
                clip_bbox, t0.frozen().inverted()).frozen()
            output = output[
                int(max(subset.ymin, 0)):
                int(min(subset.ymax + 1, output.shape[0])),
                int(max(subset.xmin, 0)):
                int(min(subset.xmax + 1, output.shape[1]))]

            t = Affine2D().translate(
                int(max(subset.xmin, 0)), int(max(subset.ymin, 0))) + t

        return output, clipped_bbox.x0, clipped_bbox.y0, t
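The greyscale branch above encodes the mask in the alpha channel, using the dtype's full-scale value for valid integer pixels. The trick in isolation (a small sketch with toy data):

import numpy as np

A = np.ma.masked_array(np.array([[10, 20], [30, 40]], dtype=np.uint16),
                       mask=[[False, True], [False, False]])
# Bad (masked) pixels get alpha 0; valid pixels get the dtype maximum.
alpha = np.where(A.mask, 0, np.iinfo(A.dtype).max)
print(alpha)  # [[65535     0]
              #  [65535 65535]]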
Example #9
File: image.py Project: rika03/matplotlib
    def _draw_unsampled_image(self, renderer, gc):
        """
        Draw the image without resampling. The renderer must support a
        draw_image method with a scale parameter.
        """
        trans = self.get_transform()  # axes.transData

        # convert the coordinates to the intermediate coordinate (ic).
        # The transformation from the ic to the canvas is a pure
        # affine transform.

        # A straight-forward way is to use the non-affine part of the
        # original transform for conversion to the ic.

        # first, convert the image extent to the ic
        x_llc, x_trc, y_llc, y_trc = self.get_extent()

        xy = trans.transform(np.array([(x_llc, y_llc), (x_trc, y_trc)]))

        _xx1, _yy1 = xy[0]
        _xx2, _yy2 = xy[1]

        extent_in_ic = _xx1, _xx2, _yy1, _yy2

        # define trans_ic_to_canvas : unless _image_skew_coordinate is
        # set, it is simply the affine part of the original transform.
        if self._image_skew_coordinate:
            # skew the image when required.
            x_lrc, y_lrc = self._image_skew_coordinate
            xy2 = trans.transform(np.array([(x_lrc, y_lrc)]))
            _xx3, _yy3 = xy2[0]

            tr_rotate_skew = self._get_rotate_and_skew_transform(
                _xx1, _yy1, _xx2, _yy2, _xx3, _yy3)
            trans_ic_to_canvas = tr_rotate_skew
        else:
            trans_ic_to_canvas = IdentityTransform()

        # Now, viewLim in the ic.  It can be rotated and can be
        # skewed. Make it big enough.
        x1, y1, x2, y2 = self.axes.bbox.extents
        trans_canvas_to_ic = trans_ic_to_canvas.inverted()
        xy_ = trans_canvas_to_ic.transform(
            np.array([(x1, y1), (x2, y1), (x2, y2), (x1, y2)]))
        x1_, x2_ = min(xy_[:, 0]), max(xy_[:, 0])
        y1_, y2_ = min(xy_[:, 1]), max(xy_[:, 1])
        viewLim_in_ic = Bbox.from_extents(x1_, y1_, x2_, y2_)

        # get the image, sliced if necessary. This is done in the ic.
        im, xmin, ymin, dxintv, dyintv, sx, sy = \
            self._get_unsampled_image(self._A, extent_in_ic, viewLim_in_ic)

        if im is None:
            return  # I'm not sure if this check is required. -JJL

        fc = self.axes.patch.get_facecolor()
        bg = mcolors.colorConverter.to_rgba(fc, 0)
        im.set_bg(*bg)

        # image input dimensions
        im.reset_matrix()
        numrows, numcols = im.get_size()

        if numrows <= 0 or numcols <= 0:
            return
        im.resize(numcols, numrows)  # just to create im.bufOut that
        # is required by backends. There
        # may be a better solution -JJL

        im._url = self.get_url()
        im._gid = self.get_gid()

        renderer.draw_image(gc, xmin, ymin, im, dxintv, dyintv,
                            trans_ic_to_canvas)
Example #10
from __future__ import division, print_function

import numpy as np

from matplotlib.image import AxesImage
from matplotlib.transforms import (IdentityTransform, TransformedBbox,
                                   BboxTransformFrom, Bbox)

__all__ = ['BaseImageArtist']

EMPTY_IMAGE = np.array([[np.nan]])
IDENTITY = IdentityTransform()

SUPPORTS_RESIZE = []

try:
    from matplotlib.backends.backend_tkagg import FigureCanvasTk
except ImportError:
    pass
else:
    SUPPORTS_RESIZE.append(FigureCanvasTk)

try:
    from matplotlib.backends.backend_qt5 import FigureCanvasQT
except ImportError:
    pass
else:
    SUPPORTS_RESIZE.append(FigureCanvasQT)

SUPPORTS_RESIZE = tuple(SUPPORTS_RESIZE)
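A sketch of how such a tuple is typically consumed (the helper name is hypothetical): isinstance accepts a tuple of classes, and an empty tuple matches nothing, so missing backends degrade gracefully.

def canvas_supports_resize(canvas):
    # True only when the canvas comes from a backend known to emit
    # resize events usable by the artist.
    return isinstance(canvas, SUPPORTS_RESIZE)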
Example #11
    def _make_image_special(self, A, density, in_bbox, out_bbox, clip_bbox, magnification=1.0,
                    unsampled=False, round_to_pixel_border=True, densities=None):
        """
        This function is a copy of _ImageBase._make_image(*args, **kwargs), restricted to the A.ndim == 2 case,
        and with an additional transformation of the alpha channel on top of the normal colormap RGBA behaviour

        if A is None:
            raise RuntimeError('You must first set the image '
                               'array or the image attribute')
        if A.size == 0:
            raise RuntimeError("_make_image must get a non-empty image. "
                               "Your Artist's draw method must filter before "
                               "this method is called.")

        clipped_bbox = Bbox.intersection(out_bbox, clip_bbox)

        if clipped_bbox is None:
            return None, 0, 0, None

        out_width_base = clipped_bbox.width * magnification
        out_height_base = clipped_bbox.height * magnification

        if out_width_base == 0 or out_height_base == 0:
            return None, 0, 0, None

        if self.origin == 'upper':
            # Flip the input image using a transform.  This avoids the
            # problem with flipping the array, which results in a copy
            # when it is converted to contiguous in the C wrapper
            t0 = Affine2D().translate(0, -A.shape[0]).scale(1, -1)
        else:
            t0 = IdentityTransform()

        t0 += (
                Affine2D()
                .scale(
                    in_bbox.width / A.shape[1],
                    in_bbox.height / A.shape[0])
                .translate(in_bbox.x0, in_bbox.y0)
                + self.get_transform())

        t = (t0
             + Affine2D().translate(
                    -clipped_bbox.x0,
                    -clipped_bbox.y0)
             .scale(magnification, magnification))

        # So that the image is aligned with the edge of the axes, we want
        # to round up the output width to the next integer.  This also
        # means scaling the transform just slightly to account for the
        # extra subpixel.
        if (t.is_affine and round_to_pixel_border and
                (out_width_base % 1.0 != 0.0 or out_height_base % 1.0 != 0.0)):
            out_width = int(ceil(out_width_base))
            out_height = int(ceil(out_height_base))
            extra_width = (out_width - out_width_base) / out_width_base
            extra_height = (out_height - out_height_base) / out_height_base
            t += Affine2D().scale(1.0 + extra_width, 1.0 + extra_height)
        else:
            out_width = int(out_width_base)
            out_height = int(out_height_base)

        if not unsampled:
            #if A.ndim not in (2, 3):
            #    raise ValueError("Invalid dimensions, got {}".format(A.shape))

            # if we are a 2D array, then we are running through the
            # norm + colormap transformation.  However, in general the
            # input data is not going to match the size on the screen, so we
            # have to resample to the correct number of pixels

            # TODO slice input array first
            inp_dtype = A.dtype
            a_min = A.min()
            a_max = A.max()
            # figure out the type we should scale to.  For floats,
            # leave as is.  For integers cast to an appropriate-sized
            # float.  Small integers get smaller floats in an attempt
            # to keep the memory footprint reasonable.
            if a_min is np.ma.masked:
                # all masked, so values don't matter
                a_min, a_max = np.int32(0), np.int32(1)
            if inp_dtype.kind == 'f':
                scaled_dtype = A.dtype
            else:
                # probably an integer of some type.
                da = a_max.astype(np.float64) - a_min.astype(np.float64)
                if da > 1e8:
                    # give more breathing room if a big dynamic range
                    scaled_dtype = np.float64
                else:
                    scaled_dtype = np.float32

            # scale the input data to [.1, .9].  The Agg
            # interpolators clip to [0, 1] internally, so use a
            # smaller input scale to identify which of the
            # interpolated points should be flagged as
            # over / under.
            # This may introduce numeric instabilities in very broadly
            # scaled data
            A_scaled = np.empty(A.shape, dtype=scaled_dtype)
            A_scaled[:] = A
            # clip scaled data around norm if necessary.
            # This is necessary for big numbers at the edge of
            # float64's ability to represent changes.  Applying
            # a norm first would be good, but ruins the interpolation
            # of over numbers.
            self.norm.autoscale_None(A)
            dv = (np.float64(self.norm.vmax) -
                  np.float64(self.norm.vmin))
            vmid = self.norm.vmin + dv / 2
            fact = 1e7 if scaled_dtype == np.float64 else 1e4
            newmin = vmid - dv * fact
            if newmin < a_min:
                newmin = None
            else:
                a_min = np.float64(newmin)
            newmax = vmid + dv * fact
            if newmax > a_max:
                newmax = None
            else:
                a_max = np.float64(newmax)
            if newmax is not None or newmin is not None:
                A_scaled = np.clip(A_scaled, newmin, newmax)

            A_scaled -= a_min
            # a_min and a_max might be ndarray subclasses, so use
            # .item() to get plain scalars (np.asscalar is deprecated)
            a_min = a_min.astype(scaled_dtype).item()
            a_max = a_max.astype(scaled_dtype).item()

            if a_min != a_max:
                A_scaled /= ((a_max - a_min) / 0.8)
            A_scaled += 0.1
            A_resampled = np.zeros((out_height, out_width),
                                   dtype=A_scaled.dtype)
            # resample the input data to the correct resolution and shape
            _image.resample(A_scaled, A_resampled,
                            t,
                            _interpd_[self.get_interpolation()],
                            self.get_resample(), 1.0,
                            self.get_filternorm() or 0.0,
                            self.get_filterrad() or 0.0)

            #alpha = self.get_alpha()

            new_density = np.zeros((out_height, out_width),
                                 dtype=density.dtype)
            _image.resample(density, new_density,
                            t,
                            _interpd_[self.get_interpolation()],
                            self.get_resample(), 1.0,
                            self.get_filternorm() or 0.0,
                            self.get_filterrad() or 0.0)

            # we are done with A_scaled now, remove from namespace
            # to be sure!
            del A_scaled
            # un-scale the resampled data to approximately the
            # original range things that interpolated to above /
            # below the original min/max will still be above /
            # below, but possibly clipped in the case of higher order
            # interpolation + drastically changing data.
            A_resampled -= 0.1
            if a_min != a_max:
                A_resampled *= ((a_max - a_min) / 0.8)
            A_resampled += a_min
            # if using NoNorm, cast back to the original datatype
            if isinstance(self.norm, mcolors.NoNorm):
                A_resampled = A_resampled.astype(A.dtype)

            mask = np.empty(A.shape, dtype=np.float32)
            if A.mask.shape == A.shape:
                # this is the case of a nontrivial mask
                mask[:] = np.where(A.mask, np.float32(np.nan),
                                   np.float32(1))
            else:
                mask[:] = 1

            # we always have to interpolate the mask to account for
            # non-affine transformations
            out_mask = np.zeros((out_height, out_width),
                                dtype=mask.dtype)
            _image.resample(mask, out_mask,
                            t,
                            _interpd_[self.get_interpolation()],
                            True, 1,
                            self.get_filternorm() or 0.0,
                            self.get_filterrad() or 0.0)
            # we are done with the mask, delete from namespace to be sure!
            del mask
            # Agg updates the out_mask in place.  If the pixel has
            # no image data it will not be updated (and still be 0
            # as we initialized it); if any input data that would go
            # into that output pixel is masked it will be `nan`; if all
            # the input data for a pixel is good it will be 1; and
            # if there is _some_ good data in that output pixel it
            # will be between [0, 1] (such as a rotated image).

            out_alpha = np.array(out_mask)
            out_mask = np.isnan(out_mask)
            out_alpha[out_mask] = 1
            new_alpha = alpha_from_densities(
                new_density, tresfrac=self.c_alpha_log_tresh,
                scaleval=self.c_alpha_log_enhance,
                logscale=self.c_alpha_logdensity)
            out_alpha *= new_alpha

            # mask and run through the norm
            output = self.norm(np.ma.masked_array(A_resampled, out_mask))

            # at this point output is either a 2D array of normed data
            # (of int or float)
            # or an RGBA array of re-sampled input
            output = self.to_rgba(output, bytes=True, norm=False)
            # output is now a correctly sized RGBA array of uint8

            # Apply alpha *after* if the input was greyscale without a mask
            if A.ndim == 2:
                # alpha = self.get_alpha()
                # if alpha is None:
                #    alpha = 1
                alpha = 1
                alpha_channel = output[:, :, 3]
                alpha_channel[:] = np.asarray(
                    np.asarray(alpha_channel, np.float32) * out_alpha * alpha,
                    np.uint8)

        else:
            if self._imcache is None:
                self._imcache = self.to_rgba(A, bytes=True, norm=(A.ndim == 2))
            output = self._imcache

            # Subset the input image to only the part that will be
            # displayed
            subset = TransformedBbox(
                clip_bbox, t0.frozen().inverted()).frozen()
            output = output[
                     int(max(subset.ymin, 0)):
                     int(min(subset.ymax + 1, output.shape[0])),
                     int(max(subset.xmin, 0)):
                     int(min(subset.xmax + 1, output.shape[1]))]

            t = Affine2D().translate(
                int(max(subset.xmin, 0)), int(max(subset.ymin, 0))) + t

        return output, clipped_bbox.x0, clipped_bbox.y0, t
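The [.1, .9] rescaling used above, in isolation (a toy sketch): compress the data so Agg's internal clip to [0, 1] leaves headroom for interpolation overshoot, then undo the mapping after resampling.

import numpy as np

a = np.array([10.0, 20.0, 30.0])
a_min, a_max = a.min(), a.max()
scaled = (a - a_min) / ((a_max - a_min) / 0.8) + 0.1   # [0.1, 0.5, 0.9]
restored = (scaled - 0.1) * ((a_max - a_min) / 0.8) + a_min
assert np.allclose(restored, a)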
Example #12
    def _set_lim_and_transforms(self):
        """
        This is called once when the plot is created to set up all the
        transforms for the data, text and grids.
        """
        # There are three important coordinate spaces going on here:
        #
        #    1. Data space: The space of the data itself
        #
        #    2. Axes space: The unit rectangle (0, 0) to (1, 1)
        #       covering the entire plot area.
        #
        #    3. Display space: The coordinates of the resulting image,
        #       often in pixels or dpi/inch.

        # This function makes heavy use of the Transform classes in
        # ``lib/matplotlib/transforms.py.`` For more information, see
        # the inline documentation there.

        # The goal of the first two transformations is to get from the
        # data space (in this case longitude and latitude) to axes
        # space.  It is separated into a non-affine and affine part so
        # that the non-affine part does not have to be recomputed when
        # a simple affine change to the figure has been made (such as
        # resizing the window or changing the dpi).

        # 1) The core transformation from data space into
        # rectilinear space defined in the HammerTransform class.
        self.transProjection = IdentityTransform()
        # 2) The above has an output range that is not in the unit
        # rectangle, so scale and translate it so it fits correctly
        # within the axes.  The peculiar calculations of xscale and
        # yscale are specific to an Aitoff-Hammer projection, so don't
        # worry about them too much.
        self.transAffine = Affine2D.from_values(1., 0, 0.5,
                                                np.sqrt(3) / 2., 0, 0)
        self.transAffinedep = Affine2D.from_values(1., 0, -0.5,
                                                   np.sqrt(3) / 2., 0, 0)
        #self.transAffine = IdentityTransform()

        # 3) This is the transformation from axes space to display
        # space.
        self.transAxes = BboxTransformTo(self.bbox)

        # Now put these 3 transforms together -- from data all the way
        # to display coordinates.  Using the '+' operator, these
        # transforms will be applied "in order".  The transforms are
        # automatically simplified, if possible, by the underlying
        # transformation framework.
        self.transData = \
            self.transProjection + \
            self.transAffine + \
            self.transAxes

        # The main data transformation is set up.  Now deal with
        # gridlines and tick labels.

        # Longitude gridlines and ticklabels.  The input to these
        # transforms are in display space in x and axes space in y.
        # Therefore, the input values will be in range (-xmin, 0),
        # (xmax, 1).  The goal of these transforms is to go from that
        # space to display space.  The tick labels will be offset 4
        # pixels from the equator.

        self._xaxis_pretransform = IdentityTransform()
        self._xaxis_transform = \
            self._xaxis_pretransform + \
            self.transData
        self._xaxis_text1_transform = \
            Affine2D().scale(1.0, 0.0) + \
            self.transData + \
            Affine2D().translate(0.0, -20.0)
        self._xaxis_text2_transform = \
            Affine2D().scale(1.0, 0.0) + \
            self.transData + \
            Affine2D().translate(0.0, -4.0)

        # Now set up the transforms for the latitude ticks.  The input to
        # these transforms are in axes space in x and display space in
        # y.  Therefore, the input values will be in range (0, -ymin),
        # (1, ymax).  The goal of these transforms is to go from that
        # space to display space.  The tick labels will be offset 4
        # pixels from the edge of the axes ellipse.

        self._yaxis_transform = self.transData
        yaxis_text_base = \
            self.transProjection + \
            (self.transAffine + \
             self.transAxes)
        self._yaxis_text1_transform = \
            yaxis_text_base + \
            Affine2D().translate(-8.0, 0.0)
        self._yaxis_text2_transform = \
            yaxis_text_base + \
            Affine2D().translate(8.0, 0.0)
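The same pipeline in miniature (the skewing matrix is the one defined above; the axes box is a toy stand-in for self.bbox): '+' chains transforms so they apply left to right.

import numpy as np
from matplotlib.transforms import (Affine2D, Bbox, BboxTransformTo,
                                   IdentityTransform)

transProjection = IdentityTransform()
transAffine = Affine2D.from_values(1., 0, 0.5, np.sqrt(3) / 2., 0, 0)
transAxes = BboxTransformTo(Bbox.from_bounds(100, 100, 400, 300))
transData = transProjection + transAffine + transAxes

# (0, 0) in data space lands at the lower-left corner of the axes box.
print(transData.transform([(0.0, 0.0)]))  # [[100. 100.]]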
Example #13
	def draw_clouds(self,data_type="SASA",color_type="Blues"):

		fig_size_in_points_x = self.molecule.molsize1+int(200 * float(self.molecule.molsize1)/900) #To give enough expansion to the cloud - might have to update later
		fig_size_in_points_y = self.molecule.molsize2+int(200 * float(self.molecule.molsize1)/900) #To give enough expansion to the cloud - might have to update later
		color = self.normalise_colour(data_type,color_type)
		plt.figure(figsize=(float(fig_size_in_points_x) / 72,
							float(fig_size_in_points_y) / 72))

		#find whether coordinates fall below 0 and therefore will not show
		min_x = min([x for xds in self.shared_coords_x.values() for x in xds])
		if min_x > 0:
			min_x = 0
		min_y = min([y for yds in self.shared_coords_y.values() for y in yds])
		if min_y > 0:
			min_y = 0
		for atom in self.molecule.ligand_atom_coords_from_diagr:
			xs = self.shared_coords_x[atom]
			ys = self.shared_coords_y[atom]
			if len(xs) == 0:
				continue

			segs = [([xs[0] - min_x], [ys[0] - min_y])]
			for idx1, idx2 in zip(xrange(0, len(xs)-1), xrange(1, len(xs))):
				dx = xs[idx1] - xs[idx2]
				dy = ys[idx1] - ys[idx2]
				dist = (dx ** 2 + dy ** 2) ** 0.5
				if dist > 40:
					segs.append(([],[]))
				segs[-1][0].append(xs[idx2]-min_x)
				segs[-1][1].append(ys[idx2]-min_y)
			for seg_x, seg_y in segs:
				atom_key = [k for k, v in self.lig_descr.ligand_atoms.items()
							if v["name"] == atom][0]
				plt.plot(seg_x, seg_y, linewidth=8, c=color[atom_key],
						 transform=IdentityTransform())
		plt.axis('equal')
		plt.axis("off")


		pylab.savefig("cloud.svg", dpi=100, transparent=True)
		self.manage_cloud_diagrams(min_x, min_y)
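The gap-splitting loop above, extracted into a standalone sketch: consecutive points further apart than a threshold start a new segment, so disconnected cloud regions are not joined by stray lines.

def split_segments(xs, ys, max_gap=40):
    segs = [([xs[0]], [ys[0]])]
    for (x1, y1), (x2, y2) in zip(zip(xs, ys), zip(xs[1:], ys[1:])):
        if ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5 > max_gap:
            segs.append(([], []))  # gap too large: start a new segment
        segs[-1][0].append(x2)
        segs[-1][1].append(y2)
    return segs

print(split_segments([0, 1, 100], [0, 0, 0]))
# [([0, 1], [0, 0]), ([100], [0])]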
Example #14
Modification of Chris Beaumont's mpl-modest-image package to allow the use of
set_extent.
"""
from __future__ import print_function, division

import matplotlib
rcParams = matplotlib.rcParams

import matplotlib.image as mi
import matplotlib.colors as mcolors
import matplotlib.cbook as cbook
from matplotlib.transforms import IdentityTransform, Affine2D

import numpy as np

IDENTITY_TRANSFORM = IdentityTransform()


class ModestImage(mi.AxesImage):
    """
    Computationally modest image class.

    ModestImage is an extension of the Matplotlib AxesImage class
    better suited for the interactive display of larger images. Before
    drawing, ModestImage resamples the data array based on the screen
    resolution and view window. This has very little effect on the
    appearance of the image, but can substantially cut down on
    computation since calculations of unresolved or clipped pixels
    are skipped.

    The interface of ModestImage is the same as AxesImage. However, it
Example #15
    usetex = plt.rcParams["text.usetex"]

    fig = plt.figure()

    # EXAMPLE 1

    ax = plt.subplot(211)

    arr = plt.imread(get_sample_data("grace_hopper.png"))

    text_path = TextPath((0, 0), "!?", size=150)
    p = PathClippedImagePatch(text_path,
                              arr,
                              ec="k",
                              transform=IdentityTransform())

    # p.set_clip_on(False)

    # make offset box
    offsetbox = AuxTransformBox(IdentityTransform())
    offsetbox.add_artist(p)

    # make anchored offset box
    ao = AnchoredOffsetbox(loc='upper left',
                           child=offsetbox,
                           frameon=True,
                           borderpad=0.2)
    ax.add_artist(ao)

    # another text
Example #16
    def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
                    unsampled=False, round_to_pixel_border=True):
        """
        Normalize, rescale and color the image `A` from the given
        in_bbox (in data space), to the given out_bbox (in pixel
        space) clipped to the given clip_bbox (also in pixel space),
        and magnified by the magnification factor.

        `A` may be a greyscale image (MxN) with a dtype of `float32`,
        `float64`, `uint16` or `uint8`, or an RGBA image (MxNx4) with
        a dtype of `float32`, `float64`, or `uint8`.

        If `unsampled` is True, the image will not be scaled, but an
        appropriate affine transformation will be returned instead.

        If `round_to_pixel_border` is True, the output image size will
        be rounded to the nearest pixel boundary.  This makes the
        images align correctly with the axes.  It should not be used
        in cases where you want exact scaling, however, such as
        FigureImage.

        Returns the resulting (image, x, y, trans), where (x, y) is
        the upper left corner of the result in pixel space, and
        `trans` is the affine transformation from the image to pixel
        space.
        """
        if A is None:
            raise RuntimeError('You must first set the image'
                               ' array or the image attribute')

        clipped_bbox = Bbox.intersection(out_bbox, clip_bbox)

        if clipped_bbox is None:
            return None, 0, 0, None

        out_width_base = clipped_bbox.width * magnification
        out_height_base = clipped_bbox.height * magnification

        if out_width_base == 0 or out_height_base == 0:
            return None, 0, 0, None

        if self.origin == 'upper':
            # Flip the input image using a transform.  This avoids the
            # problem with flipping the array, which results in a copy
            # when it is converted to contiguous in the C wrapper
            t0 = Affine2D().translate(0, -A.shape[0]).scale(1, -1)
        else:
            t0 = IdentityTransform()

        t0 += (
            Affine2D()
            .scale(
                in_bbox.width / A.shape[1],
                in_bbox.height / A.shape[0])
            .translate(in_bbox.x0, in_bbox.y0)
            + self.get_transform())

        t = (t0
             + Affine2D().translate(
                 -clipped_bbox.x0,
                 -clipped_bbox.y0)
             .scale(magnification, magnification))

        # So that the image is aligned with the edge of the axes, we want
        # to round up the output width to the next integer.  This also
        # means scaling the transform just slightly to account for the
        # extra subpixel.
        if (t.is_affine and round_to_pixel_border and
                (out_width_base % 1.0 != 0.0 or out_height_base % 1.0 != 0.0)):
            out_width = int(ceil(out_width_base))
            out_height = int(ceil(out_height_base))
            extra_width = (out_width - out_width_base) / out_width_base
            extra_height = (out_height - out_height_base) / out_height_base
            t += Affine2D().scale(
                1.0 + extra_width, 1.0 + extra_height)
        else:
            out_width = int(out_width_base)
            out_height = int(out_height_base)

        if not unsampled:
            created_rgba_mask = False

            if A.ndim not in (2, 3):
                raise ValueError("Invalid dimensions, got %s" % (A.shape,))

            if A.ndim == 2:
                A = self.norm(A)
                if A.dtype.kind == 'f':
                    # If the image is greyscale, convert to RGBA and
                    # use the extra channels for resizing the over,
                    # under, and bad pixels.  This is needed because
                    # Agg's resampler is very aggressive about
                    # clipping to [0, 1] and we use out-of-bounds
                    # values to carry the over/under/bad information
                    rgba = np.empty((A.shape[0], A.shape[1], 4), dtype=A.dtype)
                    rgba[..., 0] = A  # normalized data
                    # this is to work around spurious warnings coming
                    # out of masked arrays.
                    with np.errstate(invalid='ignore'):
                        rgba[..., 1] = A < 0  # under data
                        rgba[..., 2] = A > 1  # over data
                    rgba[..., 3] = ~A.mask  # bad data
                    A = rgba
                    output = np.zeros((out_height, out_width, 4),
                                      dtype=A.dtype)
                    alpha = 1.0
                    created_rgba_mask = True
                else:
                    # For norms that output integers (e.g. NoNorm
                    # and BoundaryNorm), convert to RGBA space before
                    # interpolating.  This is needed because the
                    # Agg resampler only works on floats in the
                    # range [0, 1] and because interpolating indexes
                    # into an arbitrary LUT may be problematic.
                    #
                    # This falls back to interpolating in RGBA space, which
                    # can produce its own artifacts of colors not in the map
                    # showing up in the final image.
                    A = self.cmap(A, alpha=self.get_alpha(), bytes=True)

            if not created_rgba_mask:
                # Always convert to RGBA, even if only RGB input
                if A.shape[2] == 3:
                    A = _rgb_to_rgba(A)
                elif A.shape[2] != 4:
                    raise ValueError("Invalid dimensions, got %s" % (A.shape,))

                output = np.zeros((out_height, out_width, 4), dtype=A.dtype)

                alpha = self.get_alpha()
                if alpha is None:
                    alpha = 1.0

            _image.resample(
                A, output, t, _interpd_[self.get_interpolation()],
                self.get_resample(), alpha,
                self.get_filternorm() or 0.0, self.get_filterrad() or 0.0)

            if created_rgba_mask:
                # Convert back to a masked greyscale array so
                # colormapping works correctly
                hid_output = output
                output = np.ma.masked_array(
                    hid_output[..., 0], hid_output[..., 3] < 0.5)
                # relabel under data
                output[hid_output[..., 1] > .5] = -1
                # relabel over data
                output[hid_output[..., 2] > .5] = 2

            output = self.to_rgba(output, bytes=True, norm=False)

            # Apply alpha *after* if the input was greyscale without a mask
            if A.ndim == 2 or created_rgba_mask:
                alpha = self.get_alpha()
                if alpha is not None and alpha != 1.0:
                    alpha_channel = output[:, :, 3]
                    alpha_channel[:] = np.asarray(
                        np.asarray(alpha_channel, np.float32) * alpha,
                        np.uint8)
        else:
            if self._imcache is None:
                self._imcache = self.to_rgba(A, bytes=True, norm=(A.ndim == 2))
            output = self._imcache

            # Subset the input image to only the part that will be
            # displayed
            subset = TransformedBbox(
                clip_bbox, t0.frozen().inverted()).frozen()
            output = output[
                int(max(subset.ymin, 0)):
                int(min(subset.ymax + 1, output.shape[0])),
                int(max(subset.xmin, 0)):
                int(min(subset.xmax + 1, output.shape[1]))]

            t = Affine2D().translate(
                int(max(subset.xmin, 0)), int(max(subset.ymin, 0))) + t

        return output, clipped_bbox.x0, clipped_bbox.y0, t
Example #17
    def _make_image(self,
                    A,
                    in_bbox,
                    out_bbox,
                    clip_bbox,
                    magnification=1.0,
                    unsampled=False,
                    round_to_pixel_border=True):
        """
        Normalize, rescale and color the image `A` from the given
        in_bbox (in data space), to the given out_bbox (in pixel
        space) clipped to the given clip_bbox (also in pixel space),
        and magnified by the magnification factor.

        `A` may be a greyscale image (MxN) with a dtype of `float32`,
        `float64`, `uint16` or `uint8`, or an RGBA image (MxNx4) with
        a dtype of `float32`, `float64`, or `uint8`.

        If `unsampled` is True, the image will not be scaled, but an
        appropriate affine transformation will be returned instead.

        If `round_to_pixel_border` is True, the output image size will
        be rounded to the nearest pixel boundary.  This makes the
        images align correctly with the axes.  It should not be used
        in cases where you want exact scaling, however, such as
        FigureImage.

        Returns the resulting (image, x, y, trans), where (x, y) is
        the upper left corner of the result in pixel space, and
        `trans` is the affine transformation from the image to pixel
        space.
        """
        if A is None:
            raise RuntimeError('You must first set the image'
                               ' array or the image attribute')

        clipped_bbox = Bbox.intersection(out_bbox, clip_bbox)

        if clipped_bbox is None:
            return None, 0, 0, None

        out_width_base = clipped_bbox.width * magnification
        out_height_base = clipped_bbox.height * magnification

        if out_width_base == 0 or out_height_base == 0:
            return None, 0, 0, None

        if self.origin == 'upper':
            # Flip the input image using a transform.  This avoids the
            # problem with flipping the array, which results in a copy
            # when it is converted to contiguous in the C wrapper
            t0 = Affine2D().translate(0, -A.shape[0]).scale(1, -1)
        else:
            t0 = IdentityTransform()

        t0 += (Affine2D().scale(in_bbox.width / A.shape[1], in_bbox.height /
                                A.shape[0]).translate(in_bbox.x0, in_bbox.y0) +
               self.get_transform())

        t = (t0 +
             Affine2D().translate(-clipped_bbox.x0, -clipped_bbox.y0).scale(
                 magnification, magnification))

        # So that the image is aligned with the edge of the axes, we want
        # to round up the output width to the next integer.  This also
        # means scaling the transform just slightly to account for the
        # extra subpixel.
        if (t.is_affine and round_to_pixel_border and
            (out_width_base % 1.0 != 0.0 or out_height_base % 1.0 != 0.0)):
            out_width = int(ceil(out_width_base) + 1)
            out_height = int(ceil(out_height_base) + 1)
            extra_width = (out_width - out_width_base) / out_width_base
            extra_height = (out_height - out_height_base) / out_height_base
            t += Affine2D().scale(1.0 + extra_width, 1.0 + extra_height)
        else:
            out_width = int(out_width_base)
            out_height = int(out_height_base)

        if not unsampled:
            created_rgba_mask = False

            if A.ndim == 2:
                A = self.norm(A)
                # If the image is greyscale, convert to RGBA with the
                # correct alpha channel for resizing
                rgba = np.empty((A.shape[0], A.shape[1], 4), dtype=A.dtype)
                rgba[..., 0:3] = np.expand_dims(A, 2)
                if A.dtype.kind == 'f':
                    rgba[..., 3] = ~A.mask
                else:
                    rgba[..., 3] = np.where(A.mask, 0, np.iinfo(A.dtype).max)
                A = rgba
                output = np.zeros((out_height, out_width, 4), dtype=A.dtype)
                alpha = 1.0
                created_rgba_mask = True
            elif A.ndim == 3:
                # Always convert to RGBA, even if only RGB input
                if A.shape[2] == 3:
                    A = _rgb_to_rgba(A)
                elif A.shape[2] != 4:
                    raise ValueError("Invalid dimensions, got %s" %
                                     (A.shape, ))

                output = np.zeros((out_height, out_width, 4), dtype=A.dtype)

                alpha = self.get_alpha()
                if alpha is None:
                    alpha = 1.0
            else:
                raise ValueError("Invalid dimensions, got %s" % (A.shape, ))

            _image.resample(A, output, t, _interpd_[self.get_interpolation()],
                            self.get_resample(), alpha,
                            self.get_filternorm() or 0.0,
                            self.get_filterrad() or 0.0)

            if created_rgba_mask:
                # Convert back to a masked greyscale array so
                # colormapping works correctly
                output = np.ma.masked_array(output[..., 0],
                                            output[..., 3] < 0.5)

            output = self.to_rgba(output, bytes=True, norm=False)

            # Apply alpha *after* if the input was greyscale without a mask
            if A.ndim == 2 or created_rgba_mask:
                alpha = self.get_alpha()
                if alpha is not None and alpha != 1.0:
                    alpha_channel = output[:, :, 3]
                    alpha_channel[:] = np.asarray(
                        np.asarray(alpha_channel, np.float32) * alpha,
                        np.uint8)
        else:
            if self._imcache is None:
                self._imcache = self.to_rgba(A, bytes=True, norm=(A.ndim == 2))
            output = self._imcache

            # Subset the input image to only the part that will be
            # displayed
            subset = TransformedBbox(clip_bbox,
                                     t0.frozen().inverted()).frozen()
            output = output[int(max(subset.ymin, 0)
                                ):int(min(subset.ymax + 1, output.shape[0])),
                            int(max(subset.xmin, 0)
                                ):int(min(subset.xmax + 1, output.shape[1]))]

            t = Affine2D().translate(int(max(subset.xmin, 0)),
                                     int(max(subset.ymin, 0))) + t

        return output, clipped_bbox.x0, clipped_bbox.y0, t
Example #18
    def _draw_unsampled_image(self, renderer, gc):
        """
        Draw the image without resampling. The renderer must support a
        draw_image method with a scale parameter.
        """
        trans = self.get_transform()  # axes.transData

        # convert the coordinates to the intermediate coordinate (ic).
        # The transformation from the ic to the canvas is a pure
        # affine transform.

        # A straight-forward way is to use the non-affine part of the
        # original transform for conversion to the ic.

        # first, convert the image extent to the ic
        x_llc, x_trc, y_llc, y_trc = self.get_extent()

        xy = trans.transform(np.array([(x_llc, y_llc),
                                       (x_trc, y_trc)]))

        _xx1, _yy1 = xy[0]
        _xx2, _yy2 = xy[1]

        extent_in_ic = _xx1, _xx2, _yy1, _yy2

        # define trans_ic_to_canvas : unless _image_skew_coordinate is
        # set, it is simply the affine part of the original transform.
        if self._image_skew_coordinate:
            # skew the image when required.
            x_lrc, y_lrc = self._image_skew_coordinate
            xy2 = trans.transform(np.array([(x_lrc, y_lrc)]))
            _xx3, _yy3 = xy2[0]

            tr_rotate_skew = self._get_rotate_and_skew_transform(_xx1, _yy1,
                                                                 _xx2, _yy2,
                                                                 _xx3, _yy3)
            trans_ic_to_canvas = tr_rotate_skew
        else:
            trans_ic_to_canvas = IdentityTransform()

        # Now, viewLim in the ic.  It can be rotated and can be
        # skewed. Make it big enough.
        x1, y1, x2, y2 = self.axes.bbox.extents
        trans_canvas_to_ic = trans_ic_to_canvas.inverted()
        xy_ = trans_canvas_to_ic.transform(np.array([(x1, y1),
                                                     (x2, y1),
                                                     (x2, y2),
                                                     (x1, y2)]))
        x1_, x2_ = min(xy_[:, 0]), max(xy_[:, 0])
        y1_, y2_ = min(xy_[:, 1]), max(xy_[:, 1])
        viewLim_in_ic = Bbox.from_extents(x1_, y1_, x2_, y2_)

        # get the image, sliced if necessary. This is done in the ic.
        im, xmin, ymin, dxintv, dyintv, sx, sy = \
            self._get_unsampled_image(self._A, extent_in_ic, viewLim_in_ic)

        if im is None:
            return  # I'm not sure if this check is required. -JJL

        fc = self.axes.patch.get_facecolor()
        bg = mcolors.colorConverter.to_rgba(fc, 0)
        im.set_bg(*bg)

        # image input dimensions
        im.reset_matrix()
        numrows, numcols = im.get_size()

        if numrows <= 0 or numcols <= 0:
            return
        im.resize(numcols, numrows)  # just to create im.bufOut that
                                     # is required by backends. There
                                     # may be a better solution -JJL

        im._url = self.get_url()
        im._gid = self.get_gid()

        renderer.draw_image(gc, xmin, ymin, im, dxintv, dyintv,
                            trans_ic_to_canvas)
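The canvas-to-intermediate-coordinate step above, in isolation (toy rotation as a stand-in for trans_ic_to_canvas): map the four canvas corners through the inverse transform and take their bounding box, which is what makes the view limits "big enough" for rotated or skewed images.

import numpy as np
from matplotlib.transforms import Affine2D, Bbox

tr = Affine2D().rotate_deg(30)   # stand-in for trans_ic_to_canvas
corners = np.array([(0, 0), (640, 0), (640, 480), (0, 480)])
xy_ = tr.inverted().transform(corners)
viewLim = Bbox.from_extents(xy_[:, 0].min(), xy_[:, 1].min(),
                            xy_[:, 0].max(), xy_[:, 1].max())
print(viewLim.extents)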
Example #19
    def __init__(self,
                 xy,
                 p1,
                 p2,
                 size=75,
                 unit="points",
                 ax=None,
                 text="",
                 textposition="inside",
                 text_kw=None,
                 **kwargs):
        """
        Parameters
        ----------
        xy, p1, p2 : tuple or array of two floats
            Center position and two points. Angle annotation is drawn between
            the two vectors connecting *p1* and *p2* with *xy*, respectively.
            Units are data coordinates.

        size : float
            Diameter of the angle annotation in units specified by *unit*.

        unit : str
            One of the following strings to specify the unit of *size*:

            * "pixels": pixels
            * "points": points, use points instead of pixels to not have a
              dependence on the DPI
            * "axes width", "axes height": relative units of Axes width, height
            * "axes min", "axes max": minimum or maximum of relative Axes
              width, height

        ax : `matplotlib.axes.Axes`
            The Axes to add the angle annotation to.

        text : str
            The text to mark the angle with.

        textposition : {"inside", "outside", "edge"}
            Whether to show the text in- or outside the arc. "edge" can be used
            for custom positions anchored at the arc's edge.

        text_kw : dict
            Dictionary of arguments passed to the Annotation.

        **kwargs
            Further parameters are passed to `matplotlib.patches.Arc`. Use this
            to specify, e.g., the color, linewidth, etc. of the arc.

        """
        self.ax = ax or plt.gca()
        self._xydata = xy  # in data coordinates
        self.vec1 = p1
        self.vec2 = p2
        self.size = size
        self.unit = unit
        self.textposition = textposition

        super().__init__(self._xydata,
                         size,
                         size,
                         angle=0.0,
                         theta1=self.theta1,
                         theta2=self.theta2,
                         **kwargs)

        self.set_transform(IdentityTransform())
        self.ax.add_patch(self)

        self.kw = dict(ha="center",
                       va="center",
                       xycoords=IdentityTransform(),
                       xytext=(0, 0),
                       textcoords="offset points",
                       annotation_clip=True)
        self.kw.update(text_kw or {})
        self.text = self.ax.annotate(text, xy=self._center, **self.kw)
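A minimal usage sketch, assuming the rest of the AngleAnnotation class (its theta1/theta2 properties and _center attribute, which are not shown on this page) is in scope:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([2, 0, 5], [5, 0, 3])  # two rays meeting at the origin
am = AngleAnnotation((0, 0), (2, 5), (5, 3), ax=ax, size=75,
                     text=r"$\theta$")
plt.show()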