示例#1
0
 def _debug_draw_clipping_path(self, x, y, width, height):
     """Render the clipping rectangle as a thick green outline (debug aid)."""
     # Serialize the current CTM into an SVG 'matrix(...)' transform string.
     params = affine.affine_params(self.get_ctm())
     transform = 'matrix(%f,%f,%f,%f,%f,%f)' % tuple(params)
     debug_style = _mkstyle({'stroke-width': 5,
                             'fill': 'none',
                             'stroke': 'green'})
     self._emit('rect',
                x=x, y=y, width=width, height=height,
                transform=transform,
                style=debug_style)
示例#2
0
 def device_set_clipping_path(self, x, y, width, height):
     """Register a rectangular clip path (SVG <clipPath>) under the current CTM."""
     # Debug aid: uncomment to outline the clip rect instead of clipping.
     #self._debug_draw_clipping_path(x, y, width, height)
     #return
     global _clip_counter
     # Each clip path gets a document-unique id so shapes can reference it
     # later via url(#...).
     self.state._clip_id = 'clip_%d' % _clip_counter
     _clip_counter += 1
     x, y = self._fixpoints([[x, y]])[0]
     params = affine.affine_params(self.get_ctm())
     transform = 'matrix(%f,%f,%f,%f,%f,%f)' % tuple(params)
     clip_rect = self._build('rect', x=x, y=y, width=width, height=height)
     clip_group = self._build('clipPath', contents=clip_rect,
                              id=self.state._clip_id)
     self._emit('g', transform=transform, contents=clip_group)
示例#3
0
    def device_fill_points(self, points, mode):
        """Emit an SVG <polyline>/<polygon> for *points* according to *mode*.

        STROKE emits a stroked <polyline>; every other mode emits a
        <polygon> whose fill, stroke and fill-rule are derived from *mode*
        and the current graphics state.
        """
        points = self._fixpoints(points)
        # BUG FIX: EOF_FILL was missing from this tuple, so EOF_FILL mode
        # produced fill='none' AND stroke='none' -- an invisible polygon.
        # The matplotlib backend in this project treats EOF_FILL as a fill
        # mode, and the evenodd rule below already expects it to fill.
        if mode in (FILL, EOF_FILL, FILL_STROKE, EOF_FILL_STROKE):
            fill = self._color(self.state.fill_color)
        else:
            fill = 'none'
        if mode in (STROKE, FILL_STROKE, EOF_FILL_STROKE):
            stroke = self._color(self.state.line_color)
        else:
            stroke = 'none'
        # Even-odd vs nonzero winding rule (matters for self-intersections).
        if mode in (EOF_FILL_STROKE, EOF_FILL):
            rule = 'evenodd'
        else:
            rule = 'nonzero'
        linecap = line_cap_map[self.state.line_cap]
        linejoin = line_join_map[self.state.line_join]
        dasharray = self._dasharray()
        width = '%3.3f' % self.state.line_width
        # Reference the active clip path, if one was installed by
        # device_set_clipping_path.
        clip_id = getattr(self.state, '_clip_id', None)
        if clip_id:
            clip = 'url(#' + clip_id + ')'
        else:
            clip = None
        a, b, c, d, tx, ty = affine.affine_params(self.get_ctm())
        transform = 'matrix(%(a)f,%(b)f,%(c)f,%(d)f,%(tx)f,%(ty)f)' % locals()
        if mode == STROKE:
            opacity = '%1.3f' % self.state.line_color[-1]
            # default_filter drops attributes equal to their SVG defaults
            # (second tuple element) to keep the output small.
            self._emit('polyline',
                        transform=transform,
                        points=_strpoints(points),
                        kw=default_filter({'clip-path': (clip, None)}),
                        style=_mkstyle(default_filter({'opacity': (opacity, "1.000"),
                                        'stroke': stroke,
                                        'fill': 'none',
                                        'stroke-width': (width, "1.000"),
                                        'stroke-linejoin': (linejoin, 'miter'),
                                        'stroke-linecap': (linecap, 'butt'),
                                        'stroke-dasharray': (dasharray, 'none')})))

        else:
            opacity = '%1.3f' % self.state.fill_color[-1]
            self._emit('polygon',
                        transform=transform,
                        points=_strpoints(points),
                        kw=default_filter({'clip-path': (clip, None)}),
                        style=_mkstyle(default_filter({'opacity': (opacity, "1.000"),
                                        'stroke-width': (width, "1.000"),
                                        'fill': fill,
                                        'fill-rule': rule,
                                        'stroke': stroke,
                                        'stroke-linejoin': (linejoin, 'miter'),
                                        'stroke-linecap': (linecap, 'butt'),
                                        'stroke-dasharray': (dasharray, 'none')})))
示例#4
0
 def device_show_text(self, text):
     """Emit an SVG <text> element under the combined text/CTM transform."""
     # Escape non-ASCII characters as XML character references.
     text = text.encode('ascii', 'xmlcharrefreplace')
     ttm = self.get_text_matrix()
     ctm = self.get_ctm()  # the user-space CTM, not device_ctm!!
     combined = affine.concat(ctm, ttm)
     #height = self.get_full_text_extent(text)[1]
     a, b, c, d, tx, ty = affine.affine_params(combined)
     # The trailing scale(1,-1) undoes the document-wide y flip for glyphs.
     transform = ('matrix(%f,%f,%f,%f,%f,%f) scale(1,-1)'
                  % (a, b, c, d, tx, ty))
     attrs = {'font-family': self.font.fontName,
              'font-size': str(self.font_size),
              'xml:space': 'preserve',
              'transform': transform}
     self._emit('text', contents=text, kw=attrs)
示例#5
0
文件: ps.py 项目: conkiztador/pdviper
    def device_transform_device_ctm(self, func, args):
        """Write the PostScript operator corresponding to a CTM update.

        LOAD_CTM is an absolute reset: emit ``initmatrix`` and then treat
        the request as a CONCAT_CTM of the new matrix.
        """
        out = self.contents.write
        if func == LOAD_CTM:
            out('initmatrix\n')
            func = CONCAT_CTM

        if func == SCALE_CTM:
            sx, sy = args
            out('%.3f %.3f scale\n' % (sx, sy))
        elif func == ROTATE_CTM:
            angle, = args
            out('%.3f rotate\n' % angle)
        elif func == TRANSLATE_CTM:
            dx, dy = args
            out('%.3f %.3f translate\n' % (dx, dy))
        elif func == CONCAT_CTM:
            matrix, = args
            out('[%.3f %.3f %.3f %.3f %.3f %.3f] concat\n'
                % affine.affine_params(matrix))
示例#6
0
 def device_fill_points(self, points, mode):
     """Draw the point list through the matplotlib backend, honoring mode."""
     state = self.state
     fill_modes = (FILL, FILL_STROKE, EOF_FILL_STROKE, EOF_FILL)
     stroke_modes = (STROKE, FILL_STROKE, EOF_FILL_STROKE)
     fill = tuple(state.fill_color) if mode in fill_modes else None
     # Fill-only modes draw the outline in the fill colour.
     if mode in stroke_modes:
         color = tuple(state.line_color)
     else:
         color = tuple(state.fill_color)
     gc = self._backend.new_gc()
     gc.set_linewidth(state.line_width)
     dashes = state.line_dash
     # Apply dashes only when the pattern has a non-zero entry.
     if not (dashes[1] == 0).all():
         gc.set_dashes(dashes[0], list(dashes[1]))
     if state.clipping_path:
         gc.set_clip_path(self._get_transformed_clip_path())
     gc.set_joinstyle(line_join_map[state.line_join])
     gc.set_capstyle(line_cap_map[state.line_cap])
     gc.set_foreground(color, isRGB=True)
     gc.set_alpha(state.alpha)
     ctm = Affine2D.from_values(*affine.affine_params(self.get_ctm()))
     self._backend.draw_path(gc, Path(points), ctm, fill)
     gc.restore()
示例#7
0
文件: mpl.py 项目: 0thm4n/pdviper
 def device_fill_points(self, points, mode):
     """Render a path through the matplotlib backend per the draw mode."""
     does_fill = mode in (FILL, FILL_STROKE, EOF_FILL_STROKE, EOF_FILL)
     does_stroke = mode in (STROKE, FILL_STROKE, EOF_FILL_STROKE)
     fill = tuple(self.state.fill_color) if does_fill else None
     # When not stroking, the outline colour falls back to the fill colour.
     color = tuple(self.state.line_color if does_stroke
                   else self.state.fill_color)
     mpl_path = Path(points)
     gc = self._backend.new_gc()
     gc.set_linewidth(self.state.line_width)
     dash_info = self.state.line_dash
     # Only install dashes when the pattern contains a non-zero entry.
     if not (dash_info[1] == 0).all():
         gc.set_dashes(dash_info[0], list(dash_info[1]))
     if self.state.clipping_path:
         gc.set_clip_path(self._get_transformed_clip_path())
     gc.set_joinstyle(line_join_map[self.state.line_join])
     gc.set_capstyle(line_cap_map[self.state.line_cap])
     gc.set_foreground(color, isRGB=True)
     gc.set_alpha(self.state.alpha)
     transform = Affine2D.from_values(*affine.affine_params(self.get_ctm()))
     self._backend.draw_path(gc, mpl_path, transform, fill)
     gc.restore()
示例#8
0
 def set_text_position(self, x, y):
     """Reposition the text matrix at (x, y), keeping its linear part."""
     params = list(affine.affine_params(self.state.text_matrix))
     params[4:6] = [x, y]  # replace only the translation components
     self.state.text_matrix = affine.affine_from_values(*params)
示例#9
0
文件: ps.py 项目: conkiztador/pdviper
    def device_draw_image(self, img, rect):
        """
        draw_image(img_gc, rect=(x,y,w,h))

        Draws another gc into this one.  If 'rect' is not provided, then
        the image gc is drawn into this one, rooted at (0,0) and at full
        pixel size.  If 'rect' is provided, then the image is resized
        into the (w,h) given and drawn into this GC at point (x,y).

        img_gc is either a Numeric array (WxHx3 or WxHx4) or a GC from Kiva's
        Agg backend (kiva.agg.GraphicsContextArray).

        Requires the Python Imaging Library (PIL).
        """
        from PIL import Image as PilImage

        # Normalize the input into an Agg graphics context (converted_img)
        # plus the PIL pixel-format string to decode it with.
        if type(img) == type(array([])):
            # Numeric array
            converted_img = agg.GraphicsContextArray(img, pix_format='rgba32')
            format = 'RGBA'
        elif isinstance(img, agg.GraphicsContextArray):
            if img.format().startswith('RGBA'):
                # BUG FIX: converted_img was left unbound in this branch and
                # the next one, causing a NameError at the fromstring() call.
                converted_img = img
                format = 'RGBA'
            elif img.format().startswith('RGB'):
                converted_img = img
                format = 'RGB'
            else:
                converted_img = img.convert_pixel_format('rgba32', inplace=0)
                format = 'RGBA'
            # Should probably take this into account
            # interp = img.get_image_interpolation()
        else:
            warnings.warn("Cannot render image of type %r into SVG context."
                          % type(img))
            return

        # converted_img now holds an Agg graphics context with the image
        pil_img = PilImage.fromstring(format,
                                      (converted_img.width(),
                                       converted_img.height()),
                                      converted_img.bmp_array.tostring())
        # 'is None' instead of '== None': avoids __eq__ on array-like rects.
        if rect is None:
            rect = (0, 0, img.width(), img.height())

        # PIL PS output doesn't support alpha.
        if format != 'RGB':
            pil_img = pil_img.convert('RGB')

        left, top, width, height = rect
        if width != img.width() or height != img.height():
            # This is not strictly required.
            pil_img = pil_img.resize((int(width), int(height)),
                                     PilImage.NEAREST)

        # Wrap the image in gsave/grestore so the CTM change doesn't leak.
        self.contents.write('gsave\n')
        self.contents.write('initmatrix\n')
        m = self.get_ctm()
        self.contents.write('[%.3f %.3f %.3f %.3f %.3f %.3f] concat\n' %
                            affine.affine_params(m))
        self.contents.write('%.3f %.3f translate\n' % (left, top))
        # Rely on PIL's EpsImagePlugin to do the hard work here.
        pil_img.save(self.contents, 'eps', eps=0)
        self.contents.write('grestore\n')
示例#10
0
文件: mpl.py 项目: 0thm4n/pdviper
 def _get_transformed_clip_path(self):
     """Return the stored clip rectangle as a CTM-transformed Path."""
     x, y, w, h = self.state.clipping_path
     corners = ((x, y), (x + w, y), (x + w, y + h), (x, y + h))
     ctm = Affine2D.from_values(*affine.affine_params(self.get_ctm()))
     return TransformedPath(Path(corners), ctm)
示例#11
0
文件: mpl.py 项目: 0thm4n/pdviper
    def device_draw_image(self, img, rect):
        """
        draw_image(img_gc, rect=(x,y,w,h))

        Draws another gc into this one.  If 'rect' is not provided, then
        the image gc is drawn into this one, rooted at (0,0) and at full
        pixel size.  If 'rect' is provided, then the image is resized
        into the (w,h) given and drawn into this GC at point (x,y).

        img_gc is either a Numeric array (WxHx3 or WxHx4) or a GC from Kiva's
        Agg backend (kiva.agg.GraphicsContextArray).

        Requires the Python Imaging Library (PIL).
        """
        from PIL import Image as PilImage
        from matplotlib import _image

        # We turn img into a PIL object, since that is what ReportLab
        # requires.  To do this, we first determine if the input image
        # GC needs to be converted to RGBA/RGB.  If so, we see if we can
        # do it nicely (using convert_pixel_format), and if not, we do
        # it brute-force using Agg.
        if type(img) == type(array([])):
            # Numeric array
            converted_img = agg.GraphicsContextArray(img, pix_format='rgba32')
            format = 'RGBA'
        elif isinstance(img, agg.GraphicsContextArray):
            if img.format().startswith('RGBA'):
                # BUG FIX: converted_img was left unbound in this branch and
                # the next one, causing a NameError at the fromstring() call.
                converted_img = img
                format = 'RGBA'
            elif img.format().startswith('RGB'):
                converted_img = img
                format = 'RGB'
            else:
                converted_img = img.convert_pixel_format('rgba32', inplace=0)
                format = 'RGBA'
            # Should probably take this into account
            # interp = img.get_image_interpolation()
        else:
            warnings.warn("Cannot render image of type %r into SVG context." %
                          type(img))
            return

        # 'is None' instead of '== None': avoids __eq__ on array-like rects.
        if rect is None:
            rect = (0, 0, img.width(), img.height())

        # converted_img now holds an Agg graphics context with the image
        pil_img = PilImage.fromstring(
            format, (converted_img.width(), converted_img.height()),
            converted_img.bmp_array.tostring())

        left, top, width, height = rect
        if width != img.width() or height != img.height():
            # This is not strictly required.
            pil_img = pil_img.resize((int(width), int(height)),
                                     PilImage.NEAREST)
        pil_img = pil_img.transpose(PilImage.FLIP_TOP_BOTTOM)
        # Fix for the SVG backend, which seems to flip x when a transform is provided.
        if self._backend.flipy():
            pil_img = pil_img.transpose(PilImage.FLIP_LEFT_RIGHT)

        mpl_img = _image.frombuffer(pil_img.tostring(), width, height, True)
        mpl_img.is_grayscale = False

        gc = self._backend.new_gc()
        if self.state.clipping_path:
            gc.set_clip_path(self._get_transformed_clip_path())
        transform = Affine2D.from_values(*affine.affine_params(self.get_ctm()))
        self._backend.draw_image(gc,
                                 left,
                                 top,
                                 mpl_img,
                                 dx=width,
                                 dy=height,
                                 transform=transform)
        gc.restore()
示例#12
0
 def set_text_position(self, x, y):
     """Move the text origin to (x, y) without touching rotation/scale."""
     a, b, c, d, _, _ = affine.affine_params(self.state.text_matrix)
     self.state.text_matrix = affine.affine_from_values(a, b, c, d, x, y)
示例#13
0
 def get_text_position(self):
     """Return the (tx, ty) translation of the current text matrix."""
     # Only the translation components are needed; discard the linear part
     # instead of binding four unused locals.
     _, _, _, _, tx, ty = affine.affine_params(self.state.text_matrix)
     return tx, ty
示例#14
0
    def device_draw_image(self, img, rect):
        """
        draw_image(img_gc, rect=(x,y,w,h))

        Draws another gc into this one.  If 'rect' is not provided, then
        the image gc is drawn into this one, rooted at (0,0) and at full
        pixel size.  If 'rect' is provided, then the image is resized
        into the (w,h) given and drawn into this GC at point (x,y).

        img_gc is either a Numeric array (WxHx3 or WxHx4) or a GC from Kiva's
        Agg backend (kiva.agg.GraphicsContextArray).

        Requires the Python Imaging Library (PIL).
        """
        from PIL import Image as PilImage

        # We turn img into a PIL object, since that is what ReportLab
        # requires.  To do this, we first determine if the input image
        # GC needs to be converted to RGBA/RGB.  If so, we see if we can
        # do it nicely (using convert_pixel_format), and if not, we do
        # it brute-force using Agg.
        if type(img) == type(array([])):
            # Numeric array
            converted_img = agg.GraphicsContextArray(img, pix_format='rgba32')
            format = 'RGBA'
        elif isinstance(img, agg.GraphicsContextArray):
            if img.format().startswith('RGBA'):
                # BUG FIX: converted_img was left unbound in this branch and
                # the next one, causing a NameError at the fromstring() call.
                converted_img = img
                format = 'RGBA'
            elif img.format().startswith('RGB'):
                converted_img = img
                format = 'RGB'
            else:
                converted_img = img.convert_pixel_format('rgba32', inplace=0)
                format = 'RGBA'
            # Should probably take this into account
            # interp = img.get_image_interpolation()
        else:
            warnings.warn("Cannot render image of type %r into SVG context."
                          % type(img))
            return

        # converted_img now holds an Agg graphics context with the image
        pil_img = PilImage.fromstring(format,
                                      (converted_img.width(),
                                       converted_img.height()),
                                      converted_img.bmp_array.tostring())
        # 'is None' instead of '== None': avoids __eq__ on array-like rects.
        if rect is None:
            rect = (0, 0, img.width(), img.height())

        left, top, width, height = rect
        if width != img.width() or height != img.height():
            # This is not strictly required.
            pil_img = pil_img.resize((int(width), int(height)),
                                     PilImage.NEAREST)

        # Encode the image as base64 PNG so it can be embedded inline.
        png_buffer = StringIO()
        pil_img.save(png_buffer, 'png')
        b64_img_data = b64encode(png_buffer.getvalue())
        png_buffer.close()

        # Draw the actual image.
        m = self.get_ctm()
        # Place the image on the page.
        # Using bottom instead of top here to account for the y-flip.
        m = affine.translate(m, left, height + top)
        # Flip y to reverse the flip at the start of the document.
        transform = 'matrix(%f,%f,%f,%f,%f,%f) scale(1,-1)' % affine.affine_params(m)
        image_data = 'data:image/png;base64,' + b64_img_data
        self._emit('image', transform=transform,
                   width=str(width), height=str(height),
                   preserveAspectRatio='none',
                   kw={ 'xlink:href': image_data })
 def test_affine_params(self):
     """Round-trip: affine_from_values then affine_params is the identity."""
     values = (1, 2, 3, 4, 5, 6)
     trans = affine.affine_from_values(*values)
     assert tuple(affine.affine_params(trans)) == values
示例#16
0
 def test_affine_params(self):
     """affine_params must recover the values given to affine_from_values."""
     expected = (1, 2, 3, 4, 5, 6)
     matrix = affine.affine_from_values(*expected)
     recovered = affine.affine_params(matrix)
     assert tuple(recovered) == expected
示例#17
0
 def get_text_position(self):
     """Return the translation (tx, ty) of the current text matrix."""
     # Discard the unused linear components rather than binding four
     # never-read locals.
     _, _, _, _, tx, ty = affine.affine_params(self.state.text_matrix)
     return tx, ty
示例#18
0
    def device_draw_image(self, img, rect):
        """
        draw_image(img_gc, rect=(x,y,w,h))

        Draws another gc into this one.  If 'rect' is not provided, then
        the image gc is drawn into this one, rooted at (0,0) and at full
        pixel size.  If 'rect' is provided, then the image is resized
        into the (w,h) given and drawn into this GC at point (x,y).

        img_gc is either a Numeric array (WxHx3 or WxHx4) or a GC from Kiva's
        Agg backend (kiva.agg.GraphicsContextArray).

        Requires the Python Imaging Library (PIL).
        """
        from PIL import Image as PilImage
        from matplotlib import _image

        # We turn img into a PIL object, since that is what ReportLab
        # requires.  To do this, we first determine if the input image
        # GC needs to be converted to RGBA/RGB.  If so, we see if we can
        # do it nicely (using convert_pixel_format), and if not, we do
        # it brute-force using Agg.
        if type(img) == type(array([])):
            # Numeric array
            converted_img = agg.GraphicsContextArray(img, pix_format='rgba32')
            format = 'RGBA'
        elif isinstance(img, agg.GraphicsContextArray):
            if img.format().startswith('RGBA'):
                # BUG FIX: converted_img was left unbound in this branch and
                # the next one, causing a NameError at the fromstring() call.
                converted_img = img
                format = 'RGBA'
            elif img.format().startswith('RGB'):
                converted_img = img
                format = 'RGB'
            else:
                converted_img = img.convert_pixel_format('rgba32', inplace=0)
                format = 'RGBA'
            # Should probably take this into account
            # interp = img.get_image_interpolation()
        else:
            warnings.warn("Cannot render image of type %r into SVG context."
                          % type(img))
            return

        # 'is None' instead of '== None': avoids __eq__ on array-like rects.
        if rect is None:
            rect = (0, 0, img.width(), img.height())

        # converted_img now holds an Agg graphics context with the image
        pil_img = PilImage.fromstring(format,
                                      (converted_img.width(),
                                       converted_img.height()),
                                      converted_img.bmp_array.tostring())

        left, top, width, height = rect
        if width != img.width() or height != img.height():
            # This is not strictly required.
            pil_img = pil_img.resize((int(width), int(height)),
                                     PilImage.NEAREST)
        pil_img = pil_img.transpose(PilImage.FLIP_TOP_BOTTOM)
        # Fix for the SVG backend, which seems to flip x when a transform is provided.
        if self._backend.flipy():
            pil_img = pil_img.transpose(PilImage.FLIP_LEFT_RIGHT)

        mpl_img = _image.frombuffer(pil_img.tostring(), width, height, True)
        mpl_img.is_grayscale = False

        gc = self._backend.new_gc()
        if self.state.clipping_path:
            gc.set_clip_path(self._get_transformed_clip_path())
        transform = Affine2D.from_values(*affine.affine_params(self.get_ctm()))
        self._backend.draw_image(gc, left, top, mpl_img,
                                 dx=width, dy=height, transform=transform)
        gc.restore()
示例#19
0
文件: pdf.py 项目: toddrme2178/enable
 def set_text_matrix(self, ttm):
     """Record the six affine parameters of `ttm` on the underlying gc."""
     params = affine.affine_params(ttm)
     self.gc._textMatrix = tuple(params)
示例#20
0
 def _get_transformed_clip_path(self):
     """Build a TransformedPath for the current rectangular clip region."""
     x0, y0, w, h = self.state.clipping_path
     x1, y1 = x0 + w, y0 + h
     ctm_transform = Affine2D.from_values(*affine.affine_params(self.get_ctm()))
     return TransformedPath(Path(((x0, y0), (x1, y0), (x1, y1), (x0, y1))),
                            ctm_transform)
示例#21
0
文件: pdf.py 项目: enthought/enable
 def set_text_matrix(self, ttm):
     """Update the gc's internal text matrix from the kiva affine `ttm`."""
     self.gc._textMatrix = tuple(affine.affine_params(ttm))