def _debug_draw_clipping_path(self, x, y, width, height):
    """Draw a thick green outline of the clip rectangle (debugging aid only)."""
    a, b, c, d, tx, ty = affine.affine_params(self.get_ctm())
    transform = 'matrix(%f,%f,%f,%f,%f,%f)' % (a, b, c, d, tx, ty)
    outline_style = _mkstyle({'stroke-width': 5,
                              'fill': 'none',
                              'stroke': 'green'})
    self._emit('rect', x=x, y=y, width=width, height=height,
               transform=transform, style=outline_style)
def device_set_clipping_path(self, x, y, width, height):
    """Register an SVG <clipPath> element for the given clip rectangle.

    Each clip path gets a unique id from the module-level counter; the id
    is stored on the current state so later drawing calls can reference it.
    """
    global _clip_counter
    clip_id = 'clip_%d' % _clip_counter
    _clip_counter += 1
    self.state._clip_id = clip_id

    # Convert the anchor point into the device coordinate convention.
    x, y = self._fixpoints([[x, y]])[0]
    a, b, c, d, tx, ty = affine.affine_params(self.get_ctm())
    transform = 'matrix(%f,%f,%f,%f,%f,%f)' % (a, b, c, d, tx, ty)

    rect = self._build('rect', x=x, y=y, width=width, height=height)
    clippath = self._build('clipPath', contents=rect, id=clip_id)
    self._emit('g', transform=transform, contents=clippath)
def device_fill_points(self, points, mode):
    """Emit a <polyline> (stroke-only) or <polygon> (any fill) for *points*.

    The fill/stroke colors, the fill rule, and the line attributes are all
    derived from the current graphics state and the Kiva draw *mode*.
    """
    points = self._fixpoints(points)
    # Fill paint applies only to the fill-ish modes; likewise for stroke.
    if mode in (FILL, FILL_STROKE, EOF_FILL_STROKE):
        fill = self._color(self.state.fill_color)
    else:
        fill = 'none'
    if mode in (STROKE, FILL_STROKE, EOF_FILL_STROKE):
        stroke = self._color(self.state.line_color)
    else:
        stroke = 'none'
    # EOF_* modes use the even-odd winding rule; everything else non-zero.
    if mode in (EOF_FILL_STROKE, EOF_FILL):
        rule = 'evenodd'
    else:
        rule = 'nonzero'
    linecap = line_cap_map[self.state.line_cap]
    linejoin = line_join_map[self.state.line_join]
    dasharray = self._dasharray()
    width = '%3.3f' % self.state.line_width
    # A clip id is only present if device_set_clipping_path ran earlier.
    clip_id = getattr(self.state, '_clip_id', None)
    if clip_id:
        clip = 'url(#' + clip_id +')'
    else:
        clip = None
    a,b,c,d,tx,ty = affine.affine_params(self.get_ctm())
    transform = 'matrix(%(a)f,%(b)f,%(c)f,%(d)f,%(tx)f,%(ty)f)' % locals()
    # default_filter drops (value, default) pairs where value == default,
    # keeping the emitted SVG minimal.
    if mode == STROKE:
        # Pure stroke: open polyline, opacity taken from the line color alpha.
        opacity = '%1.3f' % self.state.line_color[-1]
        self._emit('polyline', transform=transform,
                   points=_strpoints(points),
                   kw=default_filter({'clip-path': (clip, None)}),
                   style=_mkstyle(default_filter(
                       {'opacity': (opacity, "1.000"),
                        'stroke': stroke,
                        'fill': 'none',
                        'stroke-width': (width, "1.000"),
                        'stroke-linejoin': (linejoin, 'miter'),
                        'stroke-linecap': (linecap, 'butt'),
                        'stroke-dasharray': (dasharray, 'none')})))
    else:
        # Any fill mode: closed polygon, opacity from the fill color alpha.
        opacity = '%1.3f' % self.state.fill_color[-1]
        self._emit('polygon', transform=transform,
                   points=_strpoints(points),
                   kw=default_filter({'clip-path': (clip, None)}),
                   style=_mkstyle(default_filter(
                       {'opacity': (opacity, "1.000"),
                        'stroke-width': (width, "1.000"),
                        'fill': fill,
                        'fill-rule': rule,
                        'stroke': stroke,
                        'stroke-linejoin': (linejoin, 'miter'),
                        'stroke-linecap': (linecap, 'butt'),
                        'stroke-dasharray': (dasharray, 'none')})))
def device_show_text(self, text):
    """Emit a <text> element positioned by the composed text/user matrices."""
    ttm = self.get_text_matrix()
    ctm = self.get_ctm()    # not device_ctm!!
    combined = affine.concat(ctm, ttm)
    a, b, c, d, tx, ty = affine.affine_params(combined)
    # The trailing scale(1,-1) undoes the document-wide y-flip so the
    # glyphs render upright.
    transform = 'matrix(%f,%f,%f,%f,%f,%f) scale(1,-1)' % (a, b, c, d, tx, ty)
    attributes = {'font-family': self.face_name,
                  'font-size': str(self.font_size),
                  'xml:space': 'preserve',
                  'transform': transform}
    self._emit('text', contents=text, kw=attributes)
def device_transform_device_ctm(self, func, args):
    """Write the PostScript command corresponding to a CTM operation."""
    write = self.contents.write
    if func == LOAD_CTM:
        # PostScript has no "load matrix" op: reset the matrix, then fall
        # through to the concat case below.
        write('initmatrix\n')
        func = CONCAT_CTM
    if func == SCALE_CTM:
        sx, sy = args
        command = '%.3f %.3f scale\n' % (sx, sy)
    elif func == ROTATE_CTM:
        (angle,) = args
        command = '%.3f rotate\n' % angle
    elif func == TRANSLATE_CTM:
        tx, ty = args
        command = '%.3f %.3f translate\n' % (tx, ty)
    elif func == CONCAT_CTM:
        (matrix,) = args
        command = ('[%.3f %.3f %.3f %.3f %.3f %.3f] concat\n'
                   % affine.affine_params(matrix))
    else:
        # Unknown operation: emit nothing, matching the original behavior.
        return
    write(command)
def device_draw_image(self, img, rect):
    """
    draw_image(img_gc, rect=(x,y,w,h))

    Draws another gc into this one.  If 'rect' is not provided, then
    the image gc is drawn into this one, rooted at (0,0) and at full
    pixel size.  If 'rect' is provided, then the image is resized into
    the (w,h) given and drawn into this GC at point (x,y).

    img_gc is either a Numeric array (WxHx3 or WxHx4) or a GC from Kiva's
    Agg backend (kiva.agg.GraphicsContextArray).

    Requires the Python Imaging Library (PIL).
    """
    # FIX: also import PilImage -- it is used below (resize) but was
    # never brought into scope in this method.
    from kiva.compat import pilfromstring, piltostring, Image as PilImage

    if type(img) == type(array([])):
        # Numeric array
        converted_img = agg.GraphicsContextArray(img, pix_format='rgba32')
        format = 'RGBA'
    elif isinstance(img, agg.GraphicsContextArray):
        if img.format().startswith('RGBA'):
            format = 'RGBA'
            # FIX: converted_img was left unassigned on this path,
            # causing a NameError below.
            converted_img = img
        elif img.format().startswith('RGB'):
            format = 'RGB'
            converted_img = img
        else:
            converted_img = img.convert_pixel_format('rgba32', inplace=0)
            format = 'RGBA'
        # Should probably take this into account
        # interp = img.get_image_interpolation()
    else:
        warnings.warn("Cannot render image of type %r into EPS context."
                      % type(img))
        return

    # converted_img now holds an Agg graphics context with the image
    pil_img = pilfromstring(format,
                            (converted_img.width(),
                             converted_img.height()),
                            piltostring(converted_img.bmp_array))

    if rect is None:
        # Use converted_img for the dimensions: 'img' may be a bare
        # array, which has no width()/height() methods.
        rect = (0, 0, converted_img.width(), converted_img.height())

    # PIL PS output doesn't support alpha.
    if format != 'RGB':
        pil_img = pil_img.convert('RGB')

    left, top, width, height = rect
    if width != converted_img.width() or height != converted_img.height():
        # This is not strictly required.
        pil_img = pil_img.resize((int(width), int(height)),
                                 PilImage.NEAREST)

    self.contents.write('gsave\n')
    self.contents.write('initmatrix\n')
    m = self.get_ctm()
    self.contents.write('[%.3f %.3f %.3f %.3f %.3f %.3f] concat\n' %
                        affine.affine_params(m))
    self.contents.write('%.3f %.3f translate\n' % (left, top))
    # Rely on PIL's EpsImagePlugin to do the hard work here.
    pil_img.save(self.contents, 'eps', eps=0)
    self.contents.write('grestore\n')
def device_draw_image(self, img, rect):
    """
    draw_image(img_gc, rect=(x,y,w,h))

    Draws another gc into this one.  If 'rect' is not provided, then
    the image gc is drawn into this one, rooted at (0,0) and at full
    pixel size.  If 'rect' is provided, then the image is resized into
    the (w,h) given and drawn into this GC at point (x,y).

    img_gc is either a Numeric array (WxHx3 or WxHx4) or a GC from Kiva's
    Agg backend (kiva.agg.GraphicsContextArray).

    Requires the Python Imaging Library (PIL).
    """
    from kiva.compat import pilfromstring, piltostring, Image as PilImage

    # We turn img into a PIL object, since that is what ReportLab
    # requires.  To do this, we first determine if the input image
    # GC needs to be converted to RGBA/RGB.  If so, we see if we can
    # do it nicely (using convert_pixel_format), and if not, we do
    # it brute-force using Agg.
    if type(img) == type(array([])):
        # Numeric array
        converted_img = agg.GraphicsContextArray(img, pix_format='rgba32')
        format = 'RGBA'
    elif isinstance(img, agg.GraphicsContextArray):
        if img.format().startswith('RGBA'):
            format = 'RGBA'
            # FIX: converted_img was left unassigned on this path,
            # causing a NameError below.
            converted_img = img
        elif img.format().startswith('RGB'):
            format = 'RGB'
            converted_img = img
        else:
            converted_img = img.convert_pixel_format('rgba32', inplace=0)
            format = 'RGBA'
        # Should probably take this into account
        # interp = img.get_image_interpolation()
    else:
        warnings.warn("Cannot render image of type %r into SVG context."
                      % type(img))
        return

    # converted_img now holds an Agg graphics context with the image
    pil_img = pilfromstring(format,
                            (converted_img.width(),
                             converted_img.height()),
                            piltostring(converted_img.bmp_array))

    if rect is None:
        # Use converted_img for the dimensions: 'img' may be a bare
        # array, which has no width()/height() methods.
        rect = (0, 0, converted_img.width(), converted_img.height())

    left, top, width, height = rect
    if width != converted_img.width() or height != converted_img.height():
        # This is not strictly required.
        pil_img = pil_img.resize((int(width), int(height)),
                                 PilImage.NEAREST)

    # Encode the image as an inline base64 PNG.
    png_buffer = StringIO()
    pil_img.save(png_buffer, 'png')
    b64_img_data = b64encode(png_buffer.getvalue())
    png_buffer.close()

    # Draw the actual image.
    m = self.get_ctm()
    # Place the image on the page.
    # Using bottom instead of top here to account for the y-flip.
    m = affine.translate(m, left, height + top)
    # Flip y to reverse the flip at the start of the document.
    transform = ('matrix(%f,%f,%f,%f,%f,%f) scale(1,-1)'
                 % affine.affine_params(m))
    image_data = 'data:image/png;base64,' + b64_img_data
    self._emit('image', transform=transform, width=str(width),
               height=str(height), preserveAspectRatio='none',
               kw={'xlink:href': image_data})
def set_text_matrix(self, ttm):
    """Store *ttm*'s affine parameters as the ReportLab gc's text matrix."""
    # Unpack explicitly so a malformed matrix fails loudly here rather
    # than later when the gc consumes it.
    a, b, c, d, tx, ty = affine.affine_params(ttm)
    self.gc._textMatrix = (a, b, c, d, tx, ty)