def get_color_pixel(x, y):
    image = CGDisplayCreateImageForRect(mainID, ((x - 1, y - 1), (x + 1, y + 1)))
    bitmap = NSBitmapImageRep.alloc()
    bitmap.initWithCGImage_(image)
    # Get the RGB color (float values from 0 to 1 per color, plus alpha) at a particular point
    return bitmap.colorAtX_y_(1, 1)
def image_from_ndarray(array, format, size=None):
    """
    Creates an Image from a numpy ndarray object. The format may be
    'RGB' or 'RGBA'. If a size is specified, the array will be implicitly
    reshaped to that size, otherwise the size is inferred from the first
    two dimensions of the array.
    """
    if array.itemsize != 1:
        raise ValueError("Color component size must be 1 byte")
    if size is not None:
        width, height = size
        data_size = array.size
        pixel_size = data_size // (width * height)
        if pixel_size != len(format):
            raise ValueError("Array has wrong shape for specified size and format")
    else:
        height, width, pixel_size = array.shape
        if pixel_size != len(format):
            raise ValueError("Array has wrong shape for specified format")
    bps = 8
    spp = pixel_size
    alpha = format.endswith("A")
    csp = NSCalibratedRGBColorSpace
    bpp = bps * spp
    bpr = width * pixel_size
    fmt = NSAlphaNonpremultipliedBitmapFormat
    ns_rep = NSBitmapImageRep.alloc()
    planes = planes_t(array.ctypes.data, 0, 0, 0, 0)
    ns_rep.initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bitmapFormat_bytesPerRow_bitsPerPixel_(
        ctypes.addressof(planes), width, height, bps, spp, alpha, False,
        csp, fmt, bpr, bpp)
    image = Image.__new__(Image)
    image._init_from_ns_rep(ns_rep)
    image._data = array
    return image
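# --- Usage sketch (not from the original source): a minimal, hypothetical
# example of calling image_from_ndarray() defined above. The array shape,
# dtype and channel values are illustrative assumptions; only the function
# itself is taken from the snippet.
import numpy as np

# Build a 64x64 opaque red RGBA image as a uint8 array (itemsize must be 1 byte).
pixels = np.zeros((64, 64, 4), dtype=np.uint8)
pixels[..., 0] = 255   # red channel
pixels[..., 3] = 255   # alpha channel
img = image_from_ndarray(pixels, "RGBA")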
def __init__(self, width, height):
    GPixmap.__init__(self)
    #ns_size = NSSize(width, height)
    #ns_image = NSImage.alloc().initWithSize_(ns_size)
    ns_image = NSImage.alloc().init()
    ns_image.setCacheMode_(NSImageCacheNever)
    row_bytes = 4 * width
    ns_bitmap = NSBitmapImageRep.alloc().\
        initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
            None, width, height, 8, 4, True, False,
            NSCalibratedRGBColorSpace, row_bytes, 32)
    ns_image.addRepresentation_(ns_bitmap)
    ns_bitmap_context = NSGraphicsContext.graphicsContextWithBitmapImageRep_(ns_bitmap)
    ns_graphics_context = FlippedNSGraphicsContext.alloc().initWithBase_(ns_bitmap_context)
    ns_tr = NSAffineTransform.transform()
    ns_tr.translateXBy_yBy_(0.0, height)
    ns_tr.scaleXBy_yBy_(1.0, -1.0)
    # Using __class__ to get +saveGraphicsState instead of -saveGraphicsState
    NSGraphicsContext.__class__.saveGraphicsState()
    try:
        NSGraphicsContext.setCurrentContext_(ns_graphics_context)
        ns_tr.concat()
    finally:
        NSGraphicsContext.__class__.restoreGraphicsState()
    self._init_with_ns_image(ns_image, flipped=True)  # False)
    self._ns_bitmap_image_rep = ns_bitmap
    self._ns_graphics_context = ns_graphics_context
def generateThumbnail(self):
    if not self._thumbnail:
        if usePyObjC:
            from AppKit import NSBitmapImageRep, NSCalibratedRGBColorSpace, NSGraphicsContext, NSCompositeCopy, NSImage
            from Foundation import NSRect
            image = self.image
            rep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
                None,
                int(self.thumbnailSize), int(self.thumbnailSize),
                8, 4, True, False,
                NSCalibratedRGBColorSpace,
                0, 32,
            )
            context = NSGraphicsContext.graphicsContextWithBitmapImageRep_(rep)
            oldContext = NSGraphicsContext.currentContext()
            NSGraphicsContext.setCurrentContext_(context)
            image.drawInRect_fromRect_operation_fraction_(
                NSRect((0, 0), (self.thumbnailSize, self.thumbnailSize)),
                NSRect((0, 0), image.size()),
                NSCompositeCopy,
                1.0,
            )
            NSGraphicsContext.setCurrentContext_(oldContext)
            self._thumbnail = NSImage.alloc().initWithSize_((self.thumbnailSize, self.thumbnailSize))
            self._thumbnail.addRepresentation_(rep)
        else:
            import wx
            try:
                image = self.image.Scale(self.thumbnailSize, self.thumbnailSize, wx.IMAGE_QUALITY_HIGH)
            except AttributeError:
                # wx 2.6 can't do IMAGE_QUALITY_HIGH
                image = self.image.Scale(self.thumbnailSize, self.thumbnailSize)
            self._thumbnail = wx.BitmapFromImage(image)
    return self._thumbnail
def saveScrawlToBackground(self, layer):
    font = layer.parent.parent
    if font.filepath is None:
        print(
            "You must save the Glyphs file before a Scrawl background image can be added."
        )
        return
    data = layer.userData["%s.data" % plugin_id]
    pixel_size = layer.userData["%s.unit" % plugin_id]
    pixel_ratio = layer.master.customParameters['ScrawlPenRatio']
    if pixel_ratio is None:
        pixel_ratio = default_pixel_ratio
    else:
        pixel_ratio = float(pixel_ratio)
    rect = NSMakeRect(*layer.userData["%s.rect" % plugin_id])
    if data is not None:
        image_path = join(dirname(font.filepath), "%s-%s.png" % (layer.layerId, layer.parent.name))
        try:
            imgdata = NSBitmapImageRep.alloc().initWithData_(data)
        except:
            print("Error saving the image file.")
            return
        pngdata = imgdata.representationUsingType_properties_(
            NSPNGFileType, None)
        pngdata.writeToFile_atomically_(image_path, False)
        layer.backgroundImage = GSBackgroundImage(image_path)
        layer.backgroundImage.position = NSPoint(rect.origin.x, rect.origin.y)
        layer.backgroundImage.scale = (float(pixel_size), float(pixel_size * pixel_ratio))
def setFromPixels(self, width, height, pixels):
    if usePyObjC:
        from AppKit import NSImage, NSBitmapImageRep, NSCalibratedRGBColorSpace
        self._image = NSImage.alloc().initWithSize_((width, height))
        pixels = [(int(r * a / 255), int(g * a / 255), int(b * a / 255), a)
                  for r, g, b, a in pixels]
        pixels = ''.join([chr(r) + chr(g) + chr(b) + chr(a) for r, g, b, a in pixels])
        rep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
            (pixels, None, None, None, None),
            width, height,
            8, 4, True, False,
            NSCalibratedRGBColorSpace,
            width * 4, 32,
        )
        self._image.addRepresentation_(rep)
        self.__pixels = pixels  # it seems that NSImageRep does not copy the pixel data, so we must retain it here
    else:
        import wx
        self._image = wx.EmptyImage(width, height)
        data = []
        for y in xrange(height):
            for x in xrange(width):
                pixel = pixels[x + width * y]
                self._image.SetRGB(x, y, pixel[0], pixel[1], pixel[2])
                if self._image.HasAlpha():
                    self._image.SetAlpha(x, y, pixel[3])
def extract_icon(file_path):
    file_icon_tiff = NSWorkspace.sharedWorkspace().iconForFile_(
        file_path).TIFFRepresentation()
    file_icon_png = NSBitmapImageRep.imageRepWithData_(
        file_icon_tiff).representationUsingType_properties_(NSPNGFileType, None)
    file_icon_png_path = file_path + ".png"
    file_icon_png.writeToFile_atomically_(file_icon_png_path, None)
    return file_icon_png_path
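# --- Usage sketch (not from the original source): a hypothetical call to
# extract_icon() above. It assumes the names the function uses (NSWorkspace,
# NSBitmapImageRep, NSPNGFileType) are already imported, and the target path
# below is only illustrative.
from AppKit import NSWorkspace, NSBitmapImageRep, NSPNGFileType

# Writes "/Applications/Safari.app.png" next to the target and returns that path.
png_path = extract_icon("/Applications/Safari.app")
print(png_path)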
def get_screen_bitmap(lt, rb):
    mainID = CGMainDisplayID()
    # Grab a chunk of the screen from lt to rb
    image = CGDisplayCreateImageForRect(mainID, (lt, rb))
    bitmap = NSBitmapImageRep.alloc()
    bitmap.initWithCGImage_(image)
    return bitmap
def loadScrawl(self):
    if self.current_layer is None:
        return
    pen_size = self.current_layer.userData["%s.size" % plugin_id]
    if pen_size is not None:
        self.pen_size = pen_size  # scrawl pixels
    # Otherwise, keep the previous size
    self.pixel_size = self.current_layer.userData["%s.unit" % plugin_id]
    if self.pixel_size is None:
        self.pixel_size = default_pixel_size  # font units
    self.pixel_ratio = self.current_layer.master.customParameters[
        'ScrawlPenRatio'
    ]
    if self.pixel_ratio is None:
        self.pixel_ratio = default_pixel_ratio
    else:
        self.pixel_ratio = float(self.pixel_ratio)

    # Drawing rect
    rect = self.current_layer.userData["%s.rect" % plugin_id]
    if rect is None:
        self.loadDefaultRect()
    else:
        self.rect = NSMakeRect(*rect)

    # Image data
    data = self.current_layer.userData["%s.data" % plugin_id]
    if data is None:
        self.data = initImage(
            self.current_layer,
            self.rect.size.width,
            self.rect.size.height,
            self.pixel_size,
            self.pixel_ratio
        )
    else:
        try:
            self.data = NSBitmapImageRep.alloc().initWithData_(data)
            self.data.setProperty_withValue_(
                NSImageColorSyncProfileData, None
            )
        except:
            print("Error in image data of layer %s" % self.current_layer)
            self.data = initImage(
                self.current_layer,
                self.rect.size.width,
                self.rect.size.height,
                self.pixel_size,
                self.pixel_ratio
            )
    self.needs_save = False
def _init_from_file(self, file):
    #ns_image = NSImage.alloc().initWithContentsOfFile_(file)
    #if not ns_image:
    ns_data = NSData.dataWithContentsOfFile_(file)
    if not ns_data:
        raise EnvironmentError("Unable to read image file: %s" % file)
    ns_rep = NSBitmapImageRep.imageRepWithData_(ns_data)
    if not ns_rep:
        raise ValueError("Unrecognised image file type: %s" % file)
    ns_rep.setSize_((ns_rep.pixelsWide(), ns_rep.pixelsHigh()))
    self._init_from_ns_rep(ns_rep)
def image_from_pil_image(pil_image):
    """Creates an Image from a Python Imaging Library (PIL) Image object."""
    mode = pil_image.mode
    w, h = pil_image.size
    data = pil_image.tostring()
    alpha = False
    cmyk = False
    floating = False
    if mode == "1":
        bps = 1; spp = 1
    elif mode == "L":
        bps = 8; spp = 1
    elif mode == "RGB":
        bps = 8; spp = 3
    elif mode == "RGBA":
        bps = 8; spp = 4; alpha = True
    elif mode == "CMYK":
        bps = 8; spp = 4; cmyk = True
    elif mode == "I":
        bps = 32; spp = 1
    elif mode == "F":
        bps = 32; spp = 1; floating = True
    else:
        raise ValueError("Unsupported PIL image mode '%s'" % mode)
    if cmyk:
        csp = NSDeviceCMYKColorSpace
    else:
        csp = NSCalibratedRGBColorSpace
    fmt = NSAlphaNonpremultipliedBitmapFormat
    if floating:
        fmt |= NSFloatingPointSamplesBitmapFormat
    bpp = bps * spp
    bpr = w * ((bpp + 7) // 8)
    if debug_pil:
        print("GUI.PIL:")
        print("image size =", (w, h))
        print("data size =", len(data))
        print("bits per sample =", bps)
        print("samples per pixel =", spp)
        print("bits per pixel =", bpp)
        print("bytes per row =", bpr)
    hack_objc_sig()
    ns_rep = NSBitmapImageRep.alloc()
    planes = planes_t(data, "", "", "", "")
    ns_rep.initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bitmapFormat_bytesPerRow_bitsPerPixel_(
        ctypes.addressof(planes), w, h, bps, spp, alpha, False,
        csp, fmt, bpr, bpp)
    # planes = (data, "", "", "", "")
    # ns_rep.initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
    #     planes, w, h, bps, spp, alpha, False, csp, bpr, bpp)
    image = Image.__new__(Image)
    image._init_from_ns_rep(ns_rep)
    image._data = data
    return image
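# --- Usage sketch (not from the original source): a hypothetical call to
# image_from_pil_image() above. The file name is illustrative. Note that the
# helper relies on pil_image.tostring(), which exists in classic PIL / older
# Pillow; newer Pillow renamed it to tobytes(), so this sketch assumes an
# older PIL installation.
from PIL import Image as PILImage

pil_img = PILImage.open("photo.png").convert("RGBA")
gui_img = image_from_pil_image(pil_img)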
def get_color(x, y):
    """
    Get the color of a single point on the screen.
    :param x: x, in X11 coordinates
    :param y: y, in X11 coordinates
    :return: an (r, g, b) tuple of integers in the range 0-255
    """
    image = CGDisplayCreateImageForRect(mainID, ((0, 0), (x11_x, x11_y)))
    bitmap = NSBitmapImageRep.alloc()
    bitmap.initWithCGImage_(image)
    data = str(bitmap.colorAtX_y_(x * DPI_times, y * DPI_times))
    arr = data.split(' ')
    return int(float(arr[1]) * 255), int(float(arr[2]) * 255), int(
        float(arr[3]) * 255)
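# --- Usage sketch (not from the original source): a hypothetical check using
# get_color() above. It assumes the module-level mainID, x11_x, x11_y and
# DPI_times that the function relies on are already set up; the coordinates
# and the target color are illustrative values.
r, g, b = get_color(100, 200)
if (r, g, b) == (255, 0, 0):
    print("pixel at (100, 200) is pure red")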
def _ns_flush(self):
    glFlush()
    width, height = self.size
    pixels = glReadPixels(0, 0, int(width), int(height), GL_RGBA, GL_UNSIGNED_BYTE)
    bytes_per_row = int(width) * 4
    ns_new_bitmap = NSBitmapImageRep.alloc().\
        initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
            (pixels, "", "", "", ""), int(width), int(height), 8, 4, True, False,
            AppKit.NSDeviceRGBColorSpace, bytes_per_row, 0)
    ns_image = NSImage.alloc().initWithSize_(NSSize(width, height))
    ns_image.addRepresentation_(ns_new_bitmap)
    ns_image.lockFocus()
    ns_image.unlockFocus()
    self._ns_image = ns_image
    self._ns_bitmap_image_rep = ns_new_bitmap
def evaluateToFile_(self, filename):
    self.canvas.lockFocus()
    if self.clip:
        self.clip.setClip()
    if self.image:
        self.image.compositeToPoint_operation_((0, 0), NSCompositeCopy)
    elif self.gradient:
        self.gradient.drawInBezierPath_angle_(self.clip, -90)
    elif self.color:
        self.clip.fill(self.color)
    self.canvas.unlockFocus()
    image_data = self.canvas.TIFFRepresentation()
    image_rep = NSBitmapImageRep.imageRepWithData_(image_data)
    data = image_rep.representationUsingType_properties_(NSPNGFileType, None)
    data.writeToFile_atomically_(filename, False)
def find_color(rgb, limit_area=False):
    """
    Find the position of a given color on the whole screen / within a given
    area. TODO
    :param rgb: the (r, g, b) color to search for
    :param limit_area: the area from (0, 0) to (x, y), in X11 coordinates
    :type limit_area: tuple
    :return: position of the color
    """
    r, g, b = rgb
    if limit_area is False:
        image = CGDisplayCreateImageForRect(mainID, ((0, 0), (x11_x, x11_y)))
    else:
        image = CGDisplayCreateImageForRect(mainID, ((0, 0), (limit_area[0], limit_area[1])))
    bitmap = NSBitmapImageRep.alloc()
    bitmap.initWithCGImage_(image)
    # The actual pixel search is not implemented in this snippet (see the TODO above).
def toRGBA(self):
    if usePyObjC:
        from AppKit import NSBitmapImageRep, NSDeviceRGBColorSpace, NSGraphicsContext, NSCompositeCopy
        from Foundation import NSRect
        image = self.image
        size = image.size()
        rep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
            None,
            int(size.width), int(size.height),
            8, 4, True, False,
            NSDeviceRGBColorSpace,
            0, 32,
        )
        context = NSGraphicsContext.graphicsContextWithBitmapImageRep_(rep)
        oldContext = NSGraphicsContext.currentContext()
        NSGraphicsContext.setCurrentContext_(context)
        oldFlipped = image.isFlipped()
        image.setFlipped_(True)
        image.drawInRect_fromRect_operation_fraction_(
            NSRect((0, 0), size),
            NSRect((0, 0), size),
            NSCompositeCopy,
            1.0,
        )
        image.setFlipped_(oldFlipped)
        NSGraphicsContext.setCurrentContext_(oldContext)
        # FIXME: take bytesPerRow into account
        data = str(rep.bitmapData())
    else:
        import wx
        # wxImage coordinates are flipped vertically
        image = self.image.Mirror(horizontally=False)
        data = list(image.GetData())
        rdata = data[0::3]
        gdata = data[1::3]
        bdata = data[2::3]
        if image.HasAlpha():
            adata = image.GetAlpha()
        else:
            adata = '\xFF' * len(rdata)
        data = ''.join([r + g + b + a for r, g, b, a in zip(rdata, gdata, bdata, adata)])
    return data
def _make_empty_cursor():
    global _empty_cursor
    if not _empty_cursor:
        from AppKit import NSCursor, NSImage, NSBitmapImageRep, NSDeviceRGBColorSpace
        from GUI import Cursor
        import sys
        if sys.version_info >= (3, 0):
            b = bytes([0])
        else:
            b = "\x00"
        d = b * 1024
        ns_bitmap = NSBitmapImageRep.alloc().\
            initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
                (d, d, d, d, d), 16, 16, 8, 4, True, False,
                NSDeviceRGBColorSpace, 64, 32)
        ns_image = NSImage.alloc().initWithSize_((16, 16))
        ns_image.addRepresentation_(ns_bitmap)
        ns_cursor = NSCursor.alloc().initWithImage_hotSpot_(ns_image, (0, 0))
        _empty_cursor = Cursor._from_ns_cursor(ns_cursor)
        _empty_cursor._data = d
    return _empty_cursor
def convertIconToPNG(icon_path, destination_path, desired_pixel_height=350):
    '''Converts an icns file to a png file, choosing the representation
    closest to (but >= if possible) the desired_pixel_height.
    Returns True if successful, False otherwise'''
    if os.path.exists(icon_path):
        image_data = NSData.dataWithContentsOfFile_(icon_path)
        bitmap_reps = NSBitmapImageRep.imageRepsWithData_(image_data)
        chosen_rep = None
        for bitmap_rep in bitmap_reps:
            if not chosen_rep:
                chosen_rep = bitmap_rep
            elif (bitmap_rep.pixelsHigh() >= desired_pixel_height
                  and bitmap_rep.pixelsHigh() < chosen_rep.pixelsHigh()):
                chosen_rep = bitmap_rep
        if chosen_rep:
            png_data = chosen_rep.representationUsingType_properties_(
                NSPNGFileType, None)
            png_data.writeToFile_atomically_(destination_path, False)
            return True
    return False
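# --- Usage sketch (not from the original source): a hypothetical call to
# convertIconToPNG() above. It assumes os, NSData, NSBitmapImageRep and
# NSPNGFileType are already imported as the function expects; both paths are
# illustrative.
ok = convertIconToPNG(
    "/Applications/Safari.app/Contents/Resources/AppIcon.icns",
    "/tmp/safari_icon.png",
)
print("converted" if ok else "conversion failed")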
def render(self, filename):
    '''
    Renders the given build's build preview to an image with the given
    filename.
    '''
    # Sets up a blank bitmap canvas for drawing to. Such an ugly method
    # call. Any easier way to do this in Obj-C?
    init = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_
    im = init(None, self.kpf.width, self.kpf.height, 8, 4, True, False,
              NSDeviceRGBColorSpace, 0, 0)
    # Set up the Objective-C graphics context based on the bitmap canvas
    # we just created
    context = NSGraphicsContext.graphicsContextWithBitmapImageRep_(im)
    context.setCompositingOperation_(NSCompositeSourceOver)
    NSGraphicsContext.setCurrentContext_(context)
    # Ask the implementation to render itself
    self.__render__()
    # Output the file
    imjpeg = im.representationUsingType_properties_(NSJPEGFileType, None)
    imjpeg.writeToFile_atomically_(filename, False)
def dt_bitmap2d_from_ci_image(ci_image, width, height, grid):
    """
    :param ci_image: a :class:`Quartz.CIImage` instance from PyObjC
    :param width: desired width in pixels
    :param height: desired height in pixels
    :param grid: a four-tuple (x0, y0, dx, dy) spatial grid
    :returns: a :class:`datatank_py.DTBitmap2D.DTBitmap2D` instance

    **Requires Mac OS X and PyObjC**

    This function allows you to turn a :class:`Quartz.CIImage` into an
    object that DataTank can use. Only 8-bit RGB images are supported
    at this time.
    """
    from datatank_py.DTBitmap2D import DTBitmap2D
    from Quartz import CGRectMake, CGPointZero
    from AppKit import NSBitmapImageRep, NSCalibratedRGBColorSpace, NSGraphicsContext

    # No matter what, I can't get NSBitmapImageRep to create a rep from planar data or
    # a passed-in buffer, so I have to let it manage the buffer. Constraining row bytes
    # seems to work properly, so at least I don't have to deal with that. I really think
    # PyObjC is buggy here as well.
    image_rep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bitmapFormat_bytesPerRow_bitsPerPixel_(
        None, width, height, 8, 4, True, False, NSCalibratedRGBColorSpace,
        0, 4 * width, 32)
    ns_context = NSGraphicsContext.graphicsContextWithBitmapImageRep_(image_rep)
    ns_context.CIContext().drawImage_atPoint_fromRect_(ci_image, CGPointZero, CGRectMake(0, 0, width, height))
    ns_context.flushGraphics()

    (red, green, blue, alpha) = __bitmap_planes_from_imagerep(image_rep)
    dt_bitmap = DTBitmap2D()
    dt_bitmap.red = red
    dt_bitmap.green = green
    dt_bitmap.blue = blue
    dt_bitmap.grid = grid
    return dt_bitmap
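# --- Usage sketch (not from the original source): a hypothetical call to
# dt_bitmap2d_from_ci_image() above. It assumes the datatank_py package is
# installed and that PyObjC's Quartz wrappers expose CIImage; the file path,
# dimensions and grid values are illustrative.
from Quartz import CIImage
from Foundation import NSURL

url = NSURL.fileURLWithPath_("/tmp/input.png")
ci_image = CIImage.imageWithContentsOfURL_(url)
bitmap = dt_bitmap2d_from_ci_image(ci_image, 640, 480, (0.0, 0.0, 1.0, 1.0))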
def main():
    args = parseArgs()
    blur_rate = args.rate
    paste_board = NSPasteboard.generalPasteboard()
    image_data = getImageDataFromClipBoard(paste_board)
    ci_clipboard_image = CIImage.imageWithData_(image_data)
    blur_filter = CIFilter.filterWithName_('CIGaussianBlur')
    blur_filter.setDefaults()
    blur_filter.setValue_forKey_(ci_clipboard_image, "inputImage")
    blur_filter.setValue_forKey_(blur_rate, "inputRadius")
    ci_clipboard_image_size = ci_clipboard_image.extent().size
    output_image = blur_filter.outputImage()
    cropped_transparent_image = output_image.imageByCroppingToRect_(
        CGRectMake(0, 0, ci_clipboard_image_size.width, ci_clipboard_image_size.height))
    bitmap_image = NSBitmapImageRep.alloc().initWithCIImage_(cropped_transparent_image)
    paste_board.clearContents()
    paste_board.setData_forType_(bitmap_image.TIFFRepresentation(), NSPasteboardTypeTIFF)
    print("Done.")
def as_matrix(self, normalize=False, binarize=False):
    """Renders the glyph as a matrix. By default, the matrix values are
    integer pixel greyscale values in the range 0 to 255, but they can be
    normalized or turned into binary values with the appropriate keyword
    arguments. The matrix is returned as a `GlyphRendering` object which
    can be further manipulated."""
    box_height = int(self.font.full_height_px)
    box_width = int(self.ink_width)
    b = NSBitmapImageRep.alloc(
    ).initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
        None, box_width, box_height, 8, 1, False, False,
        NSCalibratedWhiteColorSpace, 0, 0)
    ctx = NSGraphicsContext.graphicsContextWithBitmapImageRep_(b)
    assert ctx
    NSGraphicsContext.setCurrentContext_(ctx)
    NSColor.whiteColor().setFill()
    p2 = NSBezierPath.bezierPath()
    p2.appendBezierPath_(self.layer.completeBezierPath)
    t = NSAffineTransform.transform()
    t.translateXBy_yBy_(-self.lsb, -self.font.descender * self.font.scale_factor)
    t.scaleBy_(self.font.scale_factor)
    p2.transformUsingAffineTransform_(t)
    p2.fill()
    png = b.representationUsingType_properties_(NSPNGFileType, None)
    png.writeToFile_atomically_("/tmp/foo.png", False)
    Z = np.array(b.bitmapData())
    box_width_up = Z.shape[0] // box_height
    Z = Z.reshape((box_height, box_width_up))[0:box_height, 0:box_width]
    if normalize or binarize:
        Z = Z / 255.0
    if binarize:
        Z = Z.astype(int)
    return GlyphRendering.init_from_numpy(self, Z)
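# --- Usage sketch (not from the original source): a hypothetical glyph object
# exposing the as_matrix() method above; "some_glyph" is a placeholder, and
# only the keyword arguments come from the snippet's docstring.
import numpy as np

grey = some_glyph.as_matrix()                 # greyscale values 0-255
norm = some_glyph.as_matrix(normalize=True)   # floats 0.0-1.0
binary = some_glyph.as_matrix(binarize=True)  # 0/1 ink mask
print(np.asarray(binary).sum(), "ink pixels")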
def initImage(layer, width, height, pixel_size=default_pixel_size, ratio=1):
    # See https://developer.apple.com/documentation/appkit/nsbitmapimagerep/1395538-init
    img = NSBitmapImageRep.alloc(
    ).initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bitmapFormat_bytesPerRow_bitsPerPixel_(
        None,                                     # BitmapDataPlanes
        int(round(width / pixel_size)),           # pixelsWide
        int(round(height / pixel_size / ratio)),  # pixelsHigh
        8,                                        # bitsPerSample: 1, 2, 4, 8, 12, or 16
        1,                                        # samplesPerPixel: 1 - 5
        False,                                    # hasAlpha
        False,                                    # isPlanar
        NSDeviceWhiteColorSpace,                  # colorSpaceName
        # NSDeviceRGBColorSpace,
        0,                                        # bitmapFormat
        0,                                        # bytesPerRow
        0,                                        # bitsPerPixel
    )
    """
    NSCalibratedWhiteColorSpace
    NSCalibratedBlackColorSpace
    NSCalibratedRGBColorSpace
    NSDeviceWhiteColorSpace
    NSDeviceBlackColorSpace
    NSDeviceRGBColorSpace
    NSDeviceCMYKColorSpace
    NSNamedColorSpace
    NSCustomColorSpace
    """
    # The image is filled black for some reason, make it white
    current = NSGraphicsContext.currentContext()
    context = NSGraphicsContext.graphicsContextWithBitmapImageRep_(img)
    NSGraphicsContext.setCurrentContext_(context)
    NSColor.whiteColor().set()
    # NSBezierPath.setLineWidth_(1)
    NSBezierPath.fillRect_(NSMakeRect(0, 0, width, int(round(height / ratio))))
    NSGraphicsContext.setCurrentContext_(current)
    return img
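# --- Usage sketch (not from the original source): a hypothetical call to
# initImage() above from the Glyphs macro environment. The Glyphs object, the
# selected layer and the width/height/pixel_size values are all illustrative
# assumptions; only initImage() itself comes from the snippet.
layer = Glyphs.font.selectedLayers[0]
img = initImage(layer, layer.width, 1000, pixel_size=10, ratio=1)
print(img.pixelsWide(), img.pixelsHigh())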
import objc
from AppKit import NSBitmapImageRep
from Quartz.CoreGraphics import CGMainDisplayID

# Big big thanks to https://bitbucket.org/ronaldoussoren/pyobjc/ for updated .bridgesupport files
# And also this particular message that showed how to use them:
# http://www.mail-archive.com/[email protected]/msg09749.html

# Import the definition for CGDisplayCreateImageForRect
objc.parseBridgeSupport(
    """<?xml version='1.0'?>
<!DOCTYPE signatures SYSTEM "file://localhost/System/Library/DTDs/BridgeSupport.dtd">
<signatures version='1.0'>
<depends_on path='/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation' />
<depends_on path='/System/Library/Frameworks/IOKit.framework/IOKit' />
<depends_on path='/System/Library/Frameworks/CoreServices.framework/CoreServices' />
<function name='CGDisplayCreateImageForRect'>
<retval already_cfretained='true' type='^{CGImage=}' />
<arg type='I' />
<arg type='{CGRect={CGPoint=ff}{CGSize=ff}}' type64='{CGRect={CGPoint=dd}{CGSize=dd}}' />
</function>
</signatures>
""",
    globals(),
    '/System/Library/Frameworks/ApplicationServices.framework/Frameworks/CoreGraphics.framework')

mainID = CGMainDisplayID()

# Grab a chunk of the screen from 0,0 to 100,100 from top left
image = CGDisplayCreateImageForRect(mainID, ((0, 0), (100, 100)))
bitmap = NSBitmapImageRep.alloc()
bitmap.initWithCGImage_(image)

# Get the RGB color (float values from 0 to 1 per color, plus alpha) at a particular point
bitmap.colorAtX_y_(93.484375, 60.4921875)
if not subtitle or not message:
    filename = Path(status["file"]).name
    subtitle = filename

center = NSUserNotificationCenter.defaultUserNotificationCenter()
notification = NSUserNotification.alloc().init()
notification.setTitle_(title)
notification.setSubtitle_(subtitle)
notification.setInformativeText_(message)

# To-Do: Data allocation currently doesn't work in Catalina
if mac_ver()[0] != "10.15":
    if cover is not None:  # the song has an embedded cover image
        data = NSData.alloc().initWithBytes_length_(cover, len(cover))
        image_rep = NSBitmapImageRep.alloc().initWithData_(data)
        # CGImageGetWidth started returning bogus values in macOS 10.14 ->
        # Use Pillow to extract the image dimensions
        size = NSMakeSize(*Image.open(BytesIO(cover)).size)
        image = NSImage.alloc().initWithSize_(size)
        image.addRepresentation_(image_rep)
        if env.itunes_style_notification:
            notification.setValue_forKey_(image, "_identityImage")
        else:
            notification.setValue_forKey_(
                NSImage.alloc().initByReferencingFile_(str(env.app_icon)),
                "_identityImage",
            )
import tempfile

from mojo.roboFont import CurrentGlyph
from AppKit import NSPNGFileType, NSBitmapImageRep

g = CurrentGlyph()
result = g.getRepresentation("money.money.money")
if result:
    im, offset = result
    imagePath = tempfile.mkstemp(suffix=".png")[1]
    imageRep = NSBitmapImageRep.imageRepWithData_(im.TIFFRepresentation())
    imageData = imageRep.representationUsingType_properties_(NSPNGFileType, None)
    imageData.writeToFile_atomically_(imagePath, True)
    g.addImage(path=imagePath, position=offset)
def generateImage(self):
    size = self.imageSize
    dx, dy = self.dx, self.dy
    image = self.texture.image
    if usePyObjC:
        from AppKit import NSBitmapImageRep, NSCalibratedRGBColorSpace, NSGraphicsContext, NSCompositeCopy, NSImage
        from Foundation import NSRect
        rep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
            None,
            int(size), int(size),
            8, 4, True, False,
            NSCalibratedRGBColorSpace,
            0, 32,
        )
        context = NSGraphicsContext.graphicsContextWithBitmapImageRep_(rep)
        oldContext = NSGraphicsContext.currentContext()
        NSGraphicsContext.setCurrentContext_(context)
        image.drawInRect_fromRect_operation_fraction_(
            NSRect((0, 0), (size, size)),
            NSRect((0, 0), image.size()),
            NSCompositeCopy,
            1.0,
        )
        NSGraphicsContext.setCurrentContext_(oldContext)
        image = NSImage.alloc().initWithSize_((size, size))
        image.addRepresentation_(rep)

        rep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
            None,
            int(size), int(size),
            8, 4, True, False,
            NSCalibratedRGBColorSpace,
            0, 32,
        )
        context = NSGraphicsContext.graphicsContextWithBitmapImageRep_(rep)
        oldContext = NSGraphicsContext.currentContext()
        NSGraphicsContext.setCurrentContext_(context)
        srcPoints = (
            (0, 0),
            (size - dx, 0),
            (0, size - dy),
            (size - dx, size - dy),
        )
        dstPoints = (
            (dx, dy),
            (0, dy),
            (dx, 0),
            (0, 0),
        )
        sizes = (
            (size - dx, size - dy),
            (dx, size - dy),
            (size - dx, dy),
            (dx, dy),
        )
        for src, dst, siz in zip(srcPoints, dstPoints, sizes):
            if siz[0] > 0 and siz[1] > 0:
                # not sure if Cocoa appreciates trying to draw an image with invalid bounds
                image.drawInRect_fromRect_operation_fraction_(
                    NSRect(dst, siz),
                    NSRect(src, siz),
                    NSCompositeCopy,
                    1.0,
                )
        NSGraphicsContext.setCurrentContext_(oldContext)
        result = NSImage.alloc().initWithSize_((size, size))
        result.addRepresentation_(rep)
    else:
        import wx
        try:
            image = image.Scale(size, size, wx.IMAGE_QUALITY_HIGH)
        except AttributeError:
            # wx 2.6 can't do IMAGE_QUALITY_HIGH
            image = image.Scale(size, size)
        result = wx.BitmapFromImage(image)
    return result
message += ' – %s' % status["album"]
if "date" in status and status["date"].isnumeric():
    message += " (%s)" % status["date"]

center = NSUserNotificationCenter.defaultUserNotificationCenter()
notification = NSUserNotification.alloc().init()
notification.setTitle_(title)
notification.setSubtitle_(subtitle)
notification.setInformativeText_(message)

if cover:  # the song has an embedded cover image
    data = NSData.alloc().initWithBytes_length_(cover, len(cover))
    image_rep = NSBitmapImageRep.alloc().initWithData_(data)
    size = NSMakeSize(CGImageGetWidth(image_rep), CGImageGetHeight(image_rep))
    image = NSImage.alloc().initWithSize_(size)
    image.addRepresentation_(image_rep)
    if config.itunes_style_notification:
        notification.setValue_forKey_(image, "_identityImage")
    else:
        notification.setValue_forKey_(
            NSImage.alloc().initByReferencingFile_(config.app_icon),
            "_identityImage")
        notification.setContentImage_(image)
else:
    # song has no cover image, show an icon
    notification.setValue_forKey_(
        NSImage.alloc().initByReferencingFile_(config.app_icon),
        "_identityImage")

if config.display_mode == 1: