def createAlphaOnlyContext(width, height):
    """Create an alpha-only bitmap context of width x height pixels.

    Each pixel is a single byte holding that pixel's alpha value.
    Returns the context, or None if the raster cannot be allocated or
    the context cannot be created.
    """
    # Minimum bytes per row is 1 byte per sample * number of samples.
    bytesPerRow = width
    # Round up to the recommended alignment for bitmap data.
    bytesPerRow = COMPUTE_BEST_BYTES_PER_ROW(bytesPerRow)
    # Allocate the raster: bytesPerRow * number of rows, zero-initialized.
    try:
        rasterData = objc.allocateBuffer(bytesPerRow * height)
    except MemoryError:
        return None
    # An alpha-only context takes a NULL color space and the
    # kCGImageAlphaOnly bitmap info value.
    context = CGBitmapContextCreate(rasterData, width, height, 8,
                                    bytesPerRow, None, kCGImageAlphaOnly)
    if context is None:
        # FIX: 'print >> sys.stderr' is Python 2-only syntax; write to
        # stderr in a way that works on both Python 2 and 3.
        sys.stderr.write("Couldn't create the context!\n")
        return None
    # Keep the raster buffer alive for as long as the context lives.
    _rasterDataForContext[context] = rasterData
    # Clear the context bits so they are initially transparent.
    CGContextClearRect(context, CGRectMake(0, 0, width, height))
    return context
def createRGBRampDataProvider():
    """Create a CGDataProvider backed by a 256x256, 24-bit RGB ramp.

    Red increases left to right, green increases top to bottom, and
    blue is always zero.
    """
    width = 256
    height = 256
    imageDataSize = width * height * 3
    dataP = objc.allocateBuffer(imageDataSize)
    # BUG FIX: 'idx = 0' had been swallowed into a comment, so 'idx'
    # was unbound at its first use below (NameError at runtime).
    idx = 0
    # Build the ramp, 3 bytes (R, G, B) per pixel.
    for g in xrange(height):
        for r in xrange(width):
            dataP[idx] = chr(r)
            dataP[idx + 1] = chr(g)
            dataP[idx + 2] = '\0'
            idx += 3
    # Once this data provider is created, the data associated with
    # dataP MUST stay available until Quartz calls the data releaser.
    dataProvider = CGDataProviderCreateWithData(
        None, dataP, imageDataSize, None)
    return dataProvider
def createRGBBitmapContext(width, height, wantDisplayColorSpace,
                           needsTransparentBitmap):
    """Create a 32-bit-per-pixel RGB bitmap context.

    The pixel format is 8-bit ARGB (premultiplied alpha) when
    needsTransparentBitmap is true, otherwise 8-bit XRGB (no alpha).
    wantDisplayColorSpace selects the display color space instead of
    the generic calibrated RGB space.  bytesPerRow is rounded up to the
    recommended BEST_BYTE_ALIGNMENT multiple.  Returns the context, or
    None on allocation/creation failure.
    """
    # Minimum bytes per row is 4 bytes per pixel * number of pixels.
    bytesPerRow = width * 4
    # Round up to the recommended alignment.
    bytesPerRow = COMPUTE_BEST_BYTES_PER_ROW(bytesPerRow)
    # Allocate the raster, zero-initialized.
    try:
        rasterData = objc.allocateBuffer(int(bytesPerRow * height))
    except MemoryError:
        return None
    # Pick the requested color space.
    if wantDisplayColorSpace:
        cs = Utilities.getTheDisplayColorSpace()
    else:
        cs = Utilities.getTheCalibratedRGBColorSpace()
    if needsTransparentBitmap:
        transparency = Quartz.kCGImageAlphaPremultipliedFirst
    else:
        # BUG FIX: both branches previously assigned
        # kCGImageAlphaPremultipliedFirst; an opaque (XRGB) context
        # must skip the alpha channel instead.
        transparency = Quartz.kCGImageAlphaNoneSkipFirst
    context = Quartz.CGBitmapContextCreate(rasterData, width, height, 8,
                                           bytesPerRow, cs, transparency)
    if context is None:
        return None
    # Keep the raster buffer alive for as long as the context lives.
    _rasterDataForContext[context] = rasterData
    # Either clear the rect or paint with opaque white, depending on
    # the needs of the caller.
    if needsTransparentBitmap:
        # Clear the context bits so they are transparent.
        Quartz.CGContextClearRect(
            context, Quartz.CGRectMake(0, 0, width, height))
    else:
        # The destination is opaque: paint the bits to white first.
        Quartz.CGContextSaveGState(context)
        Quartz.CGContextSetFillColorWithColor(
            context, Utilities.getRGBOpaqueWhiteColor())
        Quartz.CGContextFillRect(
            context, Quartz.CGRectMake(0, 0, width, height))
        Quartz.CGContextRestoreGState(context)
    return context
def testPixelFormat(self):
    """NSBitmapImageRep in NSAlphaFirstBitmapFormat accepts both a NULL
    plane tuple and a caller-supplied packed 4-byte-per-pixel buffer."""
    width = 16
    height = 16

    # Let the image rep allocate its own backing store first.
    rep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bitmapFormat_bytesPerRow_bitsPerPixel_(
        None, width, height, 8, 3, NO, NO,
        NSDeviceRGBColorSpace, NSAlphaFirstBitmapFormat, 0, 0
    )
    self.assertIsInstance(rep, NSBitmapImageRep)

    # Fill a packed buffer with a constant 1,2,3,4 per-pixel pattern.
    packed = objc.allocateBuffer(width * height * 4)
    for base in range(0, width * height * 4, 4):
        packed[base] = 1
        packed[base + 1] = 2
        packed[base + 2] = 3
        packed[base + 3] = 4
    planes = (packed, None, None, None, None)

    # test non-planar, premade buffer
    rep2 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bitmapFormat_bytesPerRow_bitsPerPixel_(
        planes, width, height, 8, 3, NO, NO,
        NSDeviceRGBColorSpace, NSAlphaFirstBitmapFormat, 0, 0
    )
    self.assertIsInstance(rep2, NSBitmapImageRep)
    raw = rep2.bitmapData()
    self.assertEqual(len(raw), width * height * 4)
def testPixelFormat(self):
    """Creating alpha-first RGB reps works with and without a
    pre-filled non-planar pixel buffer."""
    width = 16
    height = 16

    img = NSBitmapImageRep.alloc(
    ).initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bitmapFormat_bytesPerRow_bitsPerPixel_(
        None, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace,
        NSAlphaFirstBitmapFormat, 0, 0)
    self.assertIsInstance(img, NSBitmapImageRep)

    # Build the pixel data with an explicit running byte offset.
    plane = objc.allocateBuffer(width * height * 4)
    pos = 0
    for _ in range(width * height):
        plane[pos] = 1
        plane[pos + 1] = 2
        plane[pos + 2] = 3
        plane[pos + 3] = 4
        pos += 4

    # test non-planar, premade buffer
    img2 = NSBitmapImageRep.alloc(
    ).initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bitmapFormat_bytesPerRow_bitsPerPixel_(
        (plane, None, None, None, None), width, height, 8, 3, NO, NO,
        NSDeviceRGBColorSpace, NSAlphaFirstBitmapFormat, 0, 0)
    self.assertIsInstance(img2, NSBitmapImageRep)
    pixels = img2.bitmapData()
    self.assertEqual(len(pixels), width * height * 4)
def createAlphaOnlyContext(width, height):
    """Create an alpha-only bitmap context: width x height pixels, one
    byte per pixel holding the pixel's alpha value.

    Returns None if the raster cannot be allocated or the context
    cannot be created.
    """
    # FIX: dropped the stray C-style semicolons throughout.
    # Minimum bytes per row is 1 byte per sample * number of samples.
    bytesPerRow = width
    # Round up to the recommended alignment.
    bytesPerRow = COMPUTE_BEST_BYTES_PER_ROW(bytesPerRow)
    # Zero-initialized raster: bytesPerRow * number of rows.
    try:
        rasterData = objc.allocateBuffer(bytesPerRow * height)
    except MemoryError:
        return None
    # Alpha-only contexts use a NULL color space with kCGImageAlphaOnly.
    context = CGBitmapContextCreate(rasterData, width, height, 8,
                                    bytesPerRow, None, kCGImageAlphaOnly)
    if context is None:
        # FIX: 'print >>sys.stderr' is Python 2-only syntax; write to
        # stderr portably instead.
        sys.stderr.write("Couldn't create the context!\n")
        return None
    # The buffer must outlive the context; keep a reference to it.
    _rasterDataForContext[context] = rasterData
    # Clear the context bits so they are initially transparent.
    CGContextClearRect(context, CGRectMake(0, 0, width, height))
    return context
def grab_desktop(self, return_type=0):
    """
    grab desktop screenshot.

    :type return_type: int
    :param return_type: 0 for pil image, 1 for color matrix, 2 for gray matrix
    :rtype: numpy.ndarray or Image.Image
    :return: the screenshot image
    """
    ret_image = None
    # Create screenshot as CGImage (everything currently on screen).
    image = CG.CGWindowListCreateImage(CG.CGRectInfinite,
                                       CG.kCGWindowListOptionOnScreenOnly,
                                       CG.kCGNullWindowID,
                                       CG.kCGWindowImageDefault)
    width = CG.CGImageGetWidth(image)
    height = CG.CGImageGetHeight(image)
    # Allocate enough space to hold our pixels (4 bytes per RGBA pixel).
    imageData = objc.allocateBuffer(int(4 * width * height))
    # Create the bitmap context; drawing into it fills imageData.
    bitmapContext = CG.CGBitmapContextCreate(
        imageData,  # image data we just allocated...
        width,  # width
        height,  # height
        8,  # 8 bits per component
        4 * width,  # bytes per pixel times number of pixels wide
        CG.CGImageGetColorSpace(
            image),  # use the same colorspace as the original image
        CG.kCGImageAlphaPremultipliedLast)  # use premultiplied alpha
    # Render the screenshot into the context, i.e. into imageData.
    CG.CGContextDrawImage(bitmapContext,
                          CG.CGRectMake(0, 0, width, height),
                          image)
    #Now your rawData contains the image data in the RGBA8888 pixel format.
    #del bitmapContext
    # Wrap the raw RGBA bytes in a PIL image (no copy of the pattern args).
    ret_image = Image.frombuffer("RGBA", (width, height), imageData,
                                 "raw", "RGBA", 0, 1)
    #return ret_image
    #ret_image.save('out.jpg')
    # Drop the alpha channel; downstream consumers expect RGB.
    ret_image = ret_image.convert('RGB')
    #ret_image.save('out.jpg')
    # NOTE(review): self.get_color_mat / self.get_gray_mat presumably
    # hold the constants 1 and 2 from the docstring -- confirm against
    # the class definition.
    if return_type == self.get_color_mat:
        return self._get_cv_color_mat(ret_image)
    if return_type == self.get_gray_mat:
        mat = self._get_cv_color_mat(ret_image)
        return self._get_cv_gray_mat(mat)
    else:
        return ret_image
def createRGBBitmapContext(width, height, wantDisplayColorSpace,
                           needsTransparentBitmap):
    """Create a 32-bit-per-pixel RGB bitmap context.

    The pixel format is 8-bit ARGB (premultiplied alpha) when
    needsTransparentBitmap is true, otherwise 8-bit XRGB (no alpha).
    wantDisplayColorSpace selects the display color space instead of
    the generic calibrated RGB space.  Returns the context, or None on
    allocation/creation failure.
    """
    # FIX: dropped the stray C-style semicolons.
    # Minimum bytes per row is 4 bytes per pixel * number of pixels.
    bytesPerRow = width * 4
    # Round up to the recommended alignment.
    bytesPerRow = COMPUTE_BEST_BYTES_PER_ROW(bytesPerRow)
    # Allocate the raster, zero-initialized.
    try:
        rasterData = objc.allocateBuffer(int(bytesPerRow * height))
    except MemoryError:
        return None
    # Pick the requested color space.
    if wantDisplayColorSpace:
        cs = Utilities.getTheDisplayColorSpace()
    else:
        cs = Utilities.getTheCalibratedRGBColorSpace()
    if needsTransparentBitmap:
        transparency = kCGImageAlphaPremultipliedFirst
    else:
        # BUG FIX: both branches previously assigned
        # kCGImageAlphaPremultipliedFirst; an opaque (XRGB) context
        # must skip the alpha channel instead.
        transparency = kCGImageAlphaNoneSkipFirst
    context = CGBitmapContextCreate(rasterData, width, height, 8,
                                    bytesPerRow, cs, transparency)
    if context is None:
        return None
    # Keep the raster buffer alive for as long as the context lives.
    _rasterDataForContext[context] = rasterData
    # Either clear the rect or paint with opaque white, depending on
    # the needs of the caller.
    if needsTransparentBitmap:
        # Clear the context bits so they are transparent.
        CGContextClearRect(context, CGRectMake(0, 0, width, height))
    else:
        # The destination is opaque: paint the bits to white first.
        CGContextSaveGState(context)
        CGContextSetFillColorWithColor(context,
                                       Utilities.getRGBOpaqueWhiteColor())
        CGContextFillRect(context, CGRectMake(0, 0, width, height))
        CGContextRestoreGState(context)
    return context
def testBuffer(self):
    """allocateBuffer returns a writable 10000-byte buffer that
    supports item and equal-length slice assignment."""
    b = objc.allocateBuffer(10000)
    self.assertEquals(len(b), 10000)
    for i in range(0, 10000):
        b[i] = chr(i % 256)
    # Overlapping and equal-length slice assignments must succeed.
    b[5:10] = b[1:6]
    b[5:10] = 'abcde'
    try:
        # A mismatched-length slice assignment must raise TypeError.
        b[5:10] = 'abcdefghijk'
    except TypeError as r:
        # BUG FIX: the original used "is not 0", which compares object
        # identity against an int literal instead of the value; check
        # the message prefix by value instead.
        if not str(r).startswith(
                "right operand length must match slice length"):
            raise
def testBuffer(self):
    """Buffers from allocateBuffer are mutable and reject slice
    assignments whose lengths do not match."""
    b = objc.allocateBuffer(10000)
    self.assertEquals(len(b), 10000)
    for i in range(0, 10000):
        b[i] = chr(i % 256)
    b[5:10] = b[1:6]
    b[5:10] = 'abcde'
    try:
        b[5:10] = 'abcdefghijk'
    except TypeError as r:
        # BUG FIX: "is not 0" tested identity against an int literal;
        # use a value comparison on the message prefix.
        if not str(r).startswith(
                "right operand length must match slice length"):
            raise
def testBuffer(self):
    """On Python 2, allocateBuffer returns a mutable buffer supporting
    slice assignment; on Python 3 it returns a bytearray."""
    b = objc.allocateBuffer(10000)
    self.assertEqual(len(b), 10000)
    if sys.version_info[0] == 2:
        for i in range(0, 10000):
            b[i] = chr(i % 256)
        b[5:10] = b[1:6]
        b[5:10] = 'abcde'
        try:
            # A mismatched-length slice assignment must raise TypeError.
            b[5:10] = 'abcdefghijk'
        except TypeError as r:
            # BUG FIX: "is not 0" compared identity with an int literal
            # rather than value; check the message prefix by value.
            if not str(r).startswith(
                    "right operand length must match slice length"):
                raise
    else:
        self.assertIsInstance(b, bytearray)
def createRedGreenRampImageData(width, height, size):
    """Fill a freshly allocated `size`-byte buffer with a 24-bit RGB
    ramp: red increases left to right, green increases top to bottom,
    blue stays zero.

    Returns the buffer, or None if it cannot be allocated.
    """
    try:
        raster = objc.allocateBuffer(size)
    except MemoryError:
        return None
    # Walk the buffer three bytes (one pixel) at a time.
    offset = 0
    for row in range(height):
        for col in range(width):
            raster[offset + 0] = col
            raster[offset + 1] = row
            raster[offset + 2] = 0
            offset += 3
    return raster
def renderImage(self, dest, properties):
    """Run the document's Core Image filter chain over its image and
    write the result to the image destination `dest` with the given
    `properties`.
    """
    cur_image = self.doc.image
    sourceRect = cur_image.extent()
    width = sourceRect.size.width
    height = sourceRect.size.height
    # Back the bitmap context with enough memory for 32-bit pixels.
    imageData = objc.allocateBuffer(int(4 * width * height))
    bitmapContext = CGBitmapContextCreate(
        imageData, width, height, 8, 4 * width,
        CGImageGetColorSpace(cur_image.cgImageRepresentation()),
        kCGImageAlphaPremultipliedFirst)
    ciContext = CIContext.contextWithCGContext_options_(bitmapContext, None)
    # Chain the filters: each filter's output becomes the next input.
    # FIX: renamed the loop variable so it no longer shadows the
    # builtin 'filter'.
    for ci_filter in self.filters:
        ci_filter.setValue_forKey_(cur_image, 'inputImage')
        cur_image = ci_filter.valueForKey_('outputImage')
    # Render the final image and hand it to the image destination.
    rendered = ciContext.createCGImage_fromRect_(cur_image, sourceRect)
    CGImageDestinationAddImage(dest, rendered, properties)
    CGImageDestinationFinalize(dest)
def testBuffer(self):
    """allocateBuffer hands back a mutable bytearray of exactly the
    requested length."""
    size = 10000
    buf = objc.allocateBuffer(size)
    self.assertEqual(len(buf), size)
    self.assertIsInstance(buf, bytearray)
def testImageData(self):
    # Exercises NSBitmapImageRep's planar and packed (non-planar)
    # initializers with 256x256 RGB ramp planes, plus a grey-scale rep
    # and a rep-allocated planar buffer.
    width = 256
    height = 256

    # Red plane: vertical ramp -- each row filled with its row index.
    rPlane = array.array("B")
    rPlane.fromlist(
        [y % 256 for y in range(0, height) for x in range(0, width)])

    # Python 3 has no 'buffer' builtin; memoryview provides the view
    # semantics the test needs.
    if sys.version_info[0] == 3:
        buffer = memoryview
    else:
        from __builtin__ import buffer
    rPlane = buffer(rPlane)

    # Green plane: also a vertical ramp (the inner loop only fixes the
    # per-row length).
    gPlane = array.array("B")
    gPlane.fromlist(
        [y % 256 for y in range(0, height) for x in range(width, 0, -1)])
    gPlane = buffer(gPlane)

    # Blue plane: horizontal ramp -- column index.
    bPlane = array.array("B")
    bPlane.fromlist(
        [x % 256 for y in range(0, height) for x in range(0, width)])
    bPlane = buffer(bPlane)

    dataPlanes = (rPlane, gPlane, bPlane, None, None)

    # test planar, pre-made buffer
    i1 = NSBitmapImageRep.alloc(
    ).initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
        dataPlanes, width, height, 8, 3, NO, YES, NSDeviceRGBColorSpace,
        0, 0)
    self.assertTrue(i1)

    # Interleave the three planes into one packed RGB buffer.
    singlePlane = objc.allocateBuffer(width * height * 3)
    for i in range(0, width * height):
        si = i * 3
        if sys.version_info[0] == 2:
            singlePlane[si] = rPlane[i]
            singlePlane[si + 1] = gPlane[i]
            singlePlane[si + 2] = bPlane[i]
        else:
            # Indexing the view may yield an int or a 1-char string
            # depending on the underlying object; normalize to an int.
            def as_byte(v):
                if isinstance(v, int):
                    return v
                else:
                    return ord(v)

            singlePlane[si] = as_byte(rPlane[i])
            singlePlane[si + 1] = as_byte(gPlane[i])
            singlePlane[si + 2] = as_byte(bPlane[i])
    dataPlanes = (singlePlane, None, None, None, None)

    # test non-planar, premade buffer
    i2 = NSBitmapImageRep.alloc(
    ).initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
        dataPlanes, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace,
        0, 0)

    # test grey scale
    greyPlane = array.array("B")
    greyPlane.fromlist(
        [x % 256 for x in range(0, height) for x in range(0, width)])
    greyPlanes = (greyPlane, None, None, None, None)
    greyImage = NSBitmapImageRep.alloc(
    ).initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
        greyPlanes, width, height, 8, 1, NO, YES,
        NSCalibratedWhiteColorSpace, width, 8, )

    # test planar, NSBIR allocated buffer
    i3 = NSBitmapImageRep.alloc(
    ).initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
        None, width, height, 8, 3, NO, YES, NSDeviceRGBColorSpace, 0, 0)

    # The rep must expose exactly three writable RGB planes and no
    # alpha or other planes.
    r, g, b, a, o = i3.getBitmapDataPlanes_()
    self.assertTrue(r)
    self.assertTrue(g)
    self.assertTrue(b)
    self.assertTrue(not a)
    self.assertTrue(not o)
    self.assertEqual(len(r), len(rPlane))
    self.assertEqual(len(g), len(gPlane))
    self.assertEqual(len(b), len(bPlane))

    # Writing through the returned plane views must be supported.
    r[0:len(r)] = rPlane[0:len(rPlane)]
    g[0:len(g)] = gPlane[0:len(gPlane)]
    b[0:len(b)] = bPlane[0:len(bPlane)]

    # The packed rep must give back exactly the bytes supplied above.
    bitmapData = i2.bitmapData()
    self.assertEqual(len(bitmapData), len(singlePlane))
    try:
        memoryview
    except NameError:
        self.assertEqual(bitmapData, singlePlane)
    else:
        self.assertEqual(bitmapData.tobytes(), singlePlane)

    # getPixel_atX_y_ fills and returns the caller-supplied array.
    a = array.array("L", [255] * 4)
    self.assertArgIsOut(NSBitmapImageRep.getPixel_atX_y_, 0)
    d = i2.getPixel_atX_y_(a, 1, 1)
    self.assertIs(a, d)
def testImageData(self):
    # Exercises planar and packed NSBitmapImageRep initializers with
    # 256x256 RGB ramp planes, a grey-scale rep, and a rep-allocated
    # planar buffer.
    width = 256
    height = 256

    # Red plane: vertical ramp -- each row filled with its row index.
    rPlane = array.array('B')
    rPlane.fromlist( [y%256 for y in range(0,height) for x in range(0,width)] )

    # Python 3 has no 'buffer' builtin; memoryview provides the view
    # semantics the test needs.
    if sys.version_info[0] == 3:
        buffer = memoryview
    else:
        from __builtin__ import buffer
    rPlane = buffer(rPlane)

    # Green plane: also a vertical ramp (the inner loop only fixes the
    # per-row length).
    gPlane = array.array('B')
    gPlane.fromlist( [y%256 for y in range(0,height) for x in range(width,0,-1)] )
    gPlane = buffer(gPlane)

    # Blue plane: horizontal ramp -- column index.
    bPlane = array.array('B')
    bPlane.fromlist( [x%256 for y in range(0,height) for x in range(0,width)] )
    bPlane = buffer(bPlane)
    dataPlanes = (rPlane, gPlane, bPlane, None, None)

    # test planar, pre-made buffer
    i1 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes, width, height, 8, 3, NO, YES, NSDeviceRGBColorSpace, 0, 0)
    self.assertTrue(i1)

    # Interleave the three planes into one packed RGB buffer.
    singlePlane = objc.allocateBuffer(width*height*3)
    for i in range(0, width*height):
        si = i * 3
        if sys.version_info[0] == 2:
            singlePlane[si] = rPlane[i]
            singlePlane[si+1] = gPlane[i]
            singlePlane[si+2] = bPlane[i]
        else:
            # Indexing the view may yield an int or a 1-char string;
            # normalize to an int before storing.
            def as_byte(v):
                if isinstance(v, int):
                    return v
                else:
                    return ord(v)
            singlePlane[si] = as_byte(rPlane[i])
            singlePlane[si+1] = as_byte(gPlane[i])
            singlePlane[si+2] = as_byte(bPlane[i])

    dataPlanes = (singlePlane, None, None, None, None)
    # test non-planar, premade buffer
    i2 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace, 0, 0)

    # test grey scale
    greyPlane = array.array('B')
    greyPlane.fromlist( [x%256 for x in range(0,height) for x in range(0,width)] )
    greyPlanes = (greyPlane, None, None, None, None)
    greyImage = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(greyPlanes, width, height, 8, 1, NO, YES, NSCalibratedWhiteColorSpace, width, 8)

    # test planar, NSBIR allocated buffer
    i3 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(None, width, height, 8, 3, NO, YES, NSDeviceRGBColorSpace, 0, 0)

    # The rep must hand back exactly three writable RGB planes.
    r,g,b,a,o = i3.getBitmapDataPlanes_()
    self.assertTrue(r)
    self.assertTrue(g)
    self.assertTrue(b)
    self.assertTrue(not a)
    self.assertTrue(not o)
    self.assertEqual(len(r), len(rPlane))
    self.assertEqual(len(g), len(gPlane))
    self.assertEqual(len(b), len(bPlane))

    # Writing through the returned plane views must be supported.
    r[0:len(r)] = rPlane[0:len(rPlane)]
    g[0:len(g)] = gPlane[0:len(gPlane)]
    b[0:len(b)] = bPlane[0:len(bPlane)]

    # The packed rep must expose the exact bytes supplied above.
    bitmapData = i2.bitmapData()
    self.assertEqual(len(bitmapData), len(singlePlane))
    try:
        memoryview
    except NameError:
        self.assertEqual(bitmapData, singlePlane)
    else:
        self.assertEqual(bitmapData.tobytes(), singlePlane)

    # getPixel_atX_y_ fills and returns the caller-supplied array.
    a = array.array('L', [255]*4)
    self.assertArgIsOut(NSBitmapImageRep.getPixel_atX_y_, 0)
    d = i2.getPixel_atX_y_(a, 1, 1)
    self.assertIs(a, d)
def IISaveImage(image, url, width, height):
    """Save `image`, drawn transformed into a width x height bitmap, as
    a JPEG at `url` via ImageIO.

    Returns True on success, False otherwise.
    """
    result = False
    # If there is no image, no destination, or the width/height is 0,
    # then fail early.  (NOTE: 'assert' is stripped under -O; kept for
    # interface compatibility with existing callers.)
    assert (image is not None) and (url is not None) and \
        (width != 0.0) and (height != 0.0)
    # Try to create a jpeg image destination at the url given to us.
    imageDest = Quartz.CGImageDestinationCreateWithURL(
        url, LaunchServices.kUTTypeJPEG, 1, None)
    if imageDest is not None:
        # Build the final image in a CGBitmapContext.
        # Allocate enough space to hold our pixels (4 bytes per pixel).
        imageData = objc.allocateBuffer(int(4 * width * height))
        # Create the bitmap context.
        bitmapContext = Quartz.CGBitmapContextCreate(
            imageData,   # image data we just allocated...
            width,       # width
            height,      # height
            8,           # 8 bits per component
            4 * width,   # bytes per pixel times number of pixels wide
            Quartz.CGImageGetColorSpace(image.fImageRef),  # same colorspace as the original
            Quartz.kCGImageAlphaPremultipliedFirst)        # premultiplied alpha
        # Check that all that went well.
        if bitmapContext is not None:
            # Draw the (transformed) image into the bitmap context;
            # this also applies any orientation correction.
            IIDrawImageTransformed(
                image, bitmapContext,
                Quartz.CGRectMake(0.0, 0.0, width, height))
            # Obtain a CGImageRef from the bitmap context for ImageIO.
            imageIOImage = Quartz.CGBitmapContextCreateImage(bitmapContext)
            # Check if we have additional properties from the original image.
            if image.fProperties is not None:
                # Inspect the orientation property; a non-default value
                # must be reset since the draw above already applied it.
                orientation = IIGetImageOrientation(image)
                if orientation != 1:
                    # Replace the orientation key in a duplicate of the
                    # property dictionary and pass that to ImageIO.
                    # FIX: removed the stray C-style semicolons here.
                    prop = CFDictionaryCreateMutableCopy(
                        None, 0, image.fProperties)
                    orientation = 1
                    prop[Quartz.kCGImagePropertyOrientation] = orientation
                    # And add the image with the new properties.
                    Quartz.CGImageDestinationAddImage(
                        imageDest, imageIOImage, prop)
                else:
                    # Already in the default orientation: save it with
                    # the original properties.
                    Quartz.CGImageDestinationAddImage(
                        imageDest, imageIOImage, image.fProperties)
            else:
                # No properties: add the image without any.
                Quartz.CGImageDestinationAddImage(
                    imageDest, imageIOImage, None)
            del bitmapContext
        # Finalize the image destination.
        result = Quartz.CGImageDestinationFinalize(imageDest)
        del imageDest
    return result
def testImageData(self):
    """Exercise planar/packed NSBitmapImageRep initializers with
    256x256 RGB ramp planes, a grey-scale rep, and a rep-allocated
    planar buffer."""
    width = 256
    height = 256

    # Red plane: vertical ramp (row index).
    rPlane = array.array('B')
    rPlane.fromlist( [y%256 for y in range(0,height) for x in range(0,width)] )
    rPlane = buffer(rPlane)

    gPlane = array.array('B')
    # BUG FIX: the original comprehension bound 'x' in both loops and
    # read 'y%256', silently reusing the 'y' leaked from the rPlane
    # comprehension (yielding a constant plane).  Bind 'y' over the
    # rows as in the intended vertical ramp.
    gPlane.fromlist( [y%256 for y in range(0,height) for x in range(width,0,-1)] )
    gPlane = buffer(gPlane)

    bPlane = array.array('B')
    # Outer loop renamed to 'y' so it no longer shadows the inner 'x'
    # (same values as before: a horizontal ramp).
    bPlane.fromlist( [x%256 for y in range(0,height) for x in range(0,width)] )
    bPlane = buffer(bPlane)
    dataPlanes = (rPlane, gPlane, bPlane, None, None)

    # test planar, pre-made buffer
    i1 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes, width, height, 8, 3, NO, YES, NSDeviceRGBColorSpace, 0, 0)
    self.assert_(i1)

    # Interleave the three planes into one packed RGB buffer.
    singlePlane = objc.allocateBuffer(width*height*3)
    # Generalized: iterate width*height instead of hard-coded 256*256.
    for i in range(0, width*height):
        si = i * 3
        singlePlane[si] = rPlane[i]
        singlePlane[si+1] = gPlane[i]
        singlePlane[si+2] = bPlane[i]
    dataPlanes = (singlePlane, None, None, None, None)

    # test non-planar, premade buffer
    i2 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace, 0, 0)

    # test grey scale
    greyPlane = array.array('B')
    greyPlane.fromlist( [x%256 for y in range(0,height) for x in range(0,width)] )
    greyPlanes = (greyPlane, None, None, None, None)
    greyImage = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(greyPlanes, width, height, 8, 1, NO, YES, NSCalibratedWhiteColorSpace, width, 8)

    # test planar, NSBIR allocated buffer
    i3 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(None, width, height, 8, 3, NO, YES, NSDeviceRGBColorSpace, 0, 0)

    # The rep must hand back exactly three writable RGB planes.
    r,g,b,a,o = i3.getBitmapDataPlanes_()
    self.assert_(r)
    self.assert_(g)
    self.assert_(b)
    self.assert_(not a)
    self.assert_(not o)
    self.assertEquals(len(r), len(rPlane))
    self.assertEquals(len(g), len(gPlane))
    self.assertEquals(len(b), len(bPlane))
    r[0:len(r)] = rPlane[0:len(rPlane)]
    g[0:len(g)] = gPlane[0:len(gPlane)]
    b[0:len(b)] = bPlane[0:len(bPlane)]

    # The packed rep must expose the exact bytes supplied above.
    bitmapData = i2.bitmapData()
    self.assertEquals(len(bitmapData), len(singlePlane))
    self.assertEquals(bitmapData, singlePlane)

    # getPixel_atX_y_ fills and returns the caller-supplied array.
    a = array.array('I', [255]*4)
    self.failUnlessArgIsOut(NSBitmapImageRep.getPixel_atX_y_, 0)
    d = i2.getPixel_atX_y_(a, 1, 1)
    self.failUnless(a is d)
def testImageData(self):
    """Exercise planar/packed NSBitmapImageRep initializers with
    256x256 RGB ramp planes, a grey-scale rep, and a rep-allocated
    planar buffer."""
    width = 256
    height = 256

    # Red plane: vertical ramp (row index).
    rPlane = array.array('B')
    rPlane.fromlist(
        [y % 256 for y in range(0, height) for x in range(0, width)])
    rPlane = buffer(rPlane)

    gPlane = array.array('B')
    # BUG FIX: the original comprehension bound 'x' in both loops and
    # read 'y%256', silently reusing the 'y' leaked from the rPlane
    # comprehension (yielding a constant plane).  Bind 'y' over the
    # rows as in the intended vertical ramp.
    gPlane.fromlist(
        [y % 256 for y in range(0, height) for x in range(width, 0, -1)])
    gPlane = buffer(gPlane)

    bPlane = array.array('B')
    # Outer loop renamed to 'y' so it no longer shadows the inner 'x'
    # (same values as before: a horizontal ramp).
    bPlane.fromlist(
        [x % 256 for y in range(0, height) for x in range(0, width)])
    bPlane = buffer(bPlane)
    dataPlanes = (rPlane, gPlane, bPlane, None, None)

    # test planar, pre-made buffer
    i1 = NSBitmapImageRep.alloc(
    ).initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
        dataPlanes, width, height, 8, 3, NO, YES, NSDeviceRGBColorSpace,
        0, 0)
    self.assert_(i1)

    # Interleave the three planes into one packed RGB buffer.
    singlePlane = objc.allocateBuffer(width * height * 3)
    # Generalized: iterate width * height instead of hard-coded 256 * 256.
    for i in range(0, width * height):
        si = i * 3
        singlePlane[si] = rPlane[i]
        singlePlane[si + 1] = gPlane[i]
        singlePlane[si + 2] = bPlane[i]
    dataPlanes = (singlePlane, None, None, None, None)

    # test non-planar, premade buffer
    i2 = NSBitmapImageRep.alloc(
    ).initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
        dataPlanes, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace,
        0, 0)

    # test grey scale
    greyPlane = array.array('B')
    greyPlane.fromlist(
        [x % 256 for y in range(0, height) for x in range(0, width)])
    greyPlanes = (greyPlane, None, None, None, None)
    greyImage = NSBitmapImageRep.alloc(
    ).initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
        greyPlanes, width, height, 8, 1, NO, YES,
        NSCalibratedWhiteColorSpace, width, 8)

    # test planar, NSBIR allocated buffer
    i3 = NSBitmapImageRep.alloc(
    ).initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
        None, width, height, 8, 3, NO, YES, NSDeviceRGBColorSpace, 0, 0)

    # The rep must hand back exactly three writable RGB planes.
    r, g, b, a, o = i3.getBitmapDataPlanes_()
    self.assert_(r)
    self.assert_(g)
    self.assert_(b)
    self.assert_(not a)
    self.assert_(not o)
    self.assertEquals(len(r), len(rPlane))
    self.assertEquals(len(g), len(gPlane))
    self.assertEquals(len(b), len(bPlane))
    r[0:len(r)] = rPlane[0:len(rPlane)]
    g[0:len(g)] = gPlane[0:len(gPlane)]
    b[0:len(b)] = bPlane[0:len(bPlane)]

    # The packed rep must expose the exact bytes supplied above.
    bitmapData = i2.bitmapData()
    self.assertEquals(len(bitmapData), len(singlePlane))
    self.assertEquals(bitmapData, singlePlane)

    # getPixel_atX_y_ fills and returns the caller-supplied array.
    a = array.array('I', [255] * 4)
    self.failUnlessArgIsOut(NSBitmapImageRep.getPixel_atX_y_, 0)
    d = i2.getPixel_atX_y_(a, 1, 1)
    self.failUnless(a is d)
def IISaveImage(image, url, width, height): result = False # If there is no image, no destination, or the width/height is 0, then fail early. assert ((image is not None) and (url is not None) and (width != 0.0) and (height != 0.0)) # Try to create a jpeg image destination at the url given to us imageDest = Quartz.CGImageDestinationCreateWithURL( url, LaunchServices.kUTTypeJPEG, 1, None) if imageDest is not None: # And if we can, then we can start building our final image. # We begin by creating a CGBitmapContext to host our desintation image. # Allocate enough space to hold our pixels imageData = objc.allocateBuffer(int(4 * width * height)) # Create the bitmap context bitmapContext = Quartz.CGBitmapContextCreate( imageData, # image data we just allocated... width, # width height, # height 8, # 8 bits per component 4 * width, # bytes per pixel times number of pixels wide Quartz.CGImageGetColorSpace( image.fImageRef ), # use the same colorspace as the original image Quartz.kCGImageAlphaPremultipliedFirst, ) # use premultiplied alpha # Check that all that went well if bitmapContext is not None: # Now, we draw the image to the bitmap context IIDrawImageTransformed(image, bitmapContext, Quartz.CGRectMake(0.0, 0.0, width, height)) # We have now gotten our image data to the bitmap context, and correspondingly # into imageData. If we wanted to, we could look at any of the pixels of the image # and manipulate them in any way that we desire, but for this case, we're just # going to ask ImageIO to write this out to disk. # Obtain a CGImageRef from the bitmap context for ImageIO imageIOImage = Quartz.CGBitmapContextCreateImage(bitmapContext) # Check if we have additional properties from the original image if image.fProperties is not None: # If we do, then we want to inspect the orientation property. 
# If it exists and is not the default orientation, then we # want to replace that orientation in the destination file orientation = IIGetImageOrientation(image) if orientation != 1: # If the orientation in the original image was not the default, # then we need to replace that key in a duplicate of that dictionary # and then pass that dictionary to ImageIO when adding the image. prop = CFDictionaryCreateMutableCopy( None, 0, image.fProperties) orientation = 1 prop[Quartz.kCGImagePropertyOrientation] = orientation # And add the image with the new properties Quartz.CGImageDestinationAddImage(imageDest, imageIOImage, prop) else: # Otherwise, the image was already in the default orientation and we can # just save it with the original properties. Quartz.CGImageDestinationAddImage(imageDest, imageIOImage, image.fProperties) else: # If we don't, then just add the image without properties Quartz.CGImageDestinationAddImage(imageDest, imageIOImage, None) del bitmapContext # Finalize the image destination result = Quartz.CGImageDestinationFinalize(imageDest) del imageDest return result
def grab_desktop(self, return_type=0):
    """
    grab desktop screenshot.

    :type return_type: int
    :param return_type: 0 for pil image, 1 for color matrix, 2 for gray matrix
    :rtype: numpy.ndarray or Image.Image
    :return: the screenshot image
    """
    # Create screenshot as CGImage (everything currently on screen).
    image = CG.CGWindowListCreateImage(
        CG.CGRectInfinite, CG.kCGWindowListOptionOnScreenOnly,
        CG.kCGNullWindowID, CG.kCGWindowImageDefault)
    # FIX: dropped the stray C-style semicolons.
    width = CG.CGImageGetWidth(image)
    height = CG.CGImageGetHeight(image)
    # Allocate enough space to hold our pixels (4 bytes per RGBA pixel).
    imageData = objc.allocateBuffer(int(4 * width * height))
    # Create the bitmap context; drawing into it fills imageData.
    bitmapContext = CG.CGBitmapContextCreate(
        imageData,      # image data we just allocated...
        width,          # width
        height,         # height
        8,              # 8 bits per component
        4 * width,      # bytes per pixel times number of pixels wide
        CG.CGImageGetColorSpace(image),  # same colorspace as the original image
        CG.kCGImageAlphaPremultipliedLast)  # premultiplied alpha
    CG.CGContextDrawImage(
        bitmapContext, CG.CGRectMake(0, 0, width, height), image)
    # imageData now holds the pixels in the RGBA8888 format; wrap it in
    # a PIL image and drop the alpha channel.
    ret_image = Image.frombuffer(
        "RGBA", (width, height), imageData, "raw", "RGBA", 0, 1)
    ret_image = ret_image.convert('RGB')
    # NOTE(review): self.get_color_mat / self.get_gray_mat presumably
    # hold the constants 1 and 2 from the docstring -- confirm in the
    # class definition.
    if return_type == self.get_color_mat:
        return self._get_cv_color_mat(ret_image)
    if return_type == self.get_gray_mat:
        mat = self._get_cv_color_mat(ret_image)
        return self._get_cv_gray_mat(mat)
    else:
        return ret_image