def array2PIL(arr, size):
    """Convert a numpy array into a PIL Image of the given *size*.

    A 3-D array is treated as RGBA pixel data (RGB input gets a fully
    opaque alpha channel appended); a 2-D array becomes a greyscale
    ('L') image.  Anything else raises ConversionException.
    """
    ndim = len(arr.shape)
    if ndim == 3:
        mode = 'RGBA'
        # Collapse to one pixel per row so an alpha column can be appended.
        arr = arr.reshape(arr.shape[0] * arr.shape[1], arr.shape[2])
        if len(arr[0]) == 3:
            # RGB input: append an all-255 (opaque) alpha channel.
            alpha = 255 * numpy.ones((len(arr), 1), numpy.uint8)
            arr = numpy.c_[arr, alpha]
        return Image.frombuffer(mode, size, arr.tostring(), 'raw', mode, 0, 1)
    elif ndim == 2:
        mode = 'L'
        return Image.frombuffer(mode, size, arr.tostring(), 'raw', mode, 0, 1)
    else:
        raise ConversionException("I don't know how to convert this array.")
def takeSnapshot(self, *args, **kwargs):
    """tango

    Grab one frame from the "image" channel and return it as a PIL
    image.  If file paths are supplied in *args* the image is saved
    there and True is returned on success.  Pass bw=True to force a
    greyscale conversion of RGB data.  Returns False when PIL support
    is unavailable; returns None if grabbing/saving fails.
    """
    if canTakeSnapshots:
        imgChan = self.getChannelObject("image")
        rawimg = imgChan.getValue()
        w = self.getWidth()
        h = self.getHeight()
        # 3 bytes per pixel means RGB; anything else is assumed 8-bit grey.
        if len(rawimg) == w * h * 3:
            img_type = "RGB"
        else:
            img_type = "L"
        try:
            if kwargs.get("bw", False) and img_type == "RGB":
                img = Image.frombuffer(img_type, (self.getWidth(), self.getHeight()), rawimg).convert("L")
            else:
                img = Image.frombuffer(img_type, (self.getWidth(), self.getHeight()), rawimg)
            # Camera buffer is bottom-up; flip to normal orientation.
            img = img.transpose(Image.FLIP_TOP_BOTTOM)
        except Exception:
            # Bug fix: was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt.
            logging.getLogger("HWR").exception("%s: could not save snapshot", self.name())
        else:
            if len(args):
                try:
                    img.save(*args)
                except Exception:
                    # Bug fix: narrowed from a bare "except:" as above.
                    logging.getLogger("HWR").exception("%s: could not save snapshot", self.name())
                else:
                    return True
            else:
                return img
    else:
        logging.getLogger("HWR").error("%s: could not take snapshot: sorry PIL is not available :-(", self.name())
        return False
def takeSnapshot(self, *args):
    """taco

    Read one CCD frame, swap it from BGR to RGB byte order, and either
    return the PIL image or, when file paths are supplied, save it and
    return True.  Returns False when PIL is not available.
    """
    if not canTakeSnapshots:
        logging.getLogger("HWR").error(
            "%s: could not take snapshot: sorry PIL is not available :-(",
            self.name(),
        )
        return False
    rawimg = self.device.DevCcdRead(1)
    try:
        img = Image.frombuffer(
            "RGB", (self.getWidth(), self.getHeight()), rawimg)
        # The camera delivers BGR; re-pack the bytes into RGB order.
        pixmap = img.tostring("raw", "BGR")
        img = Image.frombuffer("RGB", img.size, pixmap)
    except BaseException:
        logging.getLogger("HWR").exception(
            "%s: could not save snapshot", self.name())
    else:
        if not len(args):
            return img
        try:
            img.save(*args)
        except BaseException:
            logging.getLogger("HWR").exception(
                "%s: could not save snapshot", self.name())
        else:
            return True
def main(session): """ This example uses the explore method. """ # Get the services ALNavigation and ALMotion. navigation_service = session.service("ALNavigation") motion_service = session.service("ALMotion") # Wake up robot motion_service.wakeUp() # Explore the environement, in a radius of 2 m. radius = 8.0 error_code = navigation_service.explore(radius) if error_code != 0: print "Exploration failed." return # Saves the exploration on disk path = navigation_service.saveExploration() print "Exploration saved at path: \"" + path + "\"" # Start localization to navigate in map navigation_service.startLocalization() # Come back to initial position navigation_service.navigateToInMap([0., 0., 0.]) # Stop localization navigation_service.stopLocalization() # Retrieve and display the map built by the robot result_map = navigation_service.getMetricalMap() map_width = result_map[1] map_height = result_map[2] img = numpy.array(result_map[4]).reshape(map_width, map_height) img = (100 - img) * 2.55 # from 0..100 to 255..0 img = numpy.array(img, numpy.uint8) Image.frombuffer('L', (map_width, map_height), img, 'raw', 'L', 0, 1).show()
def _getRectAsImage(rect): import Image dc, bitmap = getDCAndBitMap(rect=rect) try: bmpInfo = bitmap.GetInfo() # bmpInfo is something like { # 'bmType': 0, 'bmWidthBytes': 5120, 'bmHeight': 1024, # 'bmBitsPixel': 32, 'bmPlanes': 1, 'bmWidth': 1280} ##print bmpInfo size = (bmpInfo['bmWidth'], bmpInfo['bmHeight']) if bmpInfo['bmBitsPixel'] == 32: # Use GetBitmapBits and BGRX if the bpp == 32, because # it's ~15% faster than the method below. data = bitmap.GetBitmapBits(True) # asString=True return Image.frombuffer( 'RGB', size, data, 'raw', 'BGRX', 0, 1) else: # If bpp != 32, we cannot use GetBitmapBits, because it # does not return a 24/32-bit image when the screen is at # a lower color depth. try: data, size = getBGR32(dc, bitmap) except DIBFailed, e: raise GrabFailed("getBGR32 failed. Error was " + str(e)) # BGR, 32-bit line padding, origo in lower left corner return Image.frombuffer( 'RGB', size, data, 'raw', 'BGR', (size[0] * 3 + 3) & -4, -1) finally: deleteDCAndBitMap(dc, bitmap)
def extract(self, source): """Extract an image from *source*. If the image is supported an instance of PIL's Image is returned, otherwise None. """ p = Parser() f = open_pds(source) if self.log: self.log.debug("Parsing '%s'" % (source)) self.labels = p.parse(f) if self.log: self.log.debug("Found %d labels" % (len(self.labels))) if self._check_image_is_supported(): if self.log: self.log.debug("Image in '%s' is supported" % (source)) dim = self._get_image_dimensions() loc = self._get_image_location() imageSampleBits = int(self.labels['IMAGE']['SAMPLE_BITS']) imageSampleType = self.labels['IMAGE']['SAMPLE_TYPE'] md5Checksum = self._get_image_checksum() if self.log: self.log.debug("Image dimensions should be %s" % (str(dim))) if self.log: self.log.debug("Seeking to image data at %d" % (loc)) f.seek(loc) if imageSampleBits == 8: readSize = dim[0] * dim[1] elif imageSampleBits == 16: readSize = dim[0] * dim[1] * 2 print readSize if self.log: self.log.debug("Seek successful, reading data (%s)" % (readSize)) # rawImageData = f.readline() # f.seek(-int(self.labels["RECORD_BYTES"]), os.SEEK_CUR) rawImageData = f.read(readSize) if md5Checksum: rawImageChecksum = hashlib.md5(rawImageData).hexdigest() checksumVerificationPassed = rawImageChecksum == md5Checksum and True or False if not checksumVerificationPassed: if self.log: self.log.debug("Secure hash verification failed") if self.raisesChecksumError: errorMessage = "Verification failed! Expected '%s' but got '%s'." % (md5Checksum, rawImageChecksum) raise ChecksumError, errorMessage else: if self.log: self.log.debug("Secure hash verification passed") if self.log: self.log.debug("Read successful (len: %d), creating Image object" % (len(rawImageData))) # The frombuffer defaults may change in a future release; # for portability, change the call to read: # frombuffer(mode, size, data, 'raw', mode, 0, 1). 
if (imageSampleBits == 16) and imageSampleType == ('MSB_INTEGER'): #img = Image.frombuffer('I', dim, rawImageData, 'raw', 'I;16BS', 0, 1) img = Image.frombuffer('F', dim, rawImageData, 'raw', 'F;16B', 0, 1) img = ImageMath.eval("convert(a/16.0, 'L')", a=img) else: img = Image.frombuffer('L', dim, rawImageData, 'raw', 'L', 0, 1) if self.log: self.log.debug("Image result: %s" % (str(img))) self.log.debug("Image info: %s" % (str(img.info))) self.log.debug("Image mode: %s" % (str(img.mode))) self.log.debug("Image size: %s" % (str(img.size))) else: if self.log: self.log.error("Image is not supported '%s'" % (source)) img = None f.close() return img, self.labels
def takeSnapshot(self, *args):
    """taco

    Capture one CCD frame as a PIL image (byte-swapped from BGR to
    RGB).  With file-path arguments the image is saved and True is
    returned on success; without arguments the image itself is
    returned.  Returns False if PIL is unavailable.
    """
    if canTakeSnapshots:
        rawimg = self.device.DevCcdRead(1)
        try:
            frame = Image.frombuffer(
                "RGB", (self.getWidth(), self.getHeight()), rawimg
            )
            # Camera output is BGR — repack into RGB byte order.
            repacked = frame.tostring("raw", "BGR")
            img = Image.frombuffer("RGB", frame.size, repacked)
        except BaseException:
            logging.getLogger("HWR").exception(
                "%s: could not save snapshot", self.name()
            )
        else:
            if not len(args):
                return img
            try:
                img.save(*args)
            except BaseException:
                logging.getLogger("HWR").exception(
                    "%s: could not save snapshot", self.name()
                )
            else:
                return True
    else:
        logging.getLogger("HWR").error(
            "%s: could not take snapshot: sorry PIL is not available :-(",
            self.name(),
        )
        return False
def saveScreenshot(self):
    """Prompt for a file name and write the current GL framebuffer to it."""
    filename = str(
        QtGui.QFileDialog.getSaveFileName(self, 'Select file to save to'))
    height = self.widget.size().height()
    width = self.widget.size().width()
    # Read the raw RGBA pixels straight out of the framebuffer.
    pixels = glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE)
    Image.frombuffer("RGBA", (width, height), pixels,
                     "raw", "RGBA", 0, 0).save(filename)
def saveScreenshot(self):
    """Ask the user for a target file and dump the GL framebuffer there."""
    target = str(QtGui.QFileDialog.getSaveFileName(self, 'Select file to save to'))
    h = self.widget.size().height()
    w = self.widget.size().width()
    # Capture the framebuffer contents as raw RGBA bytes.
    raw = glReadPixels(0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE)
    Image.frombuffer("RGBA", (w, h), raw, "raw", "RGBA", 0, 0).save(target)
def display(self):
    """Render one frame: handle keys, draw the scene, and optionally
    record frames or save snapshots under tmp/."""
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    w, h = glGetFloatv(GL_VIEWPORT)[2:4]
    aspect = w / h
    glFrustum(-1.0 * aspect, 1.0 * aspect, -1.0, 1.0, 1.0, 100.0)
    key = self.keyboard.pressed()
    if key == chr(27) or key == 'q' or key == 'Q':
        # Escape or q quits the application.
        sys.exit(0)
    elif key == 's' or key == 'S':
        # Request a single snapshot on this frame.
        self.snap = True
        if not os.path.exists('tmp'):
            os.makedirs('tmp')
    elif key == 'r' or key == 'R':
        # Toggle continuous frame recording.
        if not self.record:
            self.record = True
            if not os.path.exists('tmp'):
                os.makedirs('tmp')
        else:
            self.record = False
    glTranslate(0.0, 0.0, -self.camera.dist)
    glRotate(self.camera.tilt, 1.0, 0.0, 0.0)
    glRotate(self.camera.rot, 0.0, 1.0, 0.0)
    self.mouse.apply()
    if self.physics and self.physics.follow:
        # Keep the camera centred on the followed body.
        focus = self.physics.follow.getRelPointPos((0, 0, 0))
        glTranslated(-focus[0], -focus[1], -focus[2])
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    glPushMatrix()
    if self.scene:
        self.scene.draw()
    if self.physics:
        self.physics.draw()
    glPopMatrix()
    glFlush()
    if self.record:
        # Only keep every self.sampl-th frame of the recording.
        if self.smp % self.sampl == 0:
            buf = glReadPixels(0, 0, w, h, GL_RGB, GL_UNSIGNED_BYTE)
            img = Image.frombuffer('RGB', (w, h), buf, 'raw', 'RGB', 0, -1)
            img.thumbnail((250, 250), Image.ANTIALIAS)
            img.save("tmp/frame%05d.png" % self.frm)
            self.frm += 1
        self.smp += 1
    if self.snap:
        buf = glReadPixels(0, 0, w, h, GL_RGB, GL_UNSIGNED_BYTE)
        img = Image.frombuffer('RGB', (w, h), buf, 'raw', 'RGB', 0, -1)
        img.save("tmp/snap%03d.png" % self.snp)
        self.snap = False
        self.snp += 1
def __init__(self, fileType, fileBody):
    """Parse an image record from *fileBody*.

    The 12-byte header holds size, width and height as three ints.
    fileType 0x10 is palettised data followed by a 256-entry RGB
    palette; fileType 0x11 is raw BGR data that is swapped to RGB.
    """
    self.size, self.width, self.height = struct.unpack("3i", fileBody[:12])
    self.image = fileBody[12 : 12 + self.size]
    if fileType == 0x10:
        # Palette image: 256 RGB triplets follow the pixel data.
        self.palette = fileBody[12 + self.size : 12 + self.size + 3 * 256]
        self.image = Image.frombuffer("RGB", (self.width, self.height),
                                      self.image, "raw", "P", 0, 1)
        self.image.putpalette(self.palette)
    elif fileType == 0x11:
        self.palette = ''
        # Swap each BGR triplet into RGB order.
        swapped = ''
        for i in range(0, len(self.image), 3):
            swapped += self.image[i + 2] + self.image[i + 1] + self.image[i]
        self.image = swapped
        self.image = Image.frombuffer('RGB', (self.width, self.height),
                                      self.image, 'raw', 'RGB', 0, 1)
def main(): I = Image.open(file1) I2 = Image.open(file2) i1 = np.array(I) i2 = np.array(I2) diff = np.float64(i1)-i2 I = Image.frombuffer('L',(diff.shape), (np.asarray(diff)).astype(np.uint8) ,'raw','L',0,1) #I = np.array print diff.sum() I.save('file3.png') print np.asarray(diff<np.float64(10)).astype(np.uint8)#.sum() I2 = Image.frombuffer('L',(diff.shape), 255*np.asarray(diff<np.float64(5)).astype(np.uint8) ,'raw','L',0,1) I2.save('file3t.png')
def _bgra_surf_to_rgba_string(self):
    """Return the cairo surface's BGRA pixels re-encoded as an RGBA string."""
    width = self.cairo_surface.get_width()
    height = self.cairo_surface.get_height()
    # Decode the surface buffer as BGRA, then re-emit it as RGBA bytes.
    img = Image.frombuffer(
        'RGBA', (width, height),
        self.cairo_surface.get_data(),
        'raw', 'BGRA', 0, 1)
    return img.tostring('raw', 'RGBA', 0, 1)
def _capture_interesting_frame(self, pad, buffer):
    """
    this is the buffer probe which processes every frame that is being
    played by the capture pipeline. since the first frame is not random,
    we skip this frame by setting the self.first_analyze flag.
    if the current frame is found intersting we save it. if not we
    decrease self.tries to limit the number of tries
    """
    if self.first_analyze:
        # The very first frame is never interesting; skip it once.
        self.first_analyze = False
        return True
    # Pull the frame dimensions out of the buffer's capabilities.
    caps = buffer.get_caps()
    height, width = caps[0]['height'], caps[0]['width']
    # Wrap the raw RGB buffer data in a PIL image.
    frame = Image.frombuffer('RGB', (width, height), buffer.data,
                             'raw', 'RGB', 0, 1)
    # Compare the greyscale variance against the boredom threshold:
    # a flat (low-variance) frame is considered uninteresting.
    if ImageStat.Stat(ImageOps.grayscale(frame)).var[0] > \
            BORING_IMAGE_VARIANCE:
        # Success — keep this interesting frame.
        self.image = frame
    else:
        # Useless frame; burn one retry.
        self.tries -= 1
    return True
def DatatoImage(self):  #, data, size, mode):
    """convert raw data to image

    Builds/refreshes self.imbuff from self.data, rescales it to the
    display size, and pushes the result into self.wximage / self.image.
    """
    #x = debugtime()
    width, height = self.im_size
    d_size = (int(width * self.scale), int(height * self.scale))
    data = self.data.flatten()
    #x.add('flatten')
    # Rebuild the PIL buffer when missing, when the display size
    # changed, or always for greyscale frames.
    if self.imbuff is None or d_size != self.d_size or self.im_mode == 'L':
        try:
            self.imbuff = Image.frombuffer(self.im_mode, self.im_size, data,
                                           'raw', self.im_mode, 0, 1)
            #x.add('made image')
        except Exception:
            # Bug fix: was a bare "except:", which also caught
            # SystemExit/KeyboardInterrupt.
            return
    self.d_size = d_size = (int(width * self.scale), int(height * self.scale))
    if self.imbuff.size != d_size:
        self.imbuff = self.imbuff.resize(d_size)
        #x.add('resized imbuff')
    if self.wximage.GetSize() != self.imbuff.size:
        self.wximage = wx.EmptyImage(d_size[0], d_size[1])
    #x.add('created wximage %s ' % (repr(self.wximage.GetSize())))
    if self.im_mode == 'L':
        self.wximage.SetData(self.imbuff.convert('RGB').tostring())
    elif self.im_mode == 'RGB':
        data.shape = (3, width, height)
        self.wximage = wx.ImageFromData(width, height, data)
    #x.add('set wx image wximage : %i, %i ' % d_size)
    self.image.SetValue(self.wximage)
def open_svg_as_image(fn, width, height): for i in range(10): try: tmpfd, tmppath = tempfile.mkstemp(".png") tmpfile = os.fdopen(tmpfd,'w') file = StringIO.StringIO() svgsurface = cairo.SVGSurface (file, width, height) svgctx = cairo.Context(svgsurface) svg = rsvg.Handle(file=fn) svgwidth = svg.get_property('width') svgheight = svg.get_property('height') svgctx.scale(width/float(svgwidth),height/float(svgheight)) svg.render_cairo(svgctx) svgsurface.write_to_png(tmpfile) svgsurface.finish() tmpfile.close() tmpfile = open(tmppath, 'r') imgsurface = cairo.ImageSurface.create_from_png(tmpfile) imgwidth = imgsurface.get_width() imgheight = imgsurface.get_height() data = imgsurface.get_data() im = Image.frombuffer("RGBA",(imgwidth, imgheight), data ,"raw","RGBA",0,1) os.remove(tmppath) break except MemoryError: print 'Memory Error. Try again ...' continue else: raise Exception('Problem loading image {0}'.format(fn)) return im
def save(self, data):
    """Save the first element of *data* to self.name via PIL.

    RGB records are expanded into an HxWx3 uint8 array first; int64
    arrays are saved in mode 'I'; a known PIL 1.1.7 TIFF bug with
    uint16 data is worked around via frombuffer.
    """
    if _im is None:
        raise NotImplementedError
    d = data[0]
    if isinstance(d, _RGB):
        # Expand the RGB record into an (h, w, 3) uint8 array.
        shape = list(d.shape)
        shape.append(3)
        c = _core.zeros(shape, dtype=_core._uint8)
        c[..., 0] = d.get_red(dtype=_core._uint8)
        c[..., 1] = d.get_green(dtype=_core._uint8)
        c[..., 2] = d.get_blue(dtype=_core._uint8)
        d = c
    try:
        if d.dtype == _core.int64:
            im = _im.fromarray(d, mode='I')
        else:
            im = _im.fromarray(d)
    except:
        if d.dtype == _core._uint16:
            # trap a known PIL 1.1.7 TIFF bug
            im = _im.frombuffer("I;16", tuple(reversed(d.shape)), d.data,
                                'raw', "I;16", 0, 1)
        else:
            raise
    im.save(self.name)
def getImage():
    """Load the raw screenshot dump and return it as an RGBA PIL image.

    The first 12 bytes of the file are a header and are skipped; the
    remainder is raw 1080x1920 RGBA pixel data.
    """
    mode = 'RGBA'
    size = (1080, 1920)
    # Bug fix: the file handle was never closed; a context manager
    # guarantees it is released even if frombuffer raises.
    with open('data/screenshot.raw', 'rb') as f:
        f.read(12)  # skip the 12-byte header
        im = Image.frombuffer(mode, size, f.read(), "raw", mode, 0, 1)
    return im
def get(self): print "the get request", self.request # parse the arguments #z=self.get_argument('z') # the usable parameters posted are: # x, y, dx : tileWidth, dy : tileHeight, # scale : scale, // defined as 1/2**zoomlevel # z : z # everything in bitmap pixel coordinates # create an example PNG w, h = 256, 256 img = np.empty((w, h), np.uint32) img.shape = h, w img[0, 0] = 0x800000FF img[:100, :100] = 0xFFFF0000 pilImage = Image.frombuffer('RGBA', (w, h), img, 'raw', 'RGBA', 0, 1) imgbuff = cStringIO.StringIO() pilImage.save(imgbuff, format='PNG') imgbuff.seek(0) self.set_header('Content-Type', 'image/png') self.write(imgbuff.read()) imgbuff.close() self.flush()
def to_image(im):
    """Convert a surface-like object *im* into a PIL Image.

    The surface's pixel format is mapped through mode_table; formats
    without an entry raise NotImplementedError.  The buffer is decoded
    bottom-up (orientation -1).
    """
    raw = im.get_sub_data(None, ((0, 0), im.size), im.format, im.stride)
    try:
        mode, raw_mode = mode_table[im.format]
    except TypeError:
        raise NotImplementedError
    return Image.frombuffer(mode, im.size, raw, "raw", raw_mode, 0, -1)
def array2PIL(arr, size):
    """Convert a flattened numpy array to a greyscale PIL Image.

    RGB rows get an opaque alpha channel appended before conversion.
    NOTE(review): mode is hard-coded to 'L' while the data handling
    still pads to RGBA — confirm this mismatch is intended.
    """
    #mode = 'RGBA'
    mode = 'L'
    # One pixel per row, so a column can be appended below.
    arr = arr.reshape(arr.shape[0] * arr.shape[1], arr.shape[2])
    if len(arr[0]) == 3:
        # Append a fully opaque alpha channel to 3-channel data.
        opaque = 255 * numpy.ones((len(arr), 1), numpy.uint8)
        arr = numpy.c_[arr, opaque]
    return Image.frombuffer(mode, size, arr.tostring(), 'raw', mode, 0, 1)
def DatatoImage(self):  #, data, size, mode):
    """convert raw data to image

    Refreshes self.imbuff from self.data, resizes to the scaled display
    size, and stores the result in self.wximage / self.image.
    """
    #x = debugtime()
    width, height = self.im_size
    d_size = (int(width * self.scale), int(height * self.scale))
    data = self.data.flatten()
    #x.add('flatten')
    # Rebuild the buffer when absent, when the target size changed,
    # or unconditionally for greyscale data.
    if self.imbuff is None or d_size != self.d_size or self.im_mode == 'L':
        try:
            self.imbuff = Image.frombuffer(self.im_mode, self.im_size, data,
                                           'raw', self.im_mode, 0, 1)
            #x.add('made image')
        except Exception:
            # Bug fix: narrowed from a bare "except:" so that
            # SystemExit/KeyboardInterrupt are not swallowed.
            return
    self.d_size = d_size = (int(width * self.scale), int(height * self.scale))
    if self.imbuff.size != d_size:
        self.imbuff = self.imbuff.resize(d_size)
        #x.add('resized imbuff')
    if self.wximage.GetSize() != self.imbuff.size:
        self.wximage = wx.EmptyImage(d_size[0], d_size[1])
    #x.add('created wximage %s ' % (repr(self.wximage.GetSize())))
    if self.im_mode == 'L':
        self.wximage.SetData(self.imbuff.convert('RGB').tostring())
    elif self.im_mode == 'RGB':
        data.shape = (3, width, height)
        self.wximage = wx.ImageFromData(width, height, data)
    #x.add('set wx image wximage : %i, %i ' % d_size)
    self.image.SetValue(self.wximage)
def extract(self, source):
    """Extract an image from *source*.

    If the image is supported an instance of PIL's Image is returned,
    otherwise None.
    """
    p = Parser()
    f = open_pds(source)
    if self.log:
        self.log.debug("Parsing '%s'" % (source))
    self.labels = p.parse(f)
    if self.log:
        self.log.debug("Found %d labels" % (len(self.labels)))
    if not self._check_image_is_supported():
        if self.log:
            self.log.error("Image is not supported '%s'" % (source))
        img = None
    else:
        if self.log:
            self.log.debug("Image in '%s' is supported" % (source))
        dim = self._get_image_dimensions()
        loc = self._get_image_location()
        if self.log:
            self.log.debug("Image dimensions should be %s" % (str(dim)))
        if self.log:
            self.log.debug("Seeking to image data at %d" % (loc))
        f.seek(loc)
        if self.log:
            self.log.debug("Seek successful, reading data")
        # rawImageData = f.readline()
        # f.seek(-int(self.labels["RECORD_BYTES"]), os.SEEK_CUR)
        # One byte per pixel (8-bit greyscale).
        rawImageData = f.read(dim[0] * dim[1])
        if self.log:
            self.log.debug("Read successful (len: %d), creating Image object" % (len(rawImageData)))
        # The frombuffer defaults may change in a future release;
        # for portability, change the call to read:
        # frombuffer(mode, size, data, 'raw', mode, 0, 1).
        img = Image.frombuffer('L', dim, rawImageData, 'raw', 'L', 0, 1)
        if self.log:
            self.log.debug("Image result: %s" % (str(img)))
        if self.log:
            self.log.debug("Image info: %s" % (str(img.info)))
        if self.log:
            self.log.debug("Image size: %s" % (str(img.size)))
    f.close()
    return img, self.labels
def save_tiff(self, data):
    """Write *data* (2-D array-like) to a temporary greyscale TIFF file.

    The array is transposed before saving.  Returns the path of the
    temporary file created.
    """
    data = data.T
    image = Image.frombuffer("L", data.shape, data.data, 'raw', 'L', 0, 1)
    fd, filename = tempfile.mkstemp(suffix=".tiff")
    # Bug fix: open in binary mode — text mode ('w') corrupts binary
    # TIFF output on platforms that translate line endings.
    with os.fdopen(fd, 'wb') as fp:
        image.save(fp=fp, format="tiff")
    return filename
def img2json(msg, topic_type):
    """Convert a ROS image message to a JSON string containing a
    base64-encoded JPEG data URI plus the frame dimensions."""
    # Pull width/height out of the layout dimension labels.
    for dim in msg.uint8_data.layout.dim:
        if dim.label == "width":
            width = dim.size
        elif dim.label == "height":
            height = dim.size
    if topic_type == "sensor_msgs/CompressedImage":
        data = msg.uint8_data.data
    elif topic_type == "sensor_msgs/Image":
        if msg.encoding == "mono":
            encoding = "L"
        elif msg.encoding == "rgb":
            encoding = "RGB"
        # NOTE(review): this unconditional override makes the branch
        # above dead code — confirm whether forcing greyscale is intended.
        encoding = "L"
        img = Image.frombuffer(encoding, (width, height), msg.uint8_data.data)
        # Sensor data arrives bottom-up; flip before encoding.
        img = img.transpose(Image.FLIP_TOP_BOTTOM)
        buf = StringIO.StringIO()
        img.save(buf, format='JPEG')
        data = buf.getvalue()
    data = base64.b64encode(data)
    # Assemble the JSON payload by hand.
    msg = '{'
    msg += ' "width" : "%d",' % width
    msg += ' "height" : "%d",' % height
    msg += ' "data" : "data:image/jpeg;base64,' + data + '"'
    msg += '}'
    return msg
def paint(self, d=None):
    """Render the layered structure as an RGB image.

    *d* is the sampling resolution; it defaults to 1/100 of the width.
    Each pixel is coloured by getcolor() according to which layer the
    sample point falls into.
    """
    T = self.w
    H = self.h
    # Default resolution: one sample per 1% of the width.
    if d == None:
        d = self.w / 100.0
    slices = self.thick
    # Build the cumulative y-boundaries of the layers.
    ymesh = []
    y = 0
    for thickness in slices:
        y += thickness
        ymesh += [y]
    ymesh = np.array(ymesh)
    ny = int(ymesh[-1] / float(d)) - 1
    nx = int(T / float(d)) - 1
    data = np.zeros([nx, ny, 3], dtype=np.uint8)
    # Sample colour for every (x, y) grid point.
    y = d
    for i in range(ny):
        # searchsorted tells us which layer this y falls into.
        n_layer = ymesh.searchsorted(y)
        x = d
        for k in range(nx):
            data[k, i, :] = self.getcolor(n_layer, x)
            x = x + d
        y = y + d
    return Image.frombuffer('RGB', (ny, nx), data,
                            'raw', 'RGB', 0, 1).transpose(2)
def imageToMask(image, frameNumber=-1):
    #take in a resouce image and return a mask of that image.
    #Store that mask in a dict refrenced by source frame number
    #so dont have to regenerate it each time.
    # NOTE(review): this function references self.masks / masks / self.mask
    # although it takes no self parameter — looks broken as written;
    # preserved as-is, confirm the intended enclosing scope.
    if frameNumber in self.masks:
        return masks[frameNumber]
    rows = image.size[1]
    cols = image.size[0]
    p = 0
    emptyPixel = (0, 0, 0, 255)
    #mask = Image.new("RGBA", image.size,(255,255,255,255))
    #pmask = Image.new("RGBA", (1,1), (0,0,0,0));
    pixels = image.getdata()
    # Black pixels become transparent (0); everything else opaque (0xff).
    levels = []
    for px in pixels:
        if px[0] == px[1] == px[2] == 0:
            levels.append(0x00)
        else:
            levels.append(0xff)
    maskData = bytearray(levels)
    mask = Image.frombuffer("L", image.size, maskData)
    # Rotating 180 degrees then mirroring is equivalent to a vertical flip.
    self.mask = mask.transpose(Image.ROTATE_180).transpose(Image.FLIP_LEFT_RIGHT)
    if frameNumber != -1:
        masks[frameNumber] = self.mask
    return mask
def upload():
    """Handle a photo upload POST: decode the base64 RGB payload, save
    it as a JPEG, and forward it to the remote bucket.

    Returns "OK green" on success, or an "ERROR n" string on failure.
    """
    log.info("received post request")
    global counter
    try:
        pic = request.forms['picture']
        width = request.forms['width']
        try:
            pic = base64.b64decode(pic)
        except TypeError as e:
            log.error("Base 64 encoding failed!")
            log.error(e)
            return "ERROR 1"
        # Height is derived from the payload length (3 bytes per pixel).
        size = (int(width), len(pic) / (3 * int(width)))
        # Bug fix: the log reported len(size) (always 2) instead of the
        # actual picture length.
        log.debug("length of picture %d size: %s" % (len(pic), repr(size)))
        img = Image.frombuffer('RGB', size, pic, 'raw', 'RGB', 0, 1)
        title = 'photo-' + str(counter)
        filename = title + ".jpg"
        counter += 1
        log.debug("storing picture")
        img.save(title + ".jpg", 'JPEG')
        content_type, data = encode_multipart_formdata(filename, title)
        result, dur = post_data(URL, BUCKET_ID, SECRET_TOKEN, content_type, data)
        log.info("Uploading the picture took %d seconds" % dur)
        log.debug(str(result))
        return "OK green"
    except Exception as e:
        log.error("post failed")
        log.error(e)
        return "ERROR 2"
def print_numbers(frame, counter, format, fontsize):
    """Generates an image that serves as a test pattern for
    encoding/decoding and accuracy tests."""
    try:
        # if PIL is installed this works:
        import Image, ImageFont, ImageDraw
    except ImportError:
        # if Pillow is installed, this works better:
        from PIL import Image, ImageFont, ImageDraw
    _, height, width = frame.shape
    # Render the frame number centred in the image.
    text = format % counter
    dim = min(width, height)
    font = ImageFont.truetype(DEFAULT_FONT, fontsize)
    (text_width, text_height) = font.getsize(text)
    x_pos = int((width - text_width) / 2)
    y_pos = int((height - text_height) / 2)
    # fromarray is buggy in Pillow-2.0.0 and frombytes is unavailable on
    # some old platforms, so build the image via frombuffer with the
    # full raw-decoder arguments (per the Pillow frombuffer docs, which
    # avoids the deprecation warning).
    img = Image.frombuffer(
        'RGB', (frame.shape[1], frame.shape[2]),
        frame.transpose(1, 2, 0).tostring(),
        'raw', "RGB", 0, 1)
    draw = ImageDraw.Draw(img)
    draw.text((x_pos, y_pos), text, font=font, fill=(255, 255, 255))
    return numpy.asarray(img).transpose(2, 0, 1)
def pixbuf_to_pil(pixbuf):
    """Return a PIL image created from <pixbuf>."""
    size = pixbuf.get_width(), pixbuf.get_height()
    rowstride = pixbuf.get_rowstride()
    raw = pixbuf.get_pixels()
    # Mode depends on whether the pixbuf carries an alpha channel.
    mode = pixbuf.get_has_alpha() and 'RGBA' or 'RGB'
    return Image.frombuffer(mode, size, raw, 'raw', mode, rowstride, 1)
def as_image(self):
    """
    This returns a screenshot of the window, and should work even when
    the window is not visible on the screen, i.e., positioned offscreen,
    or obscured by another window. (Does not appear to work for
    minimized windows, though - it just returns a blank image.)
    @return: Image.Image
    """
    # technique taken from:
    # http://stackoverflow.com/questions/19695214
    selfDC = self._makeDC()
    saveDC = selfDC.CreateCompatibleDC()
    saveBmp = win32ui.CreateBitmap()
    saveBmp.CreateCompatibleBitmap(selfDC, self.width, self.height)
    saveDC.SelectObject(saveBmp)
    # PrintWindow: 0=whole window, 1=client area
    windll.user32.PrintWindow(self.hwnd, saveDC.GetSafeHdc(), 1)
    info = saveBmp.GetInfo()
    bits = saveBmp.GetBitmapBits(True)
    # GDI bitmaps are BGRX; decode accordingly.
    img = Image.frombuffer('RGB',
                           (info['bmWidth'], info['bmHeight']),
                           bits, 'raw', 'BGRX', 0, 1)
    # Release all GDI resources before returning.
    win32gui.DeleteObject(saveBmp.GetHandle())
    saveDC.DeleteDC()
    selfDC.DeleteDC()
    win32gui.ReleaseDC(self.hwnd, self.hwndDC)
    return img
def show(bufferData):
    """Display one greyscale camera frame with pan/zoom applied, and
    optionally write it to disk when capture/save is requested."""
    global W_zoom, H_zoom, V_value, H_value, lx, ly, downFlag, saveFlag, saveNum
    frame = Image.frombuffer("L", (Width, Height), bufferData)
    img = np.array(frame)
    height, width = img.shape[:2]
    img2 = cv2.cvtColor(img, COLOR_BYTE2RGB)
    # One-shot capture requested from the command line.
    if args.name and capture:
        cv2.imwrite(args.name + "." + args.type, img2)
    if not args.show:
        return
    # Interactive save triggered by the UI flag.
    if saveFlag:
        saveFlag = False
        saveNum += 1
        name = ""
        if "bmp" == args.type:
            name = str(saveNum) + ".bmp"
        if "png" == args.type:
            name = str(saveNum) + ".png"
        if "jpg" == args.type:
            name = str(saveNum) + ".jpg"
        cv2.imwrite(name, img2)
    # Apply the current pan offset, then the zoom.
    M = np.float32([[1, 0, lx + H_value], [0, 1, ly + V_value]])
    img3 = cv2.warpAffine(img2, M, (width, height))
    img4 = cv2.resize(img3, (width + W_zoom, height + H_zoom),
                      interpolation=cv2.INTER_CUBIC)
    cv2.imshow("AR0134", img4)
    if capture:
        cv2.waitKey(100)
    else:
        cv2.waitKey(1)
def read_mk(fobj, xxx_todo_changeme3, size):
    """Read an uncompressed alpha mask from *fobj* and return it as a
    dict with a single 'A' band image."""
    # Alpha masks seem to be uncompressed
    (start, length) = xxx_todo_changeme3
    fobj.seek(start)
    pixel_count = size[0] * size[1]
    band = Image.frombuffer("L", size, fobj.read(pixel_count),
                            "raw", "L", 0, 1)
    return {"A": band}
def make_image_plots_detail(self, ray_sim):
    """Render the simulated detector image (inverted to white background)
    into the report canvas with a descriptive title."""
    normal = ray_sim.sim.tracing_impacts
    self.R.setTransform(ray_sim.detector.raw.focus()[0])
    self.R.title(
        "%.3f bandpass + %.3f degrees mosaicity (full widths); perfect optics"
        % (ray_sim.sim.bandpass, ray_sim.sim.mosaicity) +
        "\nEnergy %4.1f KeV; Detector distance %6.1f mm; Limiting resolution %6.2f Angstrom"
        % ((12.398 / (ray_sim.camera.lambda0 * 1E10)),
           ray_sim.camera.distance * 1000.,
           ray_sim.structure.limiting_resolution))
    # Invert intensities so the background prints white.
    data_array = 255 - ray_sim.image
    import numpy
    import Image
    imageout = Image.frombuffer(
        "L", data_array.focus(),
        data_array.as_numpy_array().astype(numpy.uint8).tostring(),
        "raw", "L", 0, 1)
    self.R.c.drawInlineImage(imageout, x=2 * cm, y=9 * cm,
                             width=15 * cm, height=15 * cm)
    self.R.c.showPage()
    return self
def getSnapshot(self):
    """
    snapShot() -> iplImg, (cameraPos6D, headAngles)
    Take a snapshot from the current subscribed video feed.
    """
    # Camera pose: getPosition(name, space={0,1,2}, useSensorValues)
    camPos = self.globals.motProxy.getPosition("CameraBottom", 2, True)
    headAngles = self.globals.motProxy.getAngles(["HeadPitch", "HeadYaw"], True)
    # Remote image: shot[0]=width, shot[1]=height, shot[6]=image-data
    shot = self.globals.vidProxy.getImageRemote("python_GVM")
    size = (shot[0], shot[1])
    picture = Image.frombuffer("RGB", size, shot[6], "raw", "BGR", 0, 1)
    # Wrap the snapshot in an OpenCV image header.
    image = cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(image, picture.tostring(), picture.size[0] * 3)
    # Convert to HSV for downstream colour processing.
    hsvImage = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
    cv.CvtColor(image, hsvImage, cv.CV_BGR2HSV)
    return (hsvImage, (camPos, headAngles))
def ShowSourceSlide(self): if os.path.exists(self.filename): arr = HamaWrapper.getSlideImage(self.filename) im = Image.frombuffer("RGB", (arr.shape[2], arr.shape[1]), arr.data) im.show() else: print "Error: File not found!"
def __init__(self, w, h, *elements, **kwargs):
    """Build a w x h cairo-backed UI surface and pre-render the given
    UiElement instances into it."""
    self._elements = []
    self._namedElements = {}
    # Back the cairo surface with a raw ARGB32 byte buffer.
    data = (ctypes.c_ubyte * w * h * 4)()
    pitch = w * 4
    surface = cairo.ImageSurface.create_for_data(
        data, cairo.FORMAT_ARGB32, w, h, pitch)
    context = cairo.Context(surface)
    self.surface = surface
    self.context = context
    self.size = w, h
    self.pitch = pitch
    for element in elements:
        if issubclass(element.__class__, UiElement):
            self.add(element)
            element.drawContext()
    # cairo stores BGRA; re-encode as RGBA bytes for the texture.
    imgdata = Image.frombuffer('RGBA', (w, h), surface.get_data(),
                               'raw', 'BGRA', 0, 1).tostring('raw', 'RGBA', 0, 1)
    img = image.create(w, h)
    img.set_data('RGBA', pitch, imgdata)
    super(CairoUI, self).__init__(img)
    self.needRedraw = False
    self.hasPointer = False
    self.interactive = kwargs.get('interactive', True)
    self.schedule(self.drawElements)
def camera():
    """ROS node: pull frames from the capture queues and publish them on
    /cam0/image_raw with their original timestamps."""
    global timestamps, images
    rospy.init_node("AR0134")
    pub = rospy.Publisher("/cam0/image_raw", RosImage, queue_size=1000)
    print(1)
    # Reader runs in the background filling the queues.
    thread.start_new_thread(readThread, (), )
    print(2)
    seq = 0
    while not rospy.is_shutdown():
        if timestamps.empty():
            continue
        #print(777)
        timestamp = timestamps.get()
        data = images.get()
        # Decode the greyscale frame and convert it to BGR for ROS.
        rawImage = Image.frombuffer("L", (Width, Height), data,
                                    "raw", "L", 0, 1)
        arrayImage = np.array(rawImage)
        cvImage = cv2.cvtColor(arrayImage, COLOR_BYTE2RGB)
        #cv2.imshow("AR0134",cvImage)
        #cv2.waitKey(1)
        rosImage = CvBridge().cv2_to_imgmsg(cvImage, "bgr8")
        rosImage.header.stamp = timestamp
        rosImage.header.frame_id = "cam0"
        rosImage.header.seq = seq
        seq = seq + 1
        #print(4)
        pub.publish(rosImage)
def onSaveImage(self, event=None):
    "prompts for and save image to file"
    defdir = os.getcwd()
    # Default name includes the current array counter.
    self.fname = "Image_%i.tiff" % self.ad_cam.ArrayCounter_RBV
    dlg = wx.FileDialog(None, message='Save Image as',
                        defaultDir=os.getcwd(),
                        defaultFile=self.fname,
                        style=wx.SAVE)
    path = None
    if dlg.ShowModal() == wx.ID_OK:
        path = os.path.abspath(dlg.GetPath())
    dlg.Destroy()
    # Only save when the dialog was confirmed and we have frame data.
    if path is not None and self.data is not None:
        Image.frombuffer(self.im_mode, self.im_size,
                         self.data.flatten(),
                         'raw', self.im_mode, 0, 1).save(path)
def __handleRawRect(self, x, y, w, h, data):
    """Paste a raw RGBX framebuffer rectangle into self.image at (x, y).

    At debug level >= DebugSaveFBUpdate the raw bytes are also dumped
    to tmpfb.raw for inspection.
    """
    if self.debug >= DebugSaveFBUpdate:
        # Bug fix: the dump was opened in text mode ("w"), which corrupts
        # binary data on platforms that translate line endings, and the
        # handle leaked if write() raised.  Use binary mode + with.
        with open("tmpfb.raw", "wb") as f:
            f.write(data)
    rimage = Image.frombuffer("RGBX", (w, h), data, "raw", "RGBX", 0, 1)
    self.image.paste(rimage, (x, y))
def get(self): print "the get request", self.request # parse the arguments #z=self.get_argument('z') # the usable parameters posted are: # x, y, dx : tileWidth, dy : tileHeight, # scale : scale, // defined as 1/2**zoomlevel # z : z # everything in bitmap pixel coordinates # create an example PNG w,h=256,256 img = np.empty((w,h),np.uint32) img.shape=h,w img[0,0]=0x800000FF img[:100,:100]=0xFFFF0000 pilImage = Image.frombuffer('RGBA',(w,h),img,'raw','RGBA',0,1) imgbuff = cStringIO.StringIO() pilImage.save(imgbuff, format='PNG') imgbuff.seek(0) self.set_header('Content-Type', 'image/png') self.write(imgbuff.read()) imgbuff.close() self.flush()
def __init__(self, w, h, *elements, **kwargs):
    """Create a cairo-backed UI layer of size w x h and draw any
    supplied UiElement children into it."""
    self._elements = []
    self._namedElements = {}
    # Allocate the raw ARGB32 pixel buffer backing the cairo surface.
    buf = (ctypes.c_ubyte * w * h * 4)()
    pitch = w * 4
    surface = cairo.ImageSurface.create_for_data(buf, cairo.FORMAT_ARGB32,
                                                 w, h, pitch)
    context = cairo.Context(surface)
    self.surface = surface
    self.context = context
    self.size = w, h
    self.pitch = pitch
    for child in elements:
        if issubclass(child.__class__, UiElement):
            self.add(child)
            child.drawContext()
    # Convert the BGRA cairo buffer into RGBA texture bytes.
    imgdata = Image.frombuffer('RGBA', (w, h), surface.get_data(),
                               'raw', 'BGRA', 0, 1).tostring('raw', 'RGBA', 0, 1)
    img = image.create(w, h)
    img.set_data('RGBA', pitch, imgdata)
    super(CairoUI, self).__init__(img)
    self.needRedraw = False
    self.hasPointer = False
    self.interactive = kwargs.get('interactive', True)
    self.schedule(self.drawElements)
def save_tiff(self, data):
    """Save *data* (2-D array-like, transposed) to a temporary greyscale
    TIFF and return the file path."""
    data = data.T
    image = Image.frombuffer("L", data.shape, data.data, 'raw', 'L', 0, 1)
    fd, filename = tempfile.mkstemp(suffix=".tiff")
    # Bug fix: was opened in text mode ('w'); binary mode is required
    # for TIFF output to avoid newline translation corrupting the file.
    with os.fdopen(fd, 'wb') as fp:
        image.save(fp=fp, format="tiff")
    return filename
def screen_grab(box=None):
    """Capture a region of the desktop and return it as a PIL RGB image.

    *box* is (left, top, right, bottom) in screen coordinates; when
    omitted, the default play-area rectangle is used.
    """
    if not box:
        box = (X_PAD + 1, Y_PAD + 1, X_PAD + WIDTH, Y_PAD + HEIGHT)
    w, h = box[2] - box[0], box[3] - box[1]
    hwnd = win32gui.GetDesktopWindow()
    #print hwnd
    wDC = win32gui.GetWindowDC(hwnd)
    dcObj = win32ui.CreateDCFromHandle(wDC)
    cDC = dcObj.CreateCompatibleDC()
    dataBitMap = win32ui.CreateBitmap()
    dataBitMap.CreateCompatibleBitmap(dcObj, w, h)
    cDC.SelectObject(dataBitMap)
    # Blit the requested screen region into our bitmap.
    cDC.BitBlt((0, 0), (w, h), dcObj, (box[0], box[1]), win32con.SRCCOPY)
    #import pdb; pdb.set_trace()
    bmpinfo = dataBitMap.GetInfo()
    bmpstr = dataBitMap.GetBitmapBits(True)
    # GDI bitmaps are BGRX-ordered.
    im = Image.frombuffer(
        'RGB',
        (bmpinfo['bmWidth'], bmpinfo['bmHeight']),
        bmpstr, 'raw', 'BGRX', 0, 1)
    #dataBitMap.SaveBitmapFile(cDC, os.getcwd() + '\\plot_snap__' + str(int(time.time())) + '.bmp')
    # Release GDI resources before returning.
    dcObj.DeleteDC()
    cDC.DeleteDC()
    win32gui.ReleaseDC(hwnd, wDC)
    return im
def save_screen(self,fn):
    """Saves a screenshot of the current GL framebuffer to file *fn*."""
    import Image
    # Read the full viewport back from OpenGL as raw RGBA bytes.
    screenshot = glReadPixels( 0,0, self.width, self.height, GL_RGBA, GL_UNSIGNED_BYTE)
    # NOTE(review): the last frombuffer argument (orientation) is 0;
    # PIL's raw decoder normally takes 1 (top-down) or -1 (bottom-up),
    # and glReadPixels returns rows bottom-up -- confirm the saved
    # image is not vertically flipped.
    im = Image.frombuffer("RGBA", (self.width, self.height), screenshot, "raw", "RGBA", 0, 0)
    print "Saving screen to",fn
    im.save(fn)
def _read_image(self): """ Returns an image obtained from the robot's camera. For communication issues you only can get 1 image per second :return: The image in PIL format :rtype: PIL Image """ # Thanks to http://www.dailyenigma.org/e-puck-cam.shtml for # the code for get the image from the camera msg = struct.pack(">bb", - ord("I"), 0) try: n = self._send(msg) self._debug("Reading Image: sending " + repr(msg) + " and " + str(n) + " bytes") # We have to add 3 to the size, because with the image we # get "mode", "width" and "height" size = self._cam_size + 3 img = self._recv(size) while len(img) != size: img += self._recv(size) # Create the PIL Image image = Image.frombuffer("RGB", (self._cam_width, self._cam_height), img, "raw", "BGR;16", 0, 1) image = image.rotate(180) self._pil_image = image except Exception, e: self._debug('Problem receiving an image: ', e)
def decrypt(input_filename, output_filename, decipher_mode):
    """Decrypt an image file with AES and write the result out as PNG."""
    source = Image.open(input_filename)
    # AES keys must be 16/24/32 bytes long; this demo uses a fixed one.
    key = "0123456789ABCDEF"
    iv = '2e39234ab3e59652'
    #iv = os.urandom(IV_SIZE)
    if decipher_mode == 'CBC':
        mode = AES.MODE_CBC
    else:
        mode = AES.MODE_ECB
    cipher = AES.new(key, mode, iv)
    raw = source.tostring()
    # Pad the input up to a whole number of cipher blocks.
    pad_len = block_size - len(raw) % block_size
    raw += pad_len * "~"
    # Run the block cipher and rebuild an image from the raw output.
    plain = cipher.decrypt(raw)
    result = Image.frombuffer("RGB", source.size, plain, 'raw', "RGB", 0, 1)
    result.save(output_filename, 'PNG')
    print("Decrypted using AES in " + decipher_mode + " mode and saved to \"" + output_filename + "\"!")
def encrypt(input_filename, output_filename, cipher_mode):
    """Encrypt an image file and write out the results as a JPEG."""
    source = Image.open(input_filename)
    # AES keys must be 16/24/32 bytes long; this demo uses a fixed one.
    key = "0123456789ABCDEF"
    mode = AES.MODE_CBC if cipher_mode == 'CBC' else AES.MODE_ECB
    iv = os.urandom(IV_SIZE)
    cipher = AES.new(key, mode, iv)
    raw = source.tostring()
    # Pad the plaintext up to a whole number of cipher blocks.
    raw += (BLOCK_SIZE - len(raw) % BLOCK_SIZE) * "~"
    ciphertext = cipher.encrypt(raw)
    scrambled = Image.frombuffer("RGB", source.size, ciphertext, 'raw', "RGB", 0, 1)
    # NOTE(review): JPEG compression is lossy, so the stored ciphertext
    # cannot be decrypted back exactly; PNG (as decrypt() uses) would be
    # lossless -- confirm whether round-tripping is actually required.
    scrambled.save(output_filename, 'JPEG')
    print("Encrypted using AES in " + cipher_mode + " mode and saved to \"" + output_filename + "\"!")
def asPilImage(self):
    """Return this frame converted to a PIL RGB image."""
    import Image
    rgb = self.convertTo(vpx.VPX_IMG_FMT_RGB24)
    return Image.frombuffer('RGB', (rgb.width, rgb.height), rgb.data)
def processFrame(self, frame):
    """Crop/rescale one video frame dict and return a new frame dict."""
    # Map the frame's pixel format onto a PIL mode and the dict key
    # holding the pixel payload.
    known_formats = {
        "RGB_interleaved": ("RGB", "rgb"),
        "RGBA_interleaved": ("RGBA", "rgb"),
        "Y_planar": ("L", "yuv"),
    }
    try:
        mode, datakey = known_formats[frame['pixformat']]
    except KeyError:
        raise ValueError("Can't process images with pixformat '" + frame['pixformat'] + "'")
    src = Image.frombuffer(mode, frame['size'], frame[datakey])
    # EXTENT transform crops self.cropbounds and rescales to self.newsize.
    cropped = src.transform(self.newsize, Image.EXTENT, self.cropbounds, Image.BICUBIC)
    # Shallow-copy the frame, swapping in the new pixels and size.
    newframe = dict(frame)
    newframe[datakey] = cropped.tostring()
    newframe['size'] = self.newsize
    return newframe
def screenshot(self):
    '''Reads a screenshot and returns an Image object.

    The Python Imaging Library must be available in order to use
    this function.'''
    # Pack the integer pixel values into a raw 8-bit string for PIL.
    raw = ''.join(map(chr, self.raw_screenshot()))
    img = Image.frombuffer('L', (320, 240), raw, 'raw', 'L', 0, 1)
    # Apply the device's colour palette to the greyscale indices.
    img.putpalette(self.palette)
    return img
def get_segmentation_img(request, segmentation_id):
    """Serve a stored segmentation as a PNG HTTP response.

    The file on disk is text-encoded; after txt2bin() the first 8 bytes
    of the header hold the big-endian width/height, followed by
    zlib-compressed ARGB pixel data that is rotated in place to RGBA.
    Raises Http404 when the segmentation file does not exist.
    """
    filename = os.path.join(settings.SEGMENTATION_ROOT, segmentation_id)
    if not os.path.exists(filename):
        raise Http404
    # 'with' guarantees the file is closed (the original left it to GC).
    with open(filename, 'rb') as f:
        content = f.read()
    binary_txt = txt2bin(content)
    header = binary_txt[0:32]
    (w, h) = struct.unpack("!ii", header[0:8])
    pixels = array.array('b', zlib.decompress(binary_txt[32:]))
    # Rotate each ARGB quadruple to RGBA: the alpha byte moves from the
    # front of the pixel to the back.
    for i in xrange(0, len(pixels), 4):
        a = pixels[i]
        pixels[i] = pixels[i + 1]
        pixels[i + 1] = pixels[i + 2]
        pixels[i + 2] = pixels[i + 3]
        pixels[i + 3] = a
    im = Image.frombuffer("RGBA", (w, h), pixels)
    response = HttpResponse(mimetype="image/png")
    im.save(response, "PNG")
    return response
    # Removed: unreachable trailing code after the return that referenced
    # an undefined `image_filename` (would NameError if ever reached),
    # plus leftover debug prints.