Example #1
def grab_frame(wid=None, x1=None, y1=None, x2=None, y2=None):
    if platform.system() == "Darwin":  # Mac OS X
        image_ref = CGWindowListCreateImage(CGRectNull,
                                            kCGWindowListOptionIncludingWindow,
                                            wid,
                                            kCGWindowImageBoundsIgnoreFraming)

        pixeldata = CGDataProviderCopyData(CGImageGetDataProvider(image_ref))

        height = CGImageGetHeight(image_ref)
        width = CGImageGetWidth(image_ref)
        stride = CGImageGetBytesPerRow(image_ref)

        image = Image.frombuffer("RGBA", (width, height),
                                 pixeldata, "raw", "RGBA", stride, 1)

        return np.array(image)
    else:  # Linux
        w, h = x1 + x2, y1 + y2
        size = w * h
        objlength = size * 3

        grab.getScreen.argtypes = []
        result = (ctypes.c_ubyte * objlength)()

        grab.getScreen(x1, y1, w, h, result)
        return Image.frombuffer('RGB', (w, h), result, 'raw', 'RGB', 0, 1)
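The call pattern shared by every snippet on this page is Image.frombuffer(mode, size, data, decoder, rawmode, stride, orientation). A self-contained sketch (not taken from the example above) that exercises it with a dummy NumPy buffer:

import numpy as np
from PIL import Image

# 16x16 dummy grayscale pixels (values 0..255); stride 0 means rows are
# tightly packed, orientation 1 means the buffer's first row is the top row.
w, h = 16, 16
buf = np.arange(w * h, dtype=np.uint8).tobytes()
img = Image.frombuffer("L", (w, h), buf, "raw", "L", 0, 1)
print(img.size, img.mode)   # (16, 16) 'L'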
Example #2
def screenshotWindows(region, window):
    rect = None
    image = None
    
    if window != None:
        rect = list(win32gui.GetWindowRect(window))
        rect[2] = rect[2] - rect[0]
        rect[3] = rect[3] - rect[1]
        wDC = win32gui.GetWindowDC(window)
        dcObj = win32ui.CreateDCFromHandle(wDC)
        cDC = dcObj.CreateCompatibleDC()
        image = win32ui.CreateBitmap()
        image.CreateCompatibleBitmap(dcObj, rect[2], rect[3])
        cDC.SelectObject(image)
        cDC.BitBlt((0,0),(rect[2], rect[3]) , dcObj, (0,0), win32con.SRCCOPY)
        
        imageInfo = image.GetInfo()
        bpp = imageInfo["bmBitsPixel"]
        pixeldata = image.GetBitmapBits(True)
        
        pImg = None
        if bpp == 32:
            pImg = Image.frombuffer("RGBA", (imageInfo["bmWidth"], imageInfo["bmHeight"]), pixeldata, "raw", "BGRA")
        elif bpp == 24:
            pImg = Image.frombuffer("RGB", (imageInfo["bmWidth"], imageInfo["bmHeight"]), pixeldata)
        
        # Free Resources
        dcObj.DeleteDC()
        cDC.DeleteDC()
        win32gui.ReleaseDC(window, wDC)
        win32gui.DeleteObject(image.GetHandle())
        
        return pImg
Example #3
def imsave(filename, im):
    """
    Save an image. It will use SciPy (actually PIL) for any formats it supports.

    Additionally, the following extra formats are supported (extension must be right):
        MHA/MHD:    8-bit gray, 16-bit gray, 32-bit gray, 64-bit gray, float, double, 24-bit RGB
    
    PIL Common Supported Formats: (not all-inclusive)
        PNG:  1-bit BW, 8-bit gray, 16-bit gray, 24-bit RGB
        TIFF: 1-bit BW, 8-bit gray, 16-bit gray, 24-bit RGB
        BMP:  1-bit BW, 8-bit gray, 24-bit RGB
        JPEG: 8-bit gray, 24-bit RGB
        IM:   all?
    
    See http://www.pythonware.com/library/pil/handbook/formats.htm for more details
    MHA/MHD code is implemented in the metafile module.
    """
    from os.path import splitext
    
    ext = splitext(filename)[1].lower()
    if ext == '.mha':
        from .metafile import imsave_mha
        imsave_mha(filename, im)
    elif ext == '.mhd':
        from .metafile import imsave_mhd
        imsave_mhd(filename, im)
    elif im.dtype == IM_BIT:
        # Make sure data is actually saved as 1-bit data
        from PIL import Image
        im = im * uint8(255)
        Image.frombuffer('L', im.shape, im.data, 'raw', 'L', 0, 1).convert('1').save(filename)
    else:
        from scipy.misc import imsave
        imsave(filename, im)
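A self-contained sketch of the 1-bit branch above, using a boolean NumPy mask as input (the IM_BIT dtype and the uint8 helper come from the surrounding module and are not reproduced here):

import numpy as np
from PIL import Image

mask = np.zeros((32, 64), dtype=bool)                 # hypothetical binary mask
mask[8:24, 16:48] = True
buf = (mask.astype(np.uint8) * 255).tobytes()         # scale to 0/255 as the code above does
# Note frombuffer wants (width, height), i.e. the reverse of the NumPy shape.
img = Image.frombuffer('L', mask.shape[::-1], buf, 'raw', 'L', 0, 1).convert('1')
img.save('mask.png')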
Example #4
def _getRectAsImage(rect):
	try:
		from PIL import Image
	except:
		import Image

	dc, bitmap = getDCAndBitMap(rect=rect)
	try:
		bmpInfo = bitmap.GetInfo()
		# bmpInfo is something like {
		# 	'bmType': 0, 'bmWidthBytes': 5120, 'bmHeight': 1024,
		# 	'bmBitsPixel': 32, 'bmPlanes': 1, 'bmWidth': 1280}
		##print bmpInfo
		size = (bmpInfo['bmWidth'], bmpInfo['bmHeight'])

		if bmpInfo['bmBitsPixel'] == 32:
			# Use GetBitmapBits and BGRX if the bpp == 32, because
			# it's ~15% faster than the method below.
			data = bitmap.GetBitmapBits(True) # asString=True
			return Image.frombuffer(
				'RGB', size, data, 'raw', 'BGRX', 0, 1)
		else:
			# If bpp != 32, we cannot use GetBitmapBits, because it
			# does not return a 24/32-bit image when the screen is at
			# a lower color depth.
			try:
				data, size = getBGR32(dc, bitmap)
			except DIBFailed as e:
				raise GrabFailed("getBGR32 failed. Error was " + str(e))
			# BGR, 32-bit line padding, origo in lower left corner
			return Image.frombuffer(
				'RGB', size, data, 'raw', 'BGR', (size[0] * 3 + 3) & -4, -1)
	finally:
		deleteDCAndBitMap(dc, bitmap)
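The stride expression (size[0] * 3 + 3) & -4 in the BGR branch rounds each 24-bit row up to a 4-byte boundary, matching the dword alignment Windows uses for DIB rows; a quick stand-alone check of that arithmetic:

# Round-up-to-multiple-of-4 check for a few widths.
for width in (1, 2, 3, 4, 5, 640, 1280):
    row_bytes = width * 3
    padded = (row_bytes + 3) & -4
    assert padded % 4 == 0 and 0 <= padded - row_bytes < 4
    print(width, row_bytes, padded)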
Example #5
    def get_tile(self, corners):
        '''crop raster as per pair of world pixel coordinates'''

        ul = [corners[0][i]-self.world_ul[i] for i in (0, 1)]
        sz = [corners[1][i]-corners[0][i] for i in (0, 1)]

        tile_bands = [bnd.ReadRaster(ul[0], ul[1], sz[0], sz[1], sz[0], sz[1], GDT_Byte)
                    for bnd in self.bands]
        n_bands = len(self.bands)
        if n_bands == 1:
            opacity = 1
            mode = 'L'
            if self.transparency is not None:
                if chr(self.transparency) in tile_bands[0]:
                    colorset = set(tile_bands[0])
                    if len(colorset) == 1:  # fully transparent
                        return None, 0
                    else:                   # semi-transparent
                        opacity = -1
            img = Image.frombuffer('L', sz, tile_bands[0], 'raw', 'L', 0, 1)
        else:
            alpha = tile_bands[-1]
            if min(alpha) == '\xFF':        # fully opaque
                opacity = 1
                tile_bands = tile_bands[:-1]
                mode = 'RGB' if n_bands > 2 else 'L'
            elif max(alpha) == '\x00':      # fully transparent
                return None, 0
            else:                           # semi-transparent
                opacity = -1
                mode = 'RGBA' if n_bands > 2 else 'LA'
            img = Image.merge(mode, [Image.frombuffer('L', sz, bnd, 'raw', 'L', 0, 1) for bnd in tile_bands])
        return img, opacity
Example #6
  def tile2WebPNG(self, xdim, ydim, tile):
    """Create PNG Images and write to cache for the specified tile"""
    
    # Check if it is mcfc tile
    if self.colors is not None:
      return mcfcPNG(tile, self.colors, enhancement=4.0)

    # If it is not a mcfc tile
    else:
      ch = self.ds.getChannelObj(self.channels[0])
      # write it as a png file
      if ch.getChannelType() in IMAGE_CHANNELS + TIMESERIES_CHANNELS:

        if ch.getChannelDataType() in DTYPE_uint8:
          return Image.frombuffer ( 'L', [xdim,ydim], tile.flatten(), 'raw', 'L', 0, 1 )
        elif ch.getChannelDataType() in DTYPE_uint16:
          if ch.getWindowRange() != [0,0]:
            tile = np.uint8(tile)
            return Image.frombuffer ( 'L', [xdim,ydim], tile.flatten(), 'raw', 'L', 0, 1 )
          else:
            outimage = Image.frombuffer ( 'I;16', [xdim,ydim], tile.flatten(), 'raw', 'I;16', 0, 1)
            return outimage.point(lambda i:i*(1./256)).convert('L')
        elif ch.getChannelDataType() in DTYPE_uint32 :
          return Image.fromarray( tile[0,:,:], 'RGBA')

      elif ch.getChannelType() in ANNOTATION_CHANNELS:
        tile = tile[0,:]
        ndlib.recolor_ctype(tile, tile)
        return Image.frombuffer ( 'RGBA', [xdim,ydim], tile.flatten(), 'raw', 'RGBA', 0, 1 )

      else :
        logger.warning("Datatype {} not yet supported".format(ch.channel_type))
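The 'I;16' branch above squeezes 16-bit data into 8 bits by scaling with point(lambda i: i * (1./256)) and converting to 'L'; an equivalent self-contained NumPy sketch of that scaling (assuming little-endian uint16 tiles):

import numpy as np
from PIL import Image

tile16 = (np.arange(64 * 64, dtype=np.uint32) * 16).astype(np.uint16).reshape(64, 64)
# Dividing by 256 keeps only the high byte of each 16-bit sample.
tile8 = (tile16 >> 8).astype(np.uint8)
img8 = Image.frombuffer('L', (64, 64), tile8.tobytes(), 'raw', 'L', 0, 1)
print(img8.mode, img8.size, img8.getextrema())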
Example #7
def read_final_as_image(f, psdformat):
    width = psdformat["Basic"]['Width']
    height = psdformat["Basic"]['Height']
    channelcounts = psdformat["Basic"]['ChannelCounts']
    compression = psdformat["Image"]['Compression']
    ori = f.tell()
    f.seek(psdformat["Image"]['PosInFile'], os.SEEK_SET)
    
    # Greedily reading the rest of the file may pull in more data than needed
    # (little remains near the end anyway), but it is more efficient.
    cdata = DecodeFinalImageData(f.read(), compression, channelcounts, width, height)
    cdatas = []
    for ch in range(channelcounts):
        cdatas.append(cdata[ch*width*height:(ch+1)*width*height])
    
    #elif compression == 0:
    #    for ch in range(channelcounts):
    #        cdatas.append(f.read(width * height))
            
    from PIL import Image
    icr = Image.frombuffer("L", (width, height), cdatas[0], 'raw', "L", 0, 1)
    icg = Image.frombuffer("L", (width, height), cdatas[1], 'raw', "L", 0, 1)
    icb = Image.frombuffer("L", (width, height), cdatas[2], 'raw', "L", 0, 1)
    result = Image.merge("RGB", (icr, icg, icb)) 
    #result.save('irgb.bmp')
    
    f.seek(ori, os.SEEK_SET)
    
    return result
Example #8
 def xyImage ( self, window=None ):
   """Create xy slice"""
   if len(self.data.shape) == 3:
     zdim, ydim, xdim = self.data.shape
     return Image.frombuffer ( 'L', (xdim,ydim), self.data[0,:,:].flatten(), 'raw', 'L', 0, 1 ) 
   else:
     zdim,ydim,xdim = self.data.shape[1:]
     return Image.frombuffer ( 'L', (xdim,ydim), self.data[0,0,:,:].flatten(), 'raw', 'L', 0, 1 ) 
Example #9
  def build_ImageStack ( self, token ):
    """Build the hierarchy of images"""

    with closing ( ocpcaproj.OCPCAProjectsDB() ) as projdb:
      proj = projdb.loadProject ( token )

    with closing ( ocpcadb.OCPCADB (proj) ) as db:

      for resolution in range ( proj.datasetcfg.resolutions[1], len(proj.datasetcfg.resolutions) ):

        # Get the source database sizes
        [ximagesz, yimagesz] = proj.datasetcfg.imagesz [ resolution ]
        [xcubedim, ycubedim, zcubedim] = cubedims = proj.datasetcfg.cubedim [ resolution ]

        # Get the slices
        [ startslice, endslice ] = proj.datasetcfg.slicerange
        slices = endslice - startslice + 1
        [ starttime, endtime ] = proj.datasetcfg.timerange

        # Set the limits for iteration on the number of cubes in each dimension
        # RBTODO These limits may be wrong for even (see channelingest.py)
        xlimit = (ximagesz - 1) / xcubedim + 1
        ylimit = (yimagesz - 1) / ycubedim + 1
        #  Round up the zlimit to the next larger
        zlimit = (((slices-1)/zcubedim+1)*zcubedim)/zcubedim 

        for z in range(zlimit):
          for y in range(ylimit):
            for x in range(xlimit):

              key = ocplib.XYZMorton ( [x,y,z] )
              olddata = db.cutout ( [ x*2*xcubedim, y*2*ycubedim, z*zcubedim ], [xcubedim*2,ycubedim*2,zcubedim], resolution-1, t ).data

              # target array for the new data (z,y,x) order
              newdata = np.zeros ( cubedims[::-1], dtype=olddata.dtype )

              for sl in range(zcubedim):
                
                # Convert each slice to an image
                if proj.getDBType() in ocpcaproj.DATASETS_16bit:
                  slimage = Image.frombuffer ( 'I;16', (xcubedim*2,ycubedim*2), olddata[sl,:,:].flatten(), 'raw', 'I;16', 0, 1 )
                elif proj.getDBType() in ocpcaproj.DATATSETS_8bit:
                  slimage = Image.frombuffer ( 'L', (xcubedim*2,ycubedim*2), olddata[sl,:,:].flatten(), 'raw', 'L', 0, 1 )

                # Resize the image and Put to a new cube
                newdata[sl,:,:] = np.asarray ( slimage.resize( [xcubedim,ycubedim] ) )

              if proj.getDBType() in ocpcaproj.DATATSETS_8bit:
                newcube = imagecube.ImageCube8 ( cubedims )
              elif proj.getDBType() in ocpcaproj.DATASETS_16bit:
                newcube = imagecube.ImageCube16 ( cubedims )
              
              newcube.data = newdata
              # put in the database
              db.putCube( key, resolution, newcube )

        db.conn.commit()
Example #10
    def build_map(cls, sc):
        if isinstance(sc, supplychain):
            pass
        elif type(sc) is str:
            sc = supplychain.factory(sc)
        bounds = sc.bounds()
        if bounds:
            tl, br = bounds
            tl = sc.project(tl, inverse=True)
            br = sc.project(br, inverse=True)
            bounds = (tl, br)
            tlt, brt = cls.fit_bounds(bounds)
        else:
            # for empty maps, just grab the world at zoom level 3 (8x8)
            tlt = tilesetcls.tileclass.factory(xtile=0,ytile=0,zoom=3,tileset=tilesetcls)
            brt = tilesetcls.tileclass.factory(xtile=7,ytile=7,zoom=3,tileset=tilesetcls)
        g = graph(sc)
        stids = g.nids[0:]
        tiers = {}
        for stid in stids:
            tiers[stid] = 0
        max_plen = 0
        for i in range(0,len(g.paths)):
            p = g.paths[i] 
            if len(p) > max_plen:
                max_plen = len(p)
            for j in range(0,len(p)):
                if j > tiers[p[j]]:
                    tiers[p[j]] = j
        dfc = cls.default_feature_colors[0:]
        for i in range(0,len(dfc)):
            dfc[i] = Color.fromHex(dfc[i]) 
        palette = Color.graduate(dfc, max_plen)

        staticmap = maptic(tlt, brt)
        dfc = cls.default_feature_colors[0]
        for st in sc.stops:
            st.geometry = st.to_latlon()
            st.attributes["color"] = str(st.attributes.get("color", palette[tiers.get(st.id, dfc)]))
            staticmap.draw_stop(st)
        for h in sc.hops:
            h.geometry = h.to_latlon()
            hc = h.attributes.get("color", None)
            if not hc:
                fc = palette[tiers[h.from_stop_id]]
                tc = palette[tiers[h.to_stop_id]]
                hc = str(fc.midpoint(tc))
            h.attributes["color"] = hc
            staticmap.draw_hop(h)
        if staticmap.hoplayer:
            hl = staticmap.hoplayer
            hlim = Image.frombuffer("RGBA", (hl.get_width(),hl.get_height()), hl.get_data(), "raw", "BGRA", 0, 1)
            staticmap.image = Image.composite(hlim, staticmap.image, hlim)
        if staticmap.stoplayer:
            stl = staticmap.stoplayer
            stlim = Image.frombuffer("RGBA", (stl.get_width(),stl.get_height()), stl.get_data(), "raw", "BGRA", 0, 1)
            staticmap.image = Image.composite(stlim, staticmap.image, stlim)
        return staticmap
Example #11
 def xyImage(self):
     """Create xy slice"""
     # This works for 16-> conversions
     zdim, ydim, xdim = self.data.shape[1:]
     if self.data.dtype == np.uint8:
         return Image.frombuffer("L", (xdim, ydim), self.data[0, 0, :, :].flatten(), "raw", "L", 0, 1)
     else:
         outimage = Image.frombuffer("I;16", (xdim, ydim), self.data[0, 0, :, :].flatten(), "raw", "I;16", 0, 1)
         return outimage.point(lambda i: i * (1.0 / 256)).convert("L")
Example #12
 def show(self, sequence):
     from PIL import Image
     from struct import pack
     tbuffer = StringIO()
     rgb = self.catch(sequence)
     for c in rgb:
         tbuffer.write(pack("BBB", c[0], c[1], c[2]))
     Image.frombuffer("RGB", (self.width, self.height), tbuffer.getvalue(), "raw", "RGB", 0, 1).show()
     exit(0)
Example #13
 def xyImage ( self ):
   """Create xy slice"""
   # This works for 16-> conversions
   zdim,ydim,xdim = self.data.shape
   if self.data.dtype == np.uint8:  
     return Image.frombuffer ( 'L', (xdim,ydim), self.data[0,:,:].flatten(), 'raw', 'L', 0, 1)
   else:
     outimage = Image.frombuffer ( 'I;16', (xdim,ydim), self.data[0,:,:].flatten(), 'raw', 'I;16', 0, 1)
     return outimage.point(lambda i:i*(1./256)).convert('L')
Example #14
    def yzImage(self, zscale):
        """Create yz slice"""
        zdim, ydim, xdim = self.data.shape[1:]
        if self.data.dtype == np.uint8:
            outimage = Image.frombuffer("L", (ydim, zdim), self.data[0, :, :, 0].flatten(), "raw", "L", 0, 1)
        else:
            outimage = Image.frombuffer("I;16", (ydim, zdim), self.data[0, :, :, 0].flatten(), "raw", "I;16", 0, 1)
            outimage = outimage.point(lambda i: i * (1.0 / 256)).convert("L")

        return outimage.resize([ydim, int(zdim * zscale)])
Example #15
 def yzImage ( self, zscale ):
   """Create yz slice"""
   zdim,ydim,xdim = self.data.shape
   if self.data.dtype == np.uint8:  
     outimage = Image.frombuffer ( 'L', (ydim,zdim), self.data[:,:,0].flatten(), 'raw', 'L', 0, 1)
   else:
     outimage = Image.frombuffer ( 'I;16', (ydim,zdim), self.data[:,:,0].flatten(), 'raw', 'I;16', 0, 1)
     outimage = outimage.point(lambda i:i*(1./256)).convert('L')
   
   return outimage.resize ( [ydim, int(zdim*zscale)] )
Example #16
def maxfilter(app_data):

	it  = 0
	app_args = app_data['app_args']

	img_path = app_args.img_file
	img = Image.open(img_path)
	#img.show()
	arr1 = np.array(img)
	print("Image dtype: "+str(arr1.dtype))
	mode = img.mode
	size = img.size

	rows = app_data['R']
	cols = app_data['C']
	in1 = app_data['img_data']['IN']
	in1 = np.reshape(in1, (3, rows, cols))
	in1 = np.rollaxis(in1, 2)
	in1 = np.rollaxis(in1, 2)
	print("Input Image shape: "+str(in1.shape))
	in1 = np.uint8(in1).ravel()
	img_in = Image.frombuffer(mode,size,in1)
	#img_in.show()
	img_in.save("in.jpeg","JPEG")
   
	runs = int(app_args.runs)
	timer = app_args.timer
	if timer == True:
		t1 = time.time()

	while it < runs :
		call_pipe(app_data)
		it += 1

		OUT = app_data['img_data']['OUT']
		OUT = OUT.reshape(4, rows, cols)
		OUT= np.rollaxis(OUT, 2)
		OUT= np.rollaxis(OUT, 2)
		print("Output Image shape: "+str(OUT.shape))
		out1 = np.zeros((rows,cols,3),np.uint8)
		out1[0:rows,0:cols,0:3] = OUT[0:rows,0:cols,0:3]
		out1 = out1.ravel()
		img_out = Image.frombuffer(mode,size,out1)
		#img_out.show()
		img_out.save('out.jpeg',"JPEG")

	if timer == True:
		t2 = time.time()

		time_taken = float(t2) - float(t1)
		print("")
		print("[exec_pipe] : time taken to execute = ",
			  (time_taken * 1000) / runs, " ms")

	return
Example #17
  def yzImage ( self, zscale ):
    """Create yz slice"""

    if len(self.data.shape) == 3:
      zdim, ydim, xdim = self.data.shape
      outimage = Image.frombuffer ( 'L', (ydim,zdim), self.data[:,:,0].flatten(), 'raw', 'L', 0, 1 ) 
      # if the image scales to 0 pixels it doesn't work
      return outimage.resize ( [ydim, int(zdim*zscale)] )
    else:
      zdim,ydim,xdim = self.data.shape[1:]
      outimage = Image.frombuffer ( 'L', (ydim,zdim), self.data[0,:,:,0].flatten(), 'raw', 'L', 0, 1 ) 
      # if the image scales to 0 pixels it doesn't work
      return outimage.resize ( [ydim, int(zdim*zscale)] )
Example #18
def CreateKey(min_val, max_val, height, width, log_display=True, orientation='left', num_ticks=9):
    height = float(height)
    width = float(width)
    min_val = float(min_val)
    max_val = float(max_val)
    c = canvas.canvas()
    if orientation in ['left', 'right']:
        img = numpy.zeros( (int(round(256.0*width/height)), 256), dtype=numpy.uint32 )
        img.shape = (img.shape[1],img.shape[0])
        img[:, :] = 256**3 * 255 + (255 - numpy.arange(256).reshape(-1, 1)) * 256**2
        pilImage = Image.frombuffer( 'RGBA',(img.shape[1],img.shape[0]),img,'raw','RGBA',0,1)
        c.insert( bitmap.bitmap( 0, 0, pilImage, height=height ) )
    else:
        img = numpy.zeros( (256, int(round(256.0*height/width))), dtype=numpy.uint32 )
        img.shape = (img.shape[1],img.shape[0])
        img[:, :] = 256**3 * 255 + (255 - (numpy.arange(256)[::-1]).reshape(1, -1)) * 256**2
        pilImage = Image.frombuffer( 'RGBA',(img.shape[1],img.shape[0]),img,'raw','RGBA',0,1)
        c.insert( bitmap.bitmap( 0, 0, pilImage, width=width ) )
    c.stroke( path.rect( 0, 0, width, height ), [style.linewidth.THin] )
    if orientation in ['left', 'right']:
        tick_step = height/(num_ticks-1)
    else:
        tick_step = width/(num_ticks-1)
    if log_display == True:
        labels = numpy.exp( min_val + numpy.arange(num_ticks) * (max_val - min_val) / (num_ticks-1) )
    else:
        labels = min_val + numpy.arange(num_ticks) * (max_val - min_val) / (num_ticks-1)
    if orientation == 'left':
        for i in range(num_ticks):
            c.stroke( path.line( -width * 0.4, tick_step * i, 0.0, tick_step * i ), [style.linewidth.THin] )
            c.text(-width * 0.5, tick_step*i, r"%i" % (numpy.round(labels[i])),
                   [text.halign.right, text.valign.middle, text.size(-3)] )
    elif orientation == 'right':
        for i in range(num_ticks):
            c.stroke( path.line( width * 1.4, tick_step * i, width, tick_step * i ), [style.linewidth.THin] )
            c.text(width * 1.5, tick_step * i, r"%i" % (numpy.round(labels[i])),
                   [text.halign.left, text.valign.middle, text.size(-3)] )
    elif orientation == 'top':
        for i in range(num_ticks):
            c.stroke( path.line(tick_step * i, height, tick_step * i, height * 1.4 ), [style.linewidth.THin] )
            c.text(tick_step*i, height * 1.5, r"%i" % (numpy.round(labels[i])),
                   [text.halign.center, text.valign.bottom, text.size(-3)] )
    else:
        for i in range(num_ticks):
            c.stroke( path.line(tick_step * i, 0, tick_step * i, -height * 0.4 ), [style.linewidth.THin] )
            c.text(tick_step*i, -height * 0.5, r"%i" % (numpy.round(labels[i])),
                   [text.halign.center, text.valign.top, text.size(-3)] )
    return c
Example #19
        def print_jpg(self, filename_or_obj, *args, **kwargs):
            """
            Supported kwargs:

            *quality*: The image quality, on a scale from 1 (worst) to
                95 (best). The default is 95 unless overridden by the
                savefig.jpeg_quality setting in the matplotlibrc file.
                Values above 95 should be avoided; 100 completely
                disables the JPEG quantization stage.

            *optimize*: If present, indicates that the encoder should
                make an extra pass over the image in order to select
                optimal encoder settings.

            *progressive*: If present, indicates that this image
                should be stored as a progressive JPEG file.
            """
            buf, size = self.print_to_buffer()
            if kwargs.pop("dryrun", False):
                return
            image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
            options = restrict_dict(kwargs, ['quality', 'optimize',
                                             'progressive'])

            if 'quality' not in options:
                options['quality'] = rcParams['savefig.jpeg_quality']

            return image.save(filename_or_obj, format='jpeg', **options)
Example #20
def framestream():
	global frame
	while True:
		with frame_cond:
			frame_cond.wait()
			img = Image.frombuffer('RGB', (config.display_width, config.display_height), frame, 'raw', 'RGB', 0, 1)
			yield encode_image(img)
Example #21
 def get_text(self):
     """Does OCR on this image."""
     image_writer = ImageWriter("temp")
     try:
         temp_image = image_writer.export_image(self._image_obj)
     except PDFNotImplementedError:
         # No filter method available for this stream
         # https://github.com/euske/pdfminer/issues/99
         return u""
     try:
         text = image_to_string(Image.open("temp/" + temp_image),
                                lang="fin")
     except IOError:
         # PdfMiner did not return an image
         # Let's try to create one ourselves
         # TODO: Create proper color_mode values from ColorSpace
         # Most of the times "L" will create something good enough
         # for OCR, though
         temp_image = Image.frombuffer("L",
                                       self._image_obj.srcsize,
                                       self._stream.get_data(), "raw",
                                       "L", 0, 1)
         text = image_to_string(temp_image, lang="fin")
     unlink("temp/" + temp_image)
     return text
Example #22
def fb_dump():
    size = fb_lock()

    if (not size[0]):
        # frame not ready
        return None

    if (size[2] > 2): #JPEG
        num_bytes = size[2]
    else:
        num_bytes = size[0]*size[1]*size[2]

    # read fb data
    __dev.ctrl_transfer(0xC1, __USBDBG_FRAME_DUMP, num_bytes/4, __INTERFACE, 0, __TIMEOUT)
    buff = __dev.read(__IN_EP, num_bytes, __TIMEOUT)

    if size[2] == 1:  # Grayscale
        s = buff.tostring()
        buff = ''.join([y for yyy in zip(s, s, s) for y in yyy])
    elif size[2] == 2: #RGB565
        arr = array('H', buff.tostring())
        arr.byteswap()
        buff = ''.join(map(_rgb, arr))
    else: # JPEG
        try:
            buff = Image.frombuffer("RGB", (size[0], size[1]), buff, "jpeg", "RGB", "").tostring()
        except Exception as e:
            #print ("JPEG decode error (%s)"%(e))
            return None

        if (len(buff) != (size[0]*size[1]*3)):
            return None

    return (size[0], size[1], buff)
Example #23
def streamVisionSensor(visionSensorName,clientID,pause=0.0001):
    #Get the handle of the vision sensor
    res1,visionSensorHandle=vrep.simxGetObjectHandle(clientID,visionSensorName,vrep.simx_opmode_oneshot_wait)
    #Get the image
    res2,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_streaming)
    #Allow the display to be refreshed
    plt.ion()
    #Initialiazation of the figure
    time.sleep(0.5)
    res,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_buffer)
    im = I.new("RGB", (resolution[0], resolution[1]), "white")
    #Give a title to the figure
    fig = plt.figure(1)    
    fig.canvas.set_window_title(visionSensorName)
    #inverse the picture
    plotimg = plt.imshow(im,origin='lower')
    #Let some time to Vrep in order to let him send the first image, otherwise the loop will start with an empty image and will crash
    time.sleep(1)
    while (vrep.simxGetConnectionId(clientID)!=-1): 
        #Get the image of the vision sensor
        res,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_buffer)
        #Transform the image so it can be displayed using pyplot
        image_byte_array = array.array('b',image)
        im = I.frombuffer("RGB", (resolution[0],resolution[1]), image_byte_array, "raw", "RGB", 0, 1)
        #Update the image
        plotimg.set_data(im)
        #Refresh the display
        plt.draw()
        #The mandatory pause ! (or it'll not work)
        plt.pause(pause)
    print 'End of Simulation'
Example #24
def screenshot(filestring=None):
  """
  Save whatever's in the display to a file.

  Will save whatever has been rendered since the last call to Display.clear().

  The file will be saved in the same directory as the app if you don't add a path
  to it!
  
  If this function is called without any argument then it will not save to
  file and will return a numpy array of the screen. The array and file, if
  saved, will have the alpha values removed.
  """

  from pi3d.Display import Display

  w, h = Display.INSTANCE.width, Display.INSTANCE.height
  img = np.zeros((h, w, 4), dtype=np.uint8)
  opengles.glReadPixels(0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, img.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)))
  img = img[::-1,:,:3].copy()
  if filestring is None:
    return img

  im = Image.frombuffer('RGB', (w, h), img, 'raw', 'RGB', 0, 1)
  im.save(filestring, quality=90)
Example #25
def pixbuf_to_pil(pixbuf):
    """Return a PIL image created from <pixbuf>."""
    dimensions = pixbuf.get_width(), pixbuf.get_height()
    stride = pixbuf.get_rowstride()
    pixels = pixbuf.get_pixels()
    mode = pixbuf.get_has_alpha() and 'RGBA' or 'RGB'
    return Image.frombuffer(mode, dimensions, pixels, 'raw', mode, stride, 1)
Example #26
	def __init_image( self ):
		""" Initializes the image drawn on the canvas 
			
			There's a lot of hashing going on in this function that could be
			eliminated by a few more lines of code, like the ones in this comment.
			
			This is an internal function called at instantiation.
		"""
		# Create a pixel array to store our data
		(width,height,bg) = self.config('width','height','background')
		pixels = self.__opts['pixels'] = np.ones((width,height),np.uint32) * bg
		self.__opts['zbuffer'] = np.ones((width,height),np.float32) * 1e3
		pixels.shape = height,width
		
		# Handle Seg faults; seg faults in python are the reason I have trust issues
		def sig_handler(s,f):
			raise Exception("I'm so so so sorry... seg fault")
		signal.signal(signal.SIGSEGV,sig_handler)
		
		# Create an image from the pixel array
		self.__opts['image'] = Image.frombuffer('RGBA',(width,height),pixels,
											'raw','RGBA',0,1).convert('RGB')
		self.__opts['photo_image'] = ImageTk.PhotoImage(self.__opts['image'])
		
		# Draw the image to the canvas
		self.__opts['canvas'].create_image(width/2,height/2-20,
			image=self.__opts['photo_image'])		
Example #27
def plot_distances(fivec, width, hist, hist_min, hist_max, hist_ranges):
    gamma = fivec.gamma
    mu = fivec.region_means[0]
    hist_bm = numpy.zeros((hist.shape[0], hist.shape[1]), dtype=numpy.uint32)
    hist_bm.fill(int('ffffffff', 16))
    hist1 = numpy.zeros(hist_bm.shape, dtype=numpy.float32)
    where = numpy.where(hist[:, :, 1])
    hist1[where] = hist[where[0], where[1], 0] / hist_max
    hist_bm[-1 - where[1], where[0]] = 256**3 * 255 + numpy.round(255 * hist1[where]).astype(numpy.int32) * 256 ** 2
    hist_img = Image.frombuffer('RGBA', (hist.shape[0], hist.shape[1]), hist_bm, 'raw', 'RGBA', 0, 1)
    c = canvas.canvas()
    c.insert(bitmap.bitmap(0, 0, hist_img, width=width))
    xmin = hist_ranges[0, 0] / 1000
    xmax = hist_ranges[0, 1] / 1000
    ymin = hist_ranges[1, 0]
    ymax = hist_ranges[1, 1]
    g = graph.graphxy( width=width, height=width, 
    x=graph.axis.log(min=xmin, max=xmax, title='', painter=painter), 
    y=graph.axis.log(min=ymin, max=ymax, title='', painter=painter), 
    x2=graph.axis.lin(min=0, max=1, parter=None),
    y2=graph.axis.lin(min=0, max=1, parter=None) )
    g.plot(graph.data.function("y(x) = exp(-%f * log(x*1000) + %f)" % (gamma, mu), min=xmin, max=xmax),
           [graph.style.line([color.cmyk.Red, style.linewidth.THick])])
    c.insert(g)
    return c
Example #28
def screenshot_image(hwnd = None, number = ''):
    if not hwnd:
        hwnd=win32gui.GetDesktopWindow()
    l, t, r, b = win32gui.GetWindowRect(hwnd)
    w = r - l
    h = b - t


    hwndDC = win32gui.GetWindowDC(hwnd)
    mfcDC  = win32ui.CreateDCFromHandle(hwndDC)
    saveDC = mfcDC.CreateCompatibleDC()

    saveBitMap = win32ui.CreateBitmap()
    saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
    saveDC.SelectObject(saveBitMap)

    saveDC.BitBlt((0, 0), (w, h),  mfcDC,  (0, 0),  win32con.SRCCOPY)

    bmpinfo = saveBitMap.GetInfo()
    bmpstr = saveBitMap.GetBitmapBits(True)
    image = Image.frombuffer(
        'RGB',
        (bmpinfo['bmWidth'], bmpinfo['bmHeight']),
        bmpstr, 'raw', 'BGRX', 0, 1)


    win32gui.DeleteObject(saveBitMap.GetHandle())
    saveDC.DeleteDC()
    mfcDC.DeleteDC()
    win32gui.ReleaseDC(hwnd, hwndDC)

    return image
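A minimal usage sketch for screenshot_image above; the window title is a placeholder, and FindWindow returns 0 when no matching window exists, in which case the function falls back to the desktop window:

import win32gui

hwnd = win32gui.FindWindow(None, "Untitled - Notepad")   # placeholder window title
img = screenshot_image(hwnd or None)
img.save("capture.png")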
Example #29
 def RleDecode(self):
     raw_data=[]
     #Convert 8 bits array to 16 bits array.
     data = np.array(unpack('<%sh' % (len(inBuffer[self.headerlen:-1])/2), inBuffer[self.headerlen:-1]))
     l=len(data)
     if( l%2 != 0):   #Ignore reserved data.
         l=l-1
     package_length=len(data)
     index=0
     bmp_size=0
     while True:
         length =data[index]
         value =data[index+1]
         index+=2
         bmp_size+=length
         buf=[ value for x in xrange(0,length)]
         raw_data+=buf
         if(index>=l):
             break
     width = 800
     height = 480
     #Convert from rgb565 into rgb888
     index=0
     rgb_buf=[]
     num=width*height
     for index in xrange(num):
         rgb_buf+=lu_table[raw_data[index]]
     img_buf=struct.pack("1152000B", *rgb_buf)
     self.im=Image.frombuffer('RGB',(800,480), img_buf, 'raw', 'RGB',0,1)
     if(self.nodename=='pi'):
         self.im=self.im.transpose(Image.FLIP_TOP_BOTTOM) #For raspberry pi only.
Example #30
def grabScreen(x1, y1, x2, y2):
    # The calls to winapi
    hwnd = winapi.GetDesktopWindow()

    width = winapi.GetSystemMetrics(SM_CXVIRTUALSCREEN)
    height = winapi.GetSystemMetrics(SM_CYVIRTUALSCREEN)

    x1, y1 = max(x1, 0), max(y1, 0)
    if x2 > width:
        x2 = width
    if y2 > height:
        y2 = height

    W, H = min(x2 - x1, width), min(y2 - y1, height)

    if W < 0:
        W = 1
    if H < 0:
        H = 1

    winDC = winapi.GetWindowDC(hwnd)
    cDC = winapi.CreateCompatibleDC(winDC)
    bmp = winapi.CreateCompatibleBitmap(winDC, W, H)
    winapi.SelectObject(cDC, bmp)
    winapi.BitBlt(cDC, 0, 0, W, H, winDC, x1, y1, SRCCOPY)

    # Image from bitmapbuffer
    rawbmp = winapi.GetBitmapBits(bmp)

    # Destroy all objects!
    winapi.DeleteObject(winapi.SelectObject(cDC, bmp))
    winapi.DeleteDC(cDC)
    winapi.ReleaseDC(hwnd, winDC)

    return Image.frombuffer("RGB", (W, H), rawbmp, "raw", "BGRX", 0, 1)
Example #31
def BackFrameCapture():
    # ---------------------------------------- Back Frame ---------------------------------------------------
    # get The Screen window
    hwnd = win32gui.GetDesktopWindow()
    wDC = win32gui.GetWindowDC(hwnd)

    # create DC object for
    dcObj = win32ui.CreateDCFromHandle(wDC)
    cDC = dcObj.CreateCompatibleDC()

    # bitmapping The captured Frame
    dataBitMap = win32ui.CreateBitmap()
    dataBitMap.CreateCompatibleBitmap(dcObj, w, h)

    # Cropping Window
    cDC.SelectObject(dataBitMap)
    cDC.BitBlt((0, 0), (w, h), dcObj, (0, 30), win32con.SRCCOPY)

    # dataBitMap.SaveBitmapFile(cDC, 'Test.jpg')
    bmpinfo = dataBitMap.GetInfo()
    bmpstr = dataBitMap.GetBitmapBits(True)
    im3 = Image.frombuffer(
        'RGB',
        (bmpinfo['bmWidth'], bmpinfo['bmHeight']),
        bmpstr, 'raw', 'BGRX', 0, 1)

    Back = np.array(im3)
    Back = cv2.cvtColor(Back, cv2.COLOR_BGR2RGB)
    PressKey(ChangeView)
    time.sleep(delay)
    ReleaseKey(ChangeView)
    # time.sleep(0.0000000000000001)
    # Cleaning Some memory (Avoid Exception)
    dcObj.DeleteDC()
    cDC.DeleteDC()
    win32gui.ReleaseDC(hwnd, wDC)
    win32gui.DeleteObject(dataBitMap.GetHandle())
    return Back
Example #32
 def imageFromDataset(self, dataset):
     """return an image from the dicom dataset using the Python Imaging Library (PIL)"""
     if ('PixelData' not in dataset):
         # DICOM dataset does not have pixel data
         print('no pixels')
         return None
     if ('WindowWidth' not in dataset) or ('WindowCenter' not in dataset):
         print("No window width or center in the dataset")
         # no width/center, so use whole
         bits = dataset.BitsAllocated
         samples = dataset.SamplesPerPixel
         if bits == 8 and samples == 1:
             mode = "L"
         elif bits == 8 and samples == 3:
             mode = "RGB"
         elif bits == 16:
             mode = "I;16"  # from sample code: "not sure about this
             # -- PIL source says is 'experimental' and no documentation.
             # Also, should bytes swap depending on endian of file and system??"
         else:
             raise TypeError("Don't know PIL mode for %d BitsAllocated and %d SamplesPerPixel" % (
                 bits, samples))
         # PIL size = (width, height)
         size = (dataset.Columns, dataset.Rows)
         # Recommended to specify all details by
         #  http://www.pythonware.com/library/pil/handbook/image.htm
         image = Image.frombuffer(mode, size, dataset.PixelData, "raw",
                                  mode, 0, 1).convert('L')
     else:
         image = self.windowedData(dataset.pixel_array, dataset.WindowWidth,
                                   dataset.WindowCenter)
         # Convert mode to L since LUT has only 256 values:
         #  http://www.pythonware.com/library/pil/handbook/image.htm
         if image.dtype != 'int16':
             print('Type is not int16, converting')
             image = numpy.array(image, dtype='int16')
         image = Image.fromarray(image).convert('L')
     return image
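A hypothetical usage sketch for imageFromDataset above, assuming pydicom is installed; 'scan.dcm' is a placeholder path and 'viewer' stands in for whatever instance owns this method:

import pydicom

dataset = pydicom.dcmread("scan.dcm")        # placeholder DICOM file
img = viewer.imageFromDataset(dataset)       # 'viewer' is an instance of the class above
if img is not None:
    img.save("scan.png")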
Example #33
    def poll_cursor(self):
        prev = self.last_cursor_data
        curr = self.do_get_cursor_data()
        self.last_cursor_data = curr

        def cmpv(lcd):
            if not lcd:
                return None
            v = lcd[0]
            if v and len(v) > 2:
                return v[2:]
            return None

        if cmpv(prev) != cmpv(curr):
            fields = ("x", "y", "width", "height", "xhot", "yhot", "serial",
                      "pixels", "name")
            if len(prev or []) == len(curr or []) and len(
                    prev or []) == len(fields):
                diff = []
                for i in range(len(prev)):
                    if prev[i] != curr[i]:
                        diff.append(fields[i])
                cursorlog("poll_cursor() attributes changed: %s", diff)
            if SAVE_CURSORS and curr:
                ci = curr[0]
                if ci:
                    w = ci[2]
                    h = ci[3]
                    serial = ci[6]
                    pixels = ci[7]
                    cursorlog("saving cursor %#x with size %ix%i, %i bytes",
                              serial, w, h, len(pixels))
                    from PIL import Image
                    img = Image.frombuffer("RGBA", (w, h), pixels, "raw",
                                           "BGRA", 0, 1)
                    img.save("cursor-%#x.png" % serial, format="PNG")
            for ss in self._server_sources.values():
                ss.send_cursor()
Example #34
    def render_line(self, text):
        """
        Draws a line onto a Cairo surface which will be converted to an pillow
        Image.

        Args:
            text (unicode): A string which will be rendered as a single line.

        Returns:
            PIL.Image of mode 'L'.

        Raises:
            KrakenCairoSurfaceException if the Cairo surface couldn't be created
            (usually caused by invalid dimensions).
        """
        logger.info(u'Rendering line \'{}\''.format(text))
        logger.debug(u'Creating temporary cairo surface')
        temp_surface = cairo.cairo_image_surface_create(0, 0, 0)
        width, height = _draw_on_surface(temp_surface, self.font, self.language, text)
        cairo.cairo_surface_destroy(temp_surface)
        if width == 0 or height == 0:
            logger.error(u'Surface for \'{}\' zero pixels in at least one dimension'.format(text))
            raise KrakenCairoSurfaceException('Surface zero pixels in at least one dimension', width, height)
        logger.debug(u'Creating sized cairo surface')
        real_surface = cairo.cairo_image_surface_create(0, width, height)
        _draw_on_surface(real_surface, self.font, self.language, text)
        logger.debug(u'Extracting data from real surface')
        data = cairo.cairo_image_surface_get_data(real_surface)
        size = int(4 * width * height)
        buffer = ctypes.create_string_buffer(size)
        ctypes.memmove(buffer, data, size)
        logger.debug(u'Loading data into PIL image')
        im = Image.frombuffer("RGBA", (width, height), buffer, "raw", "BGRA", 0, 1)
        cairo.cairo_surface_destroy(real_surface)
        logger.debug(u'Expand and grayscale image')
        im = im.convert('L')
        im = ImageOps.expand(im, 5, 255)
        return im
Example #35
 def ImageDecode(self, type):
     if (type):  #1 for RLE decode, 0 for PNG decode.
         raw_data = []
         #Convert 8 bits array to 16 bits array.
         data = unpack('<%sH' % (len(inBuffer[self.headerlen:-1]) / 2),
                       inBuffer[self.headerlen:-1])
         l = len(data)
         if (l % 2 != 0):  #Ignore reserved data.
             l = l - 1
         package_length = len(data)
         index = 0
         bmp_size = 0
         while True:
             length = data[index]
             value = data[index + 1]
             index += 2
             bmp_size += length
             buf = [value for x in xrange(0, length)]
             raw_data += buf
             if (index >= l):
                 break
         width = 800
         height = 480
         #Convert from rgb565 into rgb888
         index = 0
         rgb_buf = []
         num = width * height
         for index in xrange(num):
             rgb_buf += lu_table[raw_data[index]]
         img_buf = struct.pack("1152000B", *rgb_buf)
         self.im = Image.frombuffer('RGB', (width, height), img_buf, 'raw',
                                    'RGB', 0, 1)
     else:  #0 for PNG decode.
         self.im = Image.open(io.BytesIO(inBuffer[self.headerlen:-1]))
         print 'PngDecode()'
     if (self.osname == 'pi'):
         self.im = self.im.transpose(
             Image.FLIP_TOP_BOTTOM)  #For raspberry pi only.
Example #36
    def capture(self, debug=False):
        self.focus()  # unfortunatly we need to focus
        # dimensions = win32gui.GetWindowRect(self.hwnd)
        # image = ImageGrab.grab(dimensions)
        # image.show()
        hwnd = self.hwnd
        left, top, right, bot = win32gui.GetWindowRect(hwnd)
        w = right - left
        h = bot - top
        hdesktop = win32gui.GetDesktopWindow()
        hwndDC = win32gui.GetWindowDC(hdesktop)
        # hwndDC = win32gui.GetDC(hwnd)
        mfcDC = win32ui.CreateDCFromHandle(hwndDC)
        saveDC = mfcDC.CreateCompatibleDC()
        saveBitMap = win32ui.CreateBitmap()
        saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
        saveDC.SelectObject(saveBitMap)

        result = saveDC.BitBlt((0, 0), (w, h), mfcDC, (left, top),
                               win32con.SRCCOPY)

        bmpinfo = saveBitMap.GetInfo()
        bmpstr = saveBitMap.GetBitmapBits(True)

        im = Image.frombuffer('RGB', (bmpinfo['bmWidth'], bmpinfo['bmHeight']),
                              bmpstr, 'raw', 'BGRX', 0, 1)
        open_cv_image = cv2.cvtColor(np.array(im), cv2.COLOR_RGB2BGR)
        del im
        if debug:
            cv2.imshow("Debug capture", open_cv_image)
            cv2.waitKey()

        win32gui.DeleteObject(saveBitMap.GetHandle())
        saveDC.DeleteDC()
        mfcDC.DeleteDC()
        win32gui.ReleaseDC(hdesktop, hwndDC)

        return open_cv_image
Example #37
def fb_dump():
    size = fb_size()

    if (not size[0]):
        # frame not ready
        return None

    if (size[2] > 2):  #JPEG
        num_bytes = size[2]
    else:
        num_bytes = size[0] * size[1] * size[2]

    # read fb data
    __serial.write(
        struct.pack("<BBI", __USBDBG_CMD, __USBDBG_FRAME_DUMP, num_bytes))
    buff = __serial.read(num_bytes)

    if size[2] == 1:  # Grayscale
        y = np.fromstring(buff, dtype=np.uint8)
        buff = np.column_stack((y, y, y))
    elif size[2] == 2:  # RGB565
        arr = np.fromstring(buff, dtype=np.uint16).newbyteorder('S')
        r = (((arr & 0xF800) >> 11) * 255.0 / 31.0).astype(np.uint8)
        g = (((arr & 0x07E0) >> 5) * 255.0 / 63.0).astype(np.uint8)
        b = (((arr & 0x001F) >> 0) * 255.0 / 31.0).astype(np.uint8)
        buff = np.column_stack((r, g, b))
    else:  # JPEG
        try:
            buff = np.asarray(
                Image.frombuffer("RGB", size[0:2], buff, "jpeg", "RGB", ""))
        except Exception as e:
            print("JPEG decode error (%s)" % (e))
            return None

    if (buff.size != (size[0] * size[1] * 3)):
        return None

    return (size[0], size[1], buff.reshape((size[1], size[0], 3)))
Example #38
def window_capture_image(handles=None):
    """

    :param handles: handle (HWND) of the window to capture
    :return: the captured window as a numpy.ndarray (built from a PIL.Image)
    """

    left, top, right, bot = win32gui.GetWindowRect(handles)
    width = right - left
    height = bot - top
    hwndDC = win32gui.GetWindowDC(handles)
    mfcDC = win32ui.CreateDCFromHandle(hwndDC)
    saveDC = mfcDC.CreateCompatibleDC()
    BitMap = win32ui.CreateBitmap()
    BitMap.CreateCompatibleBitmap(mfcDC, width, height)
    saveDC.SelectObject(BitMap)
    saveDC.BitBlt((0, 0), (width, height), mfcDC, (0, 0), win32con.SRCCOPY)

    bmpinfo = BitMap.GetInfo()
    bmpstr = BitMap.GetBitmapBits(True)
    # Build the PIL image from the bitmap bits
    im_PIL = Image.frombuffer('RGB', (bmpinfo['bmWidth'], bmpinfo['bmHeight']),
                              bmpstr, 'raw', 'BGRX', 0, 1)

    win32gui.DeleteObject(BitMap.GetHandle())
    saveDC.DeleteDC()
    mfcDC.DeleteDC()
    win32gui.ReleaseDC(handles, hwndDC)
    # if im_gray:
    #     im_PIL = im_PIL.convert('L')
    #     im_PIL = np.array(im_PIL)
    #     return im_PIL
    # im_PIL.save(filename)
    img = np.array(im_PIL)

    return img
Example #39
    def slam_no_map(self, path_map_name):
        # doing slam with building maps from zero simultaneously
        next(self.iterator)

        while True:
            if self.flag == 1:
                break

            items = [item for item in next(self.iterator)]

            distances = [item[2] for item in items]
            angles = [item[1] for item in items]

            if len(distances) > MIN_SAMPLES:
                self.slam.update(distances, scan_angles_degrees=angles)
                self.previous_distances = distances.copy()
                self.previous_angles = angles.copy()

            elif self.previous_distances is not None:
                self.slam.update(self.previous_distances,
                                 scan_angles_degrees=self.previous_angles)

            self.x, local_y, local_theta = self.slam.getpos()
            local_theta = local_theta % 360
            if local_theta < 0:
                self.theta = 360 + local_theta
            else:
                self.theta = local_theta

            self.slam.getmap(self.mapbytes)

            # save map generated by slam
            image = Image.frombuffer('L', (MAP_SIZE_PIXELS, MAP_SIZE_PIXELS),
                                     self.mapbytes, 'raw', 'L', 0, 1)
            image.save(path_map_name)

        self.lidar.stop()
        self.lidar.disconnect()
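A self-contained sketch of the map-saving step above: the occupancy grid is a flat bytearray of MAP_SIZE_PIXELS**2 grayscale cells, so it maps straight onto an 'L' mode image (the size below is a placeholder):

from PIL import Image

MAP_SIZE_PIXELS = 256                                    # placeholder map size
mapbytes = bytearray(MAP_SIZE_PIXELS * MAP_SIZE_PIXELS)  # all-zero (black) map
image = Image.frombuffer('L', (MAP_SIZE_PIXELS, MAP_SIZE_PIXELS),
                         bytes(mapbytes), 'raw', 'L', 0, 1)
image.save('map.png')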
Example #40
        def print_jpg(self, filename_or_obj, *args, dryrun=False, **kwargs):
            """
            Other Parameters
            ----------------
            quality : int
                The image quality, on a scale from 1 (worst) to 100 (best).
                The default is :rc:`savefig.jpeg_quality`.  Values above
                95 should be avoided; 100 completely disables the JPEG
                quantization stage.

            optimize : bool
                If present, indicates that the encoder should
                make an extra pass over the image in order to select
                optimal encoder settings.

            progressive : bool
                If present, indicates that this image
                should be stored as a progressive JPEG file.
            """
            buf, size = self.print_to_buffer()
            if dryrun:
                return
            # The image is "pasted" onto a white background image to safely
            # handle any transparency
            image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
            rgba = mcolors.to_rgba(rcParams['savefig.facecolor'])
            color = tuple([int(x * 255) for x in rgba[:3]])
            background = Image.new('RGB', size, color)
            background.paste(image, image)
            options = {k: kwargs[k]
                       for k in ['quality', 'optimize', 'progressive', 'dpi']
                       if k in kwargs}
            options.setdefault('quality', rcParams['savefig.jpeg_quality'])
            if 'dpi' in options:
                # Set the same dpi in both x and y directions
                options['dpi'] = (options['dpi'], options['dpi'])

            return background.save(filename_or_obj, format='jpeg', **options)
Example #41
    def render_pdf(self, input_file: str, render_target_dpi: int) -> Image:
        doc = pdfium.FPDF_LoadDocument(str(input_file), None)

        page = pdfium.FPDF_LoadPage(doc, 0)  # load the first page

        # Page dimensions are measured in points. One point is 1/72 inch (around 0.3528 mm).
        width = int(pdfium.FPDF_GetPageWidthF(page) + 0.5)
        height = int(pdfium.FPDF_GetPageHeightF(page) + 0.5)

        # Converting to page
        render_width = int(width / 72 * render_target_dpi)
        render_height = int(height / 72 * render_target_dpi)

        # render to bitmap
        bitmap = pdfium.FPDFBitmap_Create(render_width, render_height, 0)
        pdfium.FPDFBitmap_FillRect(bitmap, 0, 0, render_width, render_height,
                                   0xFFFFFFFF)
        pdfium.FPDF_RenderPageBitmap(bitmap, page, 0, 0, render_width,
                                     render_height, 0,
                                     pdfium.FPDF_LCD_TEXT | pdfium.FPDF_ANNOT)

        # retrieve data from bitmap
        buffer = pdfium.FPDFBitmap_GetBuffer(bitmap)
        buffer_ = ctypes.cast(
            buffer,
            ctypes.POINTER(ctypes.c_ubyte *
                           (render_width * render_height * 4)))

        img = Image.frombuffer("RGBA", (render_width, render_height),
                               buffer_.contents, "raw", "BGRA", 0, 1)

        if bitmap is not None:
            pdfium.FPDFBitmap_Destroy(bitmap)
        pdfium.FPDF_ClosePage(page)

        pdfium.FPDF_CloseDocument(doc)

        return img
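A hypothetical usage sketch for render_pdf above; the owning class and file names are placeholders, and it assumes the pdfium bindings were initialised elsewhere as in the original module:

renderer = PdfRenderer()                                    # hypothetical owner of render_pdf
page_image = renderer.render_pdf("input.pdf", render_target_dpi=150)
page_image.save("page-0.png")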
Example #42
def capture_window_contents(WindowName):
    # Locate the window and assign it to a device context
    hwnd = win32gui.FindWindow(None, WindowName)
    if not hwnd:
        logging.debug("Window not found! Exiting...")
        raise Exception("Window not found! Exiting...")

    left, top, right, bot = win32gui.GetClientRect(hwnd)
    w = right - left
    h = bot - top

    hwndDC = win32gui.GetWindowDC(hwnd)
    mfcDC = win32ui.CreateDCFromHandle(hwndDC)

    # Create a compatible save DC and create a bitmap in memory
    saveDC = mfcDC.CreateCompatibleDC()
    saveBitMap = win32ui.CreateBitmap()
    saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
    saveDC.SelectObject(saveBitMap)
    result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 1)

    # Build an image from memory
    bmpinfo = saveBitMap.GetInfo()
    bmpstr = saveBitMap.GetBitmapBits(True)
    im = Image.frombuffer('RGB', (bmpinfo['bmWidth'], bmpinfo['bmHeight']),
                          bmpstr, 'raw', 'BGRX', 0, 1)

    # Release memory and device context
    win32gui.DeleteObject(saveBitMap.GetHandle())
    saveDC.DeleteDC()
    mfcDC.DeleteDC()
    win32gui.ReleaseDC(hwnd, hwndDC)

    if result == 1:
        im.save(FilePath)
    else:
        logging.debug("Document could not be saved!")
        raise Exception("Document could not be saved!")
Example #43
    def __init__(self,
                 parent=None,
                 peatinfo=None,
                 title=None,
                 data=None,
                 imgfile=None):
        "Initialize the application."
        self.parent = parent
        #If there is data to be loaded, show the dialog first
        if not self.parent:
            Frame.__init__(self)
            self.pilwin = self.master
            self.peatinfo = None
        else:
            self.pilwin = Toplevel()
            #self.peatinfo=peatinfo      #reference to peat protein/field
        if title != None:
            self.title = 'imgviewer_' + title
        else:
            self.title = 'PIL Image Viewer'
        self.pilwin.title(self.title)
        import platform
        self.currplatform = platform.system()
        if not hasattr(self, 'defaultsavedir'):
            self.defaultsavedir = os.getcwd()
        self.preferences = Preferences('PILViewer', {'check_for_update': 1})

        self.pilwin.geometry('+200+100')
        self.currentimage = None
        self.setupGUI()

        if imgfile != None:
            self.openFile(imgfile)
        elif data != None:
            self.currentimage = Image.frombuffer('RGB', (100, 100), data,
                                                 "raw", 'RGB', 0, 1)
            self.updateCanvas()
        return
Example #44
def get():
    w = gtk.gdk.get_default_root_window()
    # sz = [w.get_size()]
    # print "The size of the window is %d x %d" % sz

    pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, 1920, 1080)
    pb = pb.get_from_drawable(w, w.get_colormap(), 0, 0, 0, 0, 1920, 1080)

    height = pb.get_height()
    width = pb.get_width()
    i = Image.frombuffer("RGB", (width, height), pb.pixel_array, 'raw', 'RGB',
                         0, 1)
    h = i.histogram()
    i = None
    # split into red, green, blue
    r = h[0:256]
    g = h[256:256 * 2]
    b = h[256 * 2:256 * 3]

    print sum(i * w for i, w in enumerate(r)) / sum(r),
    print sum(i * w for i, w in enumerate(g)) / sum(g),
    print sum(i * w for i, w in enumerate(b)) / sum(b)
    params = urllib.urlencode({
        'red':
        sum(i * w for i, w in enumerate(r)) / sum(r),
        'green':
        sum(i * w for i, w in enumerate(g)) / sum(g),
        'blue':
        sum(i * w for i, w in enumerate(b)) / sum(b)
    })
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain"
    }
    conn = httplib.HTTPConnection("raspberrypi.local:3000")
    conn.request("POST", "", params, headers)
    response = conn.getresponse()
    conn.close()
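A self-contained sketch of the per-channel weighted mean computed above from PIL's histogram(), which returns 256 counts per channel for an RGB image:

from PIL import Image

img = Image.new("RGB", (8, 8), (200, 100, 50))   # solid test colour
h = img.histogram()
r, g, b = h[0:256], h[256:512], h[512:768]
mean_r = sum(i * count for i, count in enumerate(r)) / sum(r)
print(mean_r)   # 200.0 for this solid-colour image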
Example #45
 def dumpFrame(self, path):
     """Save rgb, xyz and uv data for frame as tsv files with simple names"""
     # Save as png.
     # png is 10x the size of jpeg, but we only halve the data per second
     # if we use jpeg as the other data is already compressed
     mode = 'RGBA'
     size = (KINECT_FRAME_WIDTH, KINECT_FRAME_HEIGHT)
     # Swizzle the rgb array to a form PIL will like
     rgb = self.rgb.reshape(self.rgb.shape[0] * self.rgb.shape[1],
                            self.rgb.shape[2])
     rgb = numpy.c_[rgb, 255 * numpy.ones((len(rgb), 1), numpy.uint8)]
     # Inefficiently convert the array to a PIL image
     pil_image = Image.frombuffer(mode, size, rgb.tostring(), 'raw', mode,
                                  0, 1)
     pil_image.save(os.path.join(path, "%s.%s" %
                                 (self.when, KINECT_FRAME_FORMAT)),
                    optimize=True)
     xyz, uv = self.xyzuv()
     # Save as gzipped tsv. Bulky text rather than binary incompatibility
     uvpath = os.path.join(path, "%s.uv.gz" % self.when)
     numpy.savetxt(uvpath, uv, delimiter='\t')
     xyzpath = os.path.join(path, "%s.xyz.gz" % self.when)
     numpy.savetxt(xyzpath, xyz, delimiter='\t')
Example #46
def extract_edof(data):
    if data[:3] != b'\xff\xd8\xff': raise ValueError("not JPEG")
    idx = data.find(b"\x00edof\x00")
    if idx <= 0: raise ValueError("no EDOF")

    orientation = data[idx + 12]
    columns = int.from_bytes(data[idx + 21:idx + 23], 'little')
    rows = int.from_bytes(data[idx + 23:idx + 25], 'little')

    img = Image.frombuffer('L', (columns, rows), data[idx + 73:], 'raw', 'L',
                           0, 0)
    buf = numpy.frombuffer(data[idx + 73:], 'uint8',
                           columns * rows).reshape(rows, columns)[::-1, :]
    if orientation == 0x10:
        img = img.transpose(Image.ROTATE_180)
        buf = numpy.rot90(buf, 2)
    elif orientation == 0x11:
        img = img.transpose(Image.ROTATE_90)
        buf = numpy.rot90(buf)
    elif orientation == 0x13:
        img = img.transpose(Image.ROTATE_270)
        buf = numpy.rot90(buf, -1)
    return buf  #numpy.asarray(img)
Example #47
 async def canvas_handler(sid, data):
     print(f'received {message} request')
     if canvas_message_valid(data):
         if 'canvasData' in data:
             blob = BytesIO(data['canvasData'])
             img = Image.open(blob)
         else:
             imageData = data['imageData']
             img = Image.frombuffer(
                 'RGBA', (imageData['width'], imageData['height']),
                 imageData['data'])
         output_img = handler(img)
         if isawaitable(output_img):  # handler was async
             output_img = await output_img
         output_buf = BytesIO()
         output_img.save(output_buf, format="PNG")
         base64_img = base64.b64encode(
             output_buf.getvalue()).decode('utf-8')
         print(f'executed {message} request')
         return {'canvasData': base64_img}
     else:
         print(f'rejected {message} request')
         return {'error': 'canvas message invalid'}
Example #48
    def renderToFile(self, node_path, filename, frame=None):
        node = self.node(node_path)
        if frame:
            render_frame = frame
        else:
            render_frame = self.frame()

        self.setFrame(render_frame)

        self.setFrame(frame)
        render_file_name = CopperString(self.engine, filename).expandedString()

        logger.info("OpenCL. Rendering frame %s for node %s to file: %s" %
                    (render_frame, node.path(), render_file_name))
        buff = node.getOutHostBuffer()
        image = Image.frombuffer('RGBA', node.size, buff.astype(numpy.uint8),
                                 'raw', 'RGBA', 0, 1)

        if "lin" in sys.platform:
            # Flip image vertically
            image = image.transpose(Image.FLIP_TOP_BOTTOM)

        image.save(render_file_name, 'JPEG', quality=100)
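getOutHostBuffer() returns a flat numeric array that is cast to uint8 before PIL sees it. A hedged sketch of just that conversion with random stand-in data (the platform-dependent flip mirrors the snippet above):

import sys
import numpy
from PIL import Image

width, height = 64, 48
buff = numpy.random.uniform(0, 255, width * height * 4)  # stand-in for getOutHostBuffer()

image = Image.frombuffer('RGBA', (width, height), buff.astype(numpy.uint8),
                         'raw', 'RGBA', 0, 1)
if "lin" in sys.platform:
    # Flip vertically, mirroring the snippet above.
    image = image.transpose(Image.FLIP_TOP_BOTTOM)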
Example #49
0
    def set_icon_from_data(self, pixels, has_alpha, w, h, rowstride, options={}):
        #this is convoluted but it works..
        log("set_icon_from_data%s", ("%s pixels" % len(pixels), has_alpha, w, h, rowstride, options))
        from PIL import Image   #@UnresolvedImport
        if has_alpha:
            img_format = "RGBA"
        else:
            img_format = "RGBX"
        rgb_format = options.get("rgb_format", "RGBA")
        img = Image.frombuffer(img_format, (w, h), pixels, "raw", rgb_format, rowstride, 1)
        assert img, "failed to load image from buffer (%i bytes for %ix%i %s)" % (len(pixels), w, h, rgb_format)
        #apparently, we have to use SM_CXSMICON (small icon) and not SM_CXICON (regular size):
        icon_w = GetSystemMetrics(win32con.SM_CXSMICON)
        icon_h = GetSystemMetrics(win32con.SM_CYSMICON)
        if w!=icon_w or h!=icon_h:
            log("resizing tray icon to %ix%i", icon_w, icon_h)
            img = img.resize((icon_w, icon_h), Image.ANTIALIAS)
            rowstride = w*4

        hicon = image_to_ICONINFO(img)
        self.do_set_icon(hicon)
        UpdateWindow(self.hwnd)
        self.reset_function = (self.set_icon_from_data, pixels, has_alpha, w, h, rowstride)
Example #50
0
 def draw_selector(self, sz, col=(255, 255, 255)):
     w, h = sz
     surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h)
     ctx = cairo.Context(surface)
     rad = 18.5
     deg = math.pi / 180
     x, y = 0, 0
     ctx.new_sub_path()
     ctx.arc(x + w - rad, y + rad, rad, -90 * deg, 0 * deg)
     ctx.arc(x + w - rad, y + h - rad, rad, 0 * deg, 90 * deg)
     ctx.arc(x + rad, y + h - rad, rad, 90 * deg, 180 * deg)
     ctx.arc(x + rad, y + rad, rad, 180 * deg, 270 * deg)
     ctx.close_path()
     ctx.set_source_rgba(*(vec4(vec3(col) / 255, 0.5)))
     ctx.fill_preserve()
     ctx.set_line_width(4.0)
     ctx.set_source_rgba(*(vec4(vec3(col) / 255, 0.8)))
     ctx.stroke()
     im = Image.frombuffer("RGBA", (w, h),
                           surface.get_data().tobytes(), "raw", "BGRA", 0,
                           0)
     buf = im.tobytes()
     return pygame.image.fromstring(buf, (w, h), "RGBA").convert_alpha()
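cairo's FORMAT_ARGB32 stores premultiplied BGRA on little-endian machines and may pad each row, which is why the pixel data goes through the 'BGRA' raw decoder. A minimal sketch of the cairo-to-PIL handoff on its own, passing the surface stride explicitly instead of 0:

import cairo
from PIL import Image

w, h = 64, 64
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h)
ctx = cairo.Context(surface)
ctx.set_source_rgba(1.0, 0.5, 0.0, 0.8)
ctx.paint()
surface.flush()

# get_stride() accounts for any row padding cairo added.
im = Image.frombuffer("RGBA", (w, h), bytes(surface.get_data()),
                      "raw", "BGRA", surface.get_stride(), 1)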
Example #51
0
def image_from_dataset(dataset):
    """
    Create an image from a pydicom dataset using Pillow.

    :param dataset: A pydicom dataset
    """

    if ("PixelData" not in dataset):
        raise TypeError("""This DICOM image does not have pixel data. You can \
        view metadata about it, but you can't view the image.""")

    img = None
    size = (512, 512)

    if 'WindowWidth' in dataset and 'WindowCenter' in dataset:
        img = Image.fromarray(dataset.pixel_array).convert("L")
        img = img.resize(size)
    else:
        mode = "L"
        img = Image.frombuffer(mode, size, dataset.PixelData, "raw", mode, 0,
                               1)

    return img
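Image.fromarray on a 16-bit pixel_array produces an 'I'/'I;16'-mode image, and converting that straight to 'L' can lose most of the dynamic range, so DICOM frames are often rescaled to 8 bits explicitly. A hedged sketch of one common normalisation (this is not part of the function above and assumes pixel_array is a 2-D numeric array):

import numpy
from PIL import Image

def to_8bit_image(pixel_array):
    # Rescale an arbitrary-range 2-D array into 0-255 for display (illustrative).
    arr = pixel_array.astype(numpy.float64)
    arr -= arr.min()
    peak = arr.max()
    if peak > 0:
        arr *= 255.0 / peak
    return Image.fromarray(arr.astype(numpy.uint8), 'L')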
Example #52
0
def screenshot(file_name=None):
    """
    Take a screen capture of the desktop.
    :param file_name: image file name to save to
    :return:
    """

    width = 1920
    height = 1080
    # SCREEN_SCALING_FACTOR = 1.5
    window = win32gui.GetDesktopWindow()
    window_dc = win32ui.CreateDCFromHandle(win32gui.GetWindowDC(window))
    compatible_dc = window_dc.CreateCompatibleDC()
    bmp = win32ui.CreateBitmap()
    bmp.CreateCompatibleBitmap(window_dc, width, height)
    compatible_dc.SelectObject(bmp)
    compatible_dc.BitBlt((0, 0), (width, height), window_dc, (0, 0),
                         win32con.SRCCOPY)
    img = Image.frombuffer('RGB', (width, height), bmp.GetBitmapBits(True),
                           'raw', 'BGRX', 0, 1)
    if file_name is not None:
        img.save(file_name)
    return img
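Unlike Example #53 below, this snippet never releases the device contexts or the bitmap it creates. A sketch of the same capture with the usual GDI cleanup added (Windows-only, same hard-coded 1920x1080 assumption; the function name is illustrative):

import win32con
import win32gui
import win32ui
from PIL import Image

def screenshot_with_cleanup(width=1920, height=1080):
    window = win32gui.GetDesktopWindow()
    wdc = win32gui.GetWindowDC(window)
    window_dc = win32ui.CreateDCFromHandle(wdc)
    compatible_dc = window_dc.CreateCompatibleDC()
    bmp = win32ui.CreateBitmap()
    try:
        bmp.CreateCompatibleBitmap(window_dc, width, height)
        compatible_dc.SelectObject(bmp)
        compatible_dc.BitBlt((0, 0), (width, height), window_dc, (0, 0),
                             win32con.SRCCOPY)
        return Image.frombuffer('RGB', (width, height), bmp.GetBitmapBits(True),
                                'raw', 'BGRX', 0, 1)
    finally:
        # Release the GDI handles that the original snippet leaves dangling.
        win32gui.DeleteObject(bmp.GetHandle())
        compatible_dc.DeleteDC()
        window_dc.DeleteDC()
        win32gui.ReleaseDC(window, wdc)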
Example #53
0
def take_screenshot(x0, y0, dx, dy):
    """
    Takes a screenshot of the region of the active window starting from
    (x0, y0) with width dx and height dy.
    """

    hwnd = win32gui.GetForegroundWindow()   # Window handle
    wDC = win32gui.GetWindowDC(hwnd)        # Window device context
    dcObj = win32ui.CreateDCFromHandle(wDC)
    cDC = dcObj.CreateCompatibleDC()

    dataBitMap = win32ui.CreateBitmap()     # PyHandle object
    dataBitMap.CreateCompatibleBitmap(dcObj, dx, dy)
    cDC.SelectObject(dataBitMap)
    cDC.BitBlt((0,0),(dx, dy) , dcObj, (x0, y0), win32con.SRCCOPY)
    image = dataBitMap.GetBitmapBits(1)

    dcObj.DeleteDC()
    cDC.DeleteDC()
    win32gui.ReleaseDC(hwnd, wDC)
    win32gui.DeleteObject(dataBitMap.GetHandle())
    # Note: the returned image size is fixed at 384x448 rather than (dx, dy).
    return Image.frombuffer("RGBA", (384, 448), image, "raw", "RGBA", 0, 1)
Example #54
0
def dump_image(subheader, index, imageReader, outDir=None, baseName=None):
    window = SubWindow()
    window.numRows = subheader['numRows'].intValue()
    window.numCols = subheader['numCols'].intValue()
    window.bandList = range(subheader.getBandCount())
    nbpp = subheader['numBitsPerPixel'].intValue()
    bandData = imageReader.read(window)
    
    if not outDir: outDir = os.getcwd()
    if not baseName: baseName = os.path.basename(os.tempnam())
    
    outNames = []
    for band, data in enumerate(bandData):
        outName = '%s_%d__%d_x_%d_%d_band_%d.jpg' % (
             baseName, index, window.numRows, window.numCols, nbpp, band)
        outName = os.path.join(outDir, outName)
        
        #TODO actually check the image type and set the mode properly
        im = Image.frombuffer('L', (window.numCols, window.numRows), data, 'raw', 'L', 0, 1)
        im.save(outName)
        outNames.append(outName)
        logging.info('Wrote band data to file %s' % outName)
    return outNames
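os.tempnam() was removed in Python 3, so the fallback base name above only works on Python 2. If this snippet is ported, something like uuid is a simple substitute (illustrative, not part of the original code):

import uuid

# A unique fallback base name without os.tempnam().
base_name = uuid.uuid4().hex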
Example #55
0
    def _execute_tilequant(self, dithering_mode: DitheringMode,
                           dithering_level: float) -> Image.Image:
        dst_px_idx = (c_uint8 * (self._img.width * self._img.height))()
        dst_pal = (c_uint8 *
                   (self._num_palettes * self._colors_per_palette * 4 * 4))()

        img_data_bytes = self._img.tobytes('raw', 'BGRa')
        img_data = (c_uint8 * len(img_data_bytes))()
        memmove(byref(img_data), img_data_bytes, len(img_data_bytes))

        self.lib.QualetizeFromRawImage(self._img.width, self._img.height,
                                       img_data, None, dst_px_idx, dst_pal, 1,
                                       True, self._num_palettes,
                                       self._colors_per_palette,
                                       self.tile_width, self.tile_height, None,
                                       (c_uint8 * 4)(*[31, 31, 31, 1]),
                                       dithering_mode.value, dithering_level)

        out = Image.frombuffer('P', (self._img.width, self._img.height),
                               dst_px_idx, 'raw', 'P', 0, 1)
        out.putpalette(dst_pal[:3 * 256])

        return out
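The ctypes plumbing is the interesting part here: the PIL image is serialised as premultiplied 'BGRa' bytes and memmove'd into a c_uint8 array for the native quantizer, and the returned index buffer is itself a valid buffer for Image.frombuffer. A sketch of that plumbing with the native call left out (the all-zero palette is a placeholder):

from ctypes import byref, c_uint8, memmove
from PIL import Image

img = Image.new('RGBA', (16, 16))

# Pack the image into a ctypes buffer, as passed to the native library above.
img_data_bytes = img.tobytes('raw', 'BGRa')
img_data = (c_uint8 * len(img_data_bytes))()
memmove(byref(img_data), img_data_bytes, len(img_data_bytes))

# The quantizer would fill this with one palette index per pixel.
dst_px_idx = (c_uint8 * (img.width * img.height))()

out = Image.frombuffer('P', (img.width, img.height), dst_px_idx, 'raw', 'P', 0, 1)
out.putpalette(bytes(768))  # 256 RGB entries; placeholder all-black palette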
Example #56
0
    def callback(self, ros_data):
        '''Callback function of subscribed topic.
        Here images get converted and features detected'''
        if VERBOSE:
            print 'received image of type: "%s"' % ros_data.format

        #### direct conversion to CV2 ####
        #np_arr = np.fromstring(ros_data.data, np.uint8)
        from PIL import Image
        np_arr = ros_data.data
        image_byte_array = array.array('b', np_arr)
        image_buffer = Image.frombuffer("RGB", (640, 480), image_byte_array,
                                        "raw", "BGR", 0, 1)
        image_buffer = image_buffer.transpose(Image.FLIP_LEFT_RIGHT)
        image_buffer = image_buffer.transpose(Image.ROTATE_180)
        img2 = np.asarray(image_buffer)
        time2 = time.time()

        if VERBOSE:
            print '%s detector found: %s points in: %s sec.' % (
                method, len(featPoints), time2 - time1)
        if len(str(time2)) > 12:
            cv2.imwrite(self.folder_name + "/" + str(time2) + ".png", img2)
Example #57
0
def send_histogram(ctx, channel_name, histogram):
    data = copy.copy(histogram)

    while data[-1] == 0:
        data.pop()

    x = [i for i in range(0, len(data))]
    y = data

    fig = plt.figure(figsize=(10, 10), dpi=80)
    ax = fig.add_subplot(111)
    ax.bar(x, y, label='Histogram')
    fig.canvas.draw()
    data = fig.canvas.tostring_rgb()

    histogram_image = Image.frombuffer("RGB", (800, 800), data, "raw", "RGB",
                                       0, 1)

    neptune_image = neptune.Image(name="Histogram",
                                  description="Histogram",
                                  data=histogram_image)

    ctx.channel_send(channel_name, neptune_image)
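tostring_rgb() has been deprecated in recent Matplotlib releases; the RGBA buffer used in the two examples that follow works on both old and new versions and avoids hard-coding the 800x800 size. A hedged sketch of the same figure-to-PIL conversion:

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from PIL import Image

fig = plt.figure(figsize=(10, 10), dpi=80)
ax = fig.add_subplot(111)
ax.bar(range(5), [1, 3, 2, 5, 4], label='Histogram')
fig.canvas.draw()

# buffer_rgba() plus the canvas size replaces tostring_rgb() and the fixed 800x800.
size = fig.canvas.get_width_height()
histogram_image = Image.frombuffer("RGBA", size, fig.canvas.buffer_rgba(),
                                   "raw", "RGBA", 0, 1).convert("RGB")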
Example #58
0
def screenshot():
    left, top, right, bot = win32gui.GetClientRect(hdesktop)
    width = right - left
    height = bot - top
    top += tshift
    left += lshift
    desktop_dc = win32gui.GetWindowDC(hdesktop)
    img_dc = win32ui.CreateDCFromHandle(desktop_dc)
    mem_dc = img_dc.CreateCompatibleDC()
    screenshot = win32ui.CreateBitmap()
    screenshot.CreateCompatibleBitmap(img_dc, width, height)
    mem_dc.SelectObject(screenshot)
    mem_dc.BitBlt((0, 0), (width, height), img_dc, (left, top),
                  win32con.SRCCOPY)

    im = Image.frombuffer('RGB', (width, height),
                          screenshot.GetBitmapBits(True), 'raw', 'RGBX', 0, 1)
    if im.size[0] < 128:
        return None

    mem_dc.DeleteDC()
    win32gui.DeleteObject(screenshot.GetHandle())
    return cv2.cvtColor(np.array(im), cv2.COLOR_RGBA2RGB)
Example #59
0
def export_image(filename, i_format, figure, settings):
    oldSize = figure.get_size_inches()
    oldDpi = figure.get_dpi()
    figure.set_size_inches((settings.exportWidth, settings.exportHeight))
    figure.set_dpi(settings.exportDpi)

    canvas = FigureCanvasAgg(figure)
    canvas.draw()
    renderer = canvas.get_renderer()
    if matplotlib.__version__ >= '1.2':
        buf = renderer.buffer_rgba()
    else:
        buf = renderer.buffer_rgba(0, 0)
    size = canvas.get_width_height()
    image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
    image = image.convert('RGB')
    ext = File.get_type_ext(i_format, File.Types.IMAGE)
    image.save(filename,
               format=ext[1::],
               dpi=(settings.exportDpi, settings.exportDpi))

    figure.set_size_inches(oldSize)
    figure.set_dpi(oldDpi)
Example #60
0
    def __draw_image(self, sizeInches, ppi):
        oldSize = self.figure.get_size_inches()
        oldDpi = self.figure.get_dpi()
        self.figure.set_size_inches(sizeInches)
        self.figure.set_dpi(ppi)

        canvas = FigureCanvasAgg(self.figure)
        canvas.draw()
        renderer = canvas.get_renderer()
        if matplotlib.__version__ >= '1.2':
            buf = renderer.buffer_rgba()
        else:
            buf = renderer.buffer_rgba(0, 0)
        size = canvas.get_width_height()
        image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)

        self.figure.set_size_inches(oldSize)
        self.figure.set_dpi(oldDpi)

        imageWx = wx.Image(image.size[0], image.size[1])
        imageWx.SetData(image.convert('RGB').tobytes())

        return imageWx
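The final step above is the PIL-to-wxPython handoff: wx.Image.SetData expects packed 24-bit RGB bytes, which is exactly what tobytes() on an RGB-mode PIL image produces. A minimal sketch of that step on its own (synthetic image, no figure involved):

import wx
from PIL import Image

app = wx.App(False)  # create a wx.App first, as most wx objects expect one to exist

pil_image = Image.new('RGB', (200, 100), (30, 144, 255))
imageWx = wx.Image(pil_image.size[0], pil_image.size[1])
imageWx.SetData(pil_image.tobytes())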