def gendigest(digest, password, salt):
    assert password and salt
    salt = memoryview_to_bytes(salt)
    password = strtobytes(password)
    if digest=="des":
        from xpra.net.d3des import generate_response   #pylint: disable=import-outside-toplevel
        password = password.ljust(8, b"\x00")[:8]
        salt = salt.ljust(16, b"\x00")[:16]
        v = generate_response(password, salt)
        return hexstr(v)
    if digest in ("xor", "kerberos", "gss"):
        #kerberos and gss use xor because we need to use the actual token
        #at the other end
        salt = salt.ljust(len(password), b"\x00")[:len(password)]
        from xpra.buffers.cyxor import xor_str          #@UnresolvedImport pylint: disable=import-outside-toplevel
        v = xor_str(password, salt)
        return memoryview_to_bytes(v)
    digestmod = get_digest_module(digest)
    if not digestmod:
        log("invalid digest module '%s'", digest)
        return None
        #warn_server_and_exit(EXIT_UNSUPPORTED,
        #    "server requested digest '%s' but it is not supported" % digest, "invalid digest")
    v = hmac.HMAC(password, salt, digestmod=digestmod).hexdigest()
    return v
def gendigest(digest, password, salt):
    assert digest and password and salt
    salt = memoryview_to_bytes(salt)
    password = strtobytes(password)
    if digest == "xor":
        salt = salt.ljust(len(password), b"\x00")[:len(password)]
        return memoryview_to_bytes(xor(password, salt))
    digestmod = get_digest_module(digest)
    if not digestmod:
        log("invalid digest module '%s'", digest)
        return None
        #warn_server_and_exit(EXIT_UNSUPPORTED, "server requested digest '%s' but it is not supported" % digest, "invalid digest")
    v = hmac.HMAC(strtobytes(password), strtobytes(salt), digestmod=digestmod).hexdigest()
    return v
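# Note: every snippet in this collection leans on xpra's small byte-normalization helpers
# (memoryview_to_bytes, strtobytes, hexstr). The functions below are only a rough sketch of
# the behaviour those helpers are assumed to provide here, not the actual xpra implementations.
import binascii

def memoryview_to_bytes_sketch(v):
    #flatten memoryview/bytearray wrappers down to a plain bytes object:
    if isinstance(v, memoryview):
        return v.tobytes()
    if isinstance(v, bytearray):
        return bytes(v)
    return v

def strtobytes_sketch(v):
    #encode text to bytes, pass byte-like values through:
    if isinstance(v, str):
        return v.encode("latin1")
    return memoryview_to_bytes_sketch(v)

def hexstr_sketch(v):
    return binascii.hexlify(memoryview_to_bytes_sketch(v)).decode("latin1")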
def nasty_rgb_via_png_paint(self, cairo_format, has_alpha, img_data, x, y, width, height, rowstride, rgb_format):
    log("nasty_rgb_via_png_paint%s", (cairo_format, has_alpha, len(img_data), x, y, width, height, rowstride, rgb_format))
    #PIL fallback
    PIL = get_codec("PIL")
    if has_alpha:
        oformat = "RGBA"
    else:
        oformat = "RGB"
    #use frombytes rather than frombuffer to be compatible with python3 new-style buffers
    #this is slower, but since this codepath is already dreadfully slow, we don't care
    bdata = strtobytes(memoryview_to_bytes(img_data))
    try:
        img = PIL.Image.frombytes(oformat, (width,height), bdata, "raw", rgb_format.replace("X", "A"), rowstride, 1)
    except ValueError as e:
        raise Exception("failed to parse raw %s data to %s: %s" % (rgb_format, oformat, e))
    #This is insane, the code below should work, but it doesn't:
    # img_data = bytearray(img.tostring('raw', oformat, 0, 1))
    # pixbuf = pixbuf_new_from_data(img_data, COLORSPACE_RGB, True, 8, width, height, rowstride)
    # success = self.cairo_paint_pixbuf(pixbuf, x, y)
    #So we still roundtrip via PNG:
    png = BytesIOClass()
    img.save(png, format="PNG")
    reader = BytesIOClass(png.getvalue())
    png.close()
    img = cairo.ImageSurface.create_from_png(reader)
    self.cairo_paint_surface(img, x, y)
    return True
def nasty_rgb_via_png_paint(self, cairo_format, has_alpha : bool, img_data, x : int, y : int, width : int, height : int, rowstride : int, rgb_format):
    log.warn("nasty_rgb_via_png_paint%s", (cairo_format, has_alpha, len(img_data), x, y, width, height, rowstride, rgb_format))
    #PIL fallback
    from PIL import Image
    if has_alpha:
        oformat = "RGBA"
    else:
        oformat = "RGB"
    #use frombytes rather than frombuffer to be compatible with python3 new-style buffers
    #this is slower, but since this codepath is already dreadfully slow, we don't care
    bdata = memoryview_to_bytes(img_data)
    src_format = rgb_format.replace("X", "A")
    try:
        img = Image.frombytes(oformat, (width,height), bdata, "raw", src_format, rowstride, 1)
    except ValueError as e:
        log("PIL Image frombytes:", exc_info=True)
        raise Exception("failed to parse raw %s data as %s to %s: %s" % (
            rgb_format, src_format, oformat, e)) from None
    #This is insane, the code below should work, but it doesn't:
    # img_data = bytearray(img.tostring('raw', oformat, 0, 1))
    # pixbuf = new_from_data(img_data, COLORSPACE_RGB, True, 8, width, height, rowstride)
    # success = self.cairo_paint_pixbuf(pixbuf, x, y)
    #So we still roundtrip via PNG:
    from io import BytesIO
    png = BytesIO()
    img.save(png, format="PNG")
    reader = BytesIO(png.getvalue())
    png.close()
    img = ImageSurface.create_from_png(reader)
    self.cairo_paint_surface(img, x, y, width, height, {})
    return True
def nasty_rgb_via_png_paint(self, cairo_format, has_alpha, img_data, x, y, width, height, rowstride, rgb_format):
    log("nasty_rgb_via_png_paint%s", (cairo_format, has_alpha, len(img_data), x, y, width, height, rowstride, rgb_format))
    #PIL fallback
    PIL = get_codec("PIL")
    if has_alpha:
        oformat = "RGBA"
    else:
        oformat = "RGB"
    #use frombytes rather than frombuffer to be compatible with python3 new-style buffers
    #this is slower, but since this codepath is already dreadfully slow, we don't care
    bdata = strtobytes(memoryview_to_bytes(img_data))
    img = PIL.Image.frombytes(oformat, (width, height), bdata, "raw", rgb_format, rowstride, 1)
    #This is insane, the code below should work, but it doesn't:
    # img_data = bytearray(img.tostring('raw', oformat, 0, 1))
    # pixbuf = pixbuf_new_from_data(img_data, COLORSPACE_RGB, True, 8, width, height, rowstride)
    # success = self.cairo_paint_pixbuf(pixbuf, x, y)
    #So we still roundtrip via PNG:
    png = BytesIOClass()
    img.save(png, format="PNG")
    reader = BytesIOClass(png.getvalue())
    png.close()
    img = cairo.ImageSurface.create_from_png(reader)
    self.cairo_paint_surface(img, x, y)
    return True
def _do_paint_rgb24(self, img_data, x, y, width, height, rowstride, options):
    img_data = memoryview_to_bytes(img_data)
    if INDIRECT_BGR:
        img_data, rowstride = self.bgr_to_rgb(img_data, width, height, rowstride, options.strget("rgb_format", ""), "RGB")
    gc = self._backing.new_gc()
    self._backing.draw_rgb_image(gc, x, y, width, height, gdk.RGB_DITHER_NONE, img_data, rowstride)
    return True
def _do_paint_rgb24(self, img_data, x, y, width, height, rowstride, options):
    img_data = memoryview_to_bytes(img_data)
    gc = self._backing.new_gc()
    self._backing.draw_rgb_image(gc, x, y, width, height, gdk.RGB_DITHER_NONE, img_data, rowstride)
    return True
def _process_ws_ping(self, payload):
    log("_process_ws_ping(%r)", payload)
    item = encode_hybi_header(OPCODE_PONG, len(payload)) + memoryview_to_bytes(payload)
    items = (item, )
    with self._write_lock:
        self.raw_write(items)
def _do_paint_rgb(self, cairo_format, has_alpha, img_data, x, y, width, height, rowstride, options):
    """ must be called from UI thread """
    log("cairo._do_paint_rgb(%s, %s, %s bytes,%s,%s,%s,%s,%s,%s)", cairo_format, has_alpha, len(img_data), x, y, width, height, rowstride, options)
    rgb_format = options.strget("rgb_format", "RGB")
    if rgb_format in ("ARGB", ):
        #the pixel format is also what cairo expects
        #maybe we should also check that the stride is acceptable for cairo?
        #cairo_stride = cairo.ImageSurface.format_stride_for_width(cairo_format, width)
        #log("cairo_stride=%s, stride=%s", cairo_stride, rowstride)
        pix_data = bytearray(img_data)
        img_surface = cairo.ImageSurface.create_for_data(pix_data, cairo_format, width, height, rowstride)
        self.cairo_paint_surface(img_surface, x, y)
        return True
    if rgb_format in ("RGBA", "RGBX", "RGB"):
        #with GTK2, we can use a pixbuf from RGB(A) pixels
        if rgb_format=="RGBA":
            #we have to unpremultiply for pixbuf!
            img_data = self.unpremultiply(img_data)
        #Pixbuf cannot use the memoryview directly:
        img_data = memoryview_to_bytes(img_data)
        pixbuf = pixbuf_new_from_data(img_data, COLORSPACE_RGB, has_alpha, 8, width, height, rowstride)
        self.cairo_paint_pixbuf(pixbuf, x, y)
        return True
    self.nasty_rgb_via_png_paint(cairo_format, has_alpha, img_data, x, y, width, height, rowstride, rgb_format)
    return True
def do_cmp_unmask(buf, hlen, plen):
    c = memoryview_to_bytes(hybi_unmask(buf, hlen, plen))
    w = numpy_unmask(buf, hlen, plen)
    assert w == c, "expected %s got %s" % (
        repr_ellipsized(binascii.hexlify(w)),
        repr_ellipsized(binascii.hexlify(c)),
        )
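# A minimal sketch (assumptions only, not the reference implementations compared above) of a
# numpy-based websocket "hybi" unmask: the layout assumed is a 4-byte XOR mask at offset
# `hlen`, immediately followed by `plen` masked payload bytes.
import numpy as np

def numpy_unmask_sketch(buf, hlen, plen):
    mask = np.frombuffer(buf, dtype=np.uint8, count=4, offset=hlen)
    data = np.frombuffer(buf, dtype=np.uint8, count=plen, offset=hlen+4)
    #repeat the 4-byte mask over the payload length and XOR it away:
    return (data ^ np.resize(mask, plen)).tobytes()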
def cairo_paint_pointer_overlay(context, cursor_data, px: int, py: int, start_time):
    if not cursor_data:
        return
    elapsed = max(0, monotonic_time() - start_time)
    if elapsed > 6:
        return
    cw = cursor_data[3]
    ch = cursor_data[4]
    xhot = cursor_data[5]
    yhot = cursor_data[6]
    pixels = cursor_data[8]
    x = px - xhot
    y = py - yhot
    alpha = max(0, (5.0 - elapsed) / 5.0)
    log("cairo_paint_pointer_overlay%s drawing pointer with cairo, alpha=%s", (context, x, y, start_time), alpha)
    context.translate(x, y)
    context.rectangle(0, 0, cw, ch)
    argb = unpremultiply_argb(pixels)
    img_data = memoryview_to_bytes(argb)
    pixbuf = get_pixbuf_from_data(img_data, True, cw, ch, cw * 4)
    context.set_operator(cairo.OPERATOR_OVER)
    Gdk.cairo_set_source_pixbuf(context, pixbuf, 0, 0)
    context.paint()
def _test_csc(self, mod, width=16, height=16, in_csc="BGRX", out_csc="YUV420P", pixel="00000000", expected=()):
    csc_mod = loader.load_codec(mod)
    if not csc_mod:
        print("%s not found" % mod)
        return
    if in_csc not in csc_mod.get_input_colorspaces():
        raise Exception("%s does not support %s as input" % (mod, in_csc))
    if out_csc not in csc_mod.get_output_colorspaces(in_csc):
        raise Exception("%s does not support %s as output for %s" % (mod, out_csc, in_csc))
    csc = csc_mod.ColorspaceConverter()
    csc.init_context(width, height, in_csc, width, height, out_csc)
    image = make_test_image(in_csc, width, height)
    size = image.get_rowstride()//4*image.get_height()
    bgrx = h2b(pixel)*size
    image.set_pixels(bgrx)
    out_image = csc.convert_image(image)
    csc.clean()
    assert out_image.get_planes()>=len(expected)
    #now verify the value for each plane specified:
    for i, v_str in enumerate(expected):
        plane = out_image.get_pixels()[i]
        #plane_stride = out_image.get_rowstride()[i]
        #assert len(plane)>=plane_stride*out_image.get_height()
        plane_bytes = memoryview_to_bytes(plane)
        v = h2b(v_str)
        if not cmpp(plane_bytes, v):
            raise Exception("%s: plane %s, expected %s but got %s" % (
                mod, out_csc[i], v_str, hexstr(plane_bytes[:len(v)])))
def _do_paint_rgb32(self, img_data, x, y, width, height, rowstride, options):
    has_alpha = options.boolget("has_alpha", False) or options.get("rgb_format", "").find("A") >= 0
    if has_alpha:
        img_data = self.unpremultiply(img_data)
    if isinstance(img_data, (memoryview, _buffer, bytearray)):
        img_data = memoryview_to_bytes(img_data)
    if INDIRECT_BGR:
        img_data, rowstride = self.bgr_to_rgb(img_data, width, height, rowstride, options.strget("rgb_format", ""), "RGBA")
    if has_alpha:
        #draw_rgb_32_image does not honour alpha, we have to use pixbuf:
        pixbuf = gdk.pixbuf_new_from_data(img_data, gdk.COLORSPACE_RGB, True, 8, width, height, rowstride)
        cr = self._backing.cairo_create()
        cr.rectangle(x, y, width, height)
        cr.set_source_pixbuf(pixbuf, x, y)
        cr.set_operator(cairo.OPERATOR_SOURCE)
        cr.paint()
    else:
        #no alpha or scaling is easier:
        gc = self._backing.new_gc()
        self._backing.draw_rgb_32_image(gc, x, y, width, height, gdk.RGB_DITHER_NONE, img_data, rowstride)
    if self.paint_box_line_width > 0:
        self.paint_box(x, y, width, height, options)
    return True
def restride(self, rowstride):
    assert not self.freed
    if self.planes>0:
        #not supported yet for planar images
        return False
    pixels = self.pixels
    assert pixels, "no pixel data to restride"
    oldstride = self.rowstride
    pos = 0
    lines = []
    for _ in range(self.height):
        lines.append(memoryview_to_bytes(pixels[pos:pos+rowstride]))
        pos += oldstride
    if self.height>0 and oldstride<rowstride:
        #the last few lines may need padding if the new rowstride is bigger
        #(usually just the last line)
        #we do this here to avoid slowing down the main loop above
        #as this should be a rarer case
        for h in range(self.height):
            i = -(1+h)
            line = lines[i]
            if len(line)<rowstride:
                lines[i] = line + b"\0"*(rowstride-len(line))
            else:
                break
    self.rowstride = rowstride
    self.pixels = b"".join(lines)
    return True
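# Illustrative example (hypothetical values, not from the original): the row-slicing loop
# used by restride() above, shown shrinking a 2-row buffer from an 8-byte stride
# (2 bytes of padding per row) down to a 6-byte stride.
pixels = bytes(range(16))          #two rows of 8 bytes each
oldstride, newstride, height = 8, 6, 2
lines = []
pos = 0
for _ in range(height):
    lines.append(pixels[pos:pos+newstride])
    pos += oldstride
assert b"".join(lines) == bytes([0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13])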
def damage(self, _wid, window, x, y, w, h, options=None):
    polling = options and options.get("polling", False)
    p = self.protocol
    if polling and p is None or p.queue_size() >= 2:
        #very basic RFB update rate control,
        #if there are packets waiting already
        #we'll just process the next polling update instead:
        return
    img = window.get_image(x, y, w, h)
    window.acknowledge_changes()
    log("damage: %s", img)
    if not img or self.is_closed():
        return
    fbupdate = struct.pack(b"!BBH", 0, 0, 1)
    encoding = 0    #Raw
    rect = struct.pack(b"!HHHHi", x, y, w, h, encoding)
    if img.get_rowstride() != w * 4:
        img.restride(w * 4)
    pixels = img.get_pixels()
    assert len(pixels) >= 4 * w * h
    pixels = pixels[:4 * w * h]
    if len(pixels) <= PACKET_JOIN_SIZE:
        self.send(fbupdate + rect + memoryview_to_bytes(pixels))
    else:
        self.send(fbupdate + rect)
        self.send(pixels)
def get_sub_image(self, x, y, w, h):
    #raise NotImplementedError("no sub-images for %s" % type(self))
    assert w>0 and h>0, "invalid sub-image size: %ix%i" % (w, h)
    if x+w>self.width:
        raise Exception("invalid sub-image width: %i+%i greater than image width %i" % (x, w, self.width))
    if y+h>self.height:
        raise Exception("invalid sub-image height: %i+%i greater than image height %i" % (y, h, self.height))
    assert self.planes==0, "cannot sub-divide planar images!"
    if x==0 and y==0 and w==self.width and h==self.height:
        #same dimensions, use the same wrapper
        return self
    #copy to local variables:
    pixels = self.pixels
    oldstride = self.rowstride
    pos = y*oldstride + x*self.bytesperpixel
    newstride = w*self.bytesperpixel
    lines = []
    for _ in range(h):
        lines.append(memoryview_to_bytes(pixels[pos:pos+newstride]))
        pos += oldstride
    image = ImageWrapper(self.x+x, self.y+y, w, h, b"".join(lines), self.pixel_format, self.depth, newstride,
                         planes=self.planes, thread_safe=True, palette=self.palette)
    image.set_target_x(self.target_x+x)
    image.set_target_y(self.target_y+y)
    return image
def _do_paint_rgb(self, cairo_format, has_alpha, img_data, x, y, width, height, rowstride, options):
    """ must be called from UI thread """
    log("cairo._do_paint_rgb(%s, %s, %s bytes,%s,%s,%s,%s,%s,%s)", cairo_format, has_alpha, len(img_data), x, y, width, height, rowstride, options)
    rgb_format = options.strget("rgb_format", "RGB")
    if rgb_format in ("ARGB", ):
        #the pixel format is also what cairo expects
        #maybe we should also check that the stride is acceptable for cairo?
        #cairo_stride = cairo.ImageSurface.format_stride_for_width(cairo_format, width)
        #log("cairo_stride=%s, stride=%s", cairo_stride, rowstride)
        pix_data = bytearray(img_data)
        img_surface = cairo.ImageSurface.create_for_data(pix_data, cairo_format, width, height, rowstride)
        self.cairo_paint_surface(img_surface, x, y, options)
        return True
    if rgb_format in ("RGBA", "RGBX", "RGB"):
        #with GTK2, we can use a pixbuf from RGB(A) pixels
        if rgb_format == "RGBA":
            #we have to unpremultiply for pixbuf!
            img_data = self.unpremultiply(img_data)
        #Pixbuf cannot use the memoryview directly:
        img_data = memoryview_to_bytes(img_data)
        pixbuf = pixbuf_new_from_data(img_data, COLORSPACE_RGB, has_alpha, 8, width, height, rowstride)
        self.cairo_paint_pixbuf(pixbuf, x, y, options)
        return True
    self.nasty_rgb_via_png_paint(cairo_format, has_alpha, img_data, x, y, width, height, rowstride, rgb_format)
    return True
def main(filename):
    from io import BytesIO
    from xpra.os_util import memoryview_to_bytes
    from xpra.gtk_common.gtk_util import get_default_root_window, get_root_size
    root = get_default_root_window()
    capture = setup_capture(root)
    capture.refresh()
    w, h = get_root_size()
    image = capture.get_image(0, 0, w, h)
    from PIL import Image
    fmt = image.get_pixel_format().replace("X", "A")
    pixels = memoryview_to_bytes(image.get_pixels())
    log("converting %i bytes in format %s to RGBA", len(pixels), fmt)
    if len(fmt) == 3:
        target = "RGB"
    else:
        target = "RGBA"
    pil_image = Image.frombuffer(target, (w, h), pixels, "raw", fmt, image.get_rowstride())
    if target != "RGB":
        pil_image = pil_image.convert("RGB")
    buf = BytesIO()
    pil_image.save(buf, "png")
    data = buf.getvalue()
    buf.close()
    with open(filename, "wb") as f:
        f.write(data)
    return 0
def get_sub_image(self, x, y, w, h):
    #raise NotImplementedError("no sub-images for %s" % type(self))
    assert w > 0 and h > 0, "invalid sub-image size: %ix%i" % (w, h)
    if x + w > self.width:
        raise Exception("invalid sub-image width: %i+%i greater than image width %i" % (x, w, self.width))
    if y + h > self.height:
        raise Exception("invalid sub-image height: %i+%i greater than image height %i" % (y, h, self.height))
    assert self.planes == 0, "cannot sub-divide planar images!"
    from xpra.os_util import memoryview_to_bytes
    #copy to local variables:
    pixels = self.pixels
    oldstride = self.rowstride
    pos = y * oldstride + x * 4
    newstride = w * 4
    lines = []
    for _ in range(h):
        lines.append(memoryview_to_bytes(pixels[pos:pos + newstride]))
        pos += oldstride
    return ImageWrapper(self.x + x, self.y + y, w, h, b"".join(lines), self.pixel_format, self.depth, newstride,
                        planes=self.planes, thread_safe=True)
def compress_image(self, image, options=None):
    log("compress_image(%s, %s)", image, options)
    #pass the pixels as they are
    assert image.get_planes() == ImageWrapper.PACKED, "invalid number of planes: %s" % image.get_planes()
    pixels = image.get_pixels()
    assert pixels, "failed to get pixels from %s" % image
    #info used by proxy encoder:
    client_options = {
        "proxy"     : True,
        "frame"     : self.frames,
        "pts"       : image.get_timestamp() - self.first_frame_timestamp,
        "timestamp" : image.get_timestamp(),
        "rowstride" : image.get_rowstride(),
        "depth"     : image.get_depth(),
        "rgb_format": image.get_pixel_format(),
        #pass-through encoder options:
        "options"   : options or {},
        }
    if self.frames == 0:
        self.first_frame_timestamp = image.get_timestamp()
    #must pass dst_formats so the proxy can instantiate the video encoder
    #with the correct CSC config:
    client_options["dst_formats"] = self.dst_formats
    log("compress_image(%s, %s) returning %s bytes and options=%s", image, options, len(pixels), client_options)
    self.last_frame_times.append(monotonic())
    self.frames += 1
    return memoryview_to_bytes(pixels[:]), client_options
def _pipe_write(self, buf):
    bbuf = memoryview_to_bytes(buf)
    size = len(bbuf)
    log("pipe_write: %i bytes", size)   #binascii.hexlify(buf))
    written = c_ulong(0)
    r = WriteFile(self.pipe_handle, c_char_p(bbuf), size, byref(written), byref(self.write_overlapped))
    log("WriteFile(..)=%s, len=%i", r, written.value)
    if not r and self.pipe_handle:
        e = GetLastError()
        if e!=ERROR_IO_PENDING:
            log("WriteFile: %s", IO_ERROR_STR.get(e, e))
            if e in CONNECTION_CLOSED_ERRORS:
                raise ConnectionClosedException(CONNECTION_CLOSED_ERRORS[e])
        r = WaitForSingleObject(self.write_event, INFINITE)
        log("WaitForSingleObject(..)=%s, len=%i", WAIT_STR.get(r, r), written.value)
        if not self.pipe_handle:
            #closed already!
            return written.value
        if r:
            raise Exception("failed to write buffer to named pipe handle %s" % self.pipe_handle)
    if self.pipe_handle:
        if not GetOverlappedResult(self.pipe_handle, byref(self.write_overlapped), byref(written), False):
            e = GetLastError()
            raise Exception("overlapped write failed: %s" % IO_ERROR_STR.get(e, e))
        log("pipe_write: %i bytes written", written.value)
        if self.pipe_handle:
            FlushFileBuffers(self.pipe_handle)
    #SetFilePointer(self.pipe_handle, 0, FILE_BEGIN)
    return written.value
def parse_ws_frame(self, buf):
    if not buf:
        self._read_queue_put(buf)
        return
    if self.ws_data:
        ws_data = self.ws_data+buf
        self.ws_data = b""
    else:
        ws_data = buf
    log("parse_ws_frame(%i bytes) total buffer is %i bytes", len(buf), len(ws_data))
    while ws_data and not self._closed:
        parsed = decode_hybi(ws_data)
        if parsed is None:
            log("parse_ws_frame(%i bytes) not enough data", len(ws_data))
            #not enough data to get a full websocket frame,
            #save it for later:
            self.ws_data = ws_data
            return
        opcode, payload, processed, fin = parsed
        ws_data = ws_data[processed:]
        log("parse_ws_frame(%i bytes) payload=%i bytes, processed=%i, remaining=%i, opcode=%s, fin=%s",
            len(buf), len(payload), processed, len(ws_data), OPCODES.get(opcode, opcode), fin)
        if opcode==OPCODE_CONTINUE:
            assert self.ws_payload_opcode and self.ws_payload, "continuation frame does not follow a partial frame"
            self.ws_payload.append(payload)
            if not fin:
                #wait for more
                continue
            #join all the frames and process the payload:
            full_payload = b"".join(memoryview_to_bytes(v) for v in self.ws_payload)
            self.ws_payload = []
            opcode = self.ws_payload_opcode
            self.ws_payload_opcode = 0
        else:
            if self.ws_payload and self.ws_payload_opcode:
                raise Exception("expected a continuation frame not %s" % OPCODES.get(opcode, opcode))
            full_payload = payload
            if not fin:
                if opcode not in (OPCODE_BINARY, OPCODE_TEXT):
                    raise Exception("cannot handle fragmented '%s' frames" % OPCODES.get(opcode, opcode))
                #fragmented, keep this payload for later
                self.ws_payload_opcode = opcode
                self.ws_payload.append(payload)
                continue
        if opcode==OPCODE_BINARY:
            self._read_queue_put(full_payload)
        elif opcode==OPCODE_TEXT:
            if first_time("ws-text-frame-from-%s" % self._conn):
                log.warn("Warning: handling text websocket frame as binary")
            self._read_queue_put(full_payload)
        elif opcode==OPCODE_CLOSE:
            self._process_ws_close(full_payload)
        elif opcode==OPCODE_PING:
            self._process_ws_ping(full_payload)
        elif opcode==OPCODE_PONG:
            self._process_ws_pong(full_payload)
        else:
            log.warn("Warning unhandled websocket opcode '%s'", OPCODES.get(opcode, "%#x" % opcode))
        log("payload=%r", payload)
def image_to_ICONINFO(img):
    w, h = img.size
    from xpra.codecs.argb.argb import rgba_to_bgra      #@UnresolvedImport
    if TRAY_ALPHA and img.mode.find("A")>=0:            #ie: RGBA
        rgb_format = "BGRA"
    else:
        rgb_format = "BGR"
    rgb_data = memoryview_to_bytes(rgba_to_bgra(img.tobytes("raw", rgb_format)))
    return make_ICONINFO(w, h, rgb_data, rgb_format=rgb_format)
def main(): if "-v" in sys.argv or "--verbose" in sys.argv: log.enable_debug() add_debug_category("nvfbc") from xpra.platform import program_context with program_context("NvFBC-Capture", "NvFBC Capture"): from xpra.platform.paths import get_download_dir from xpra.util import print_nested_dict from xpra.os_util import WIN32, LINUX if WIN32: from xpra.codecs.nvfbc import fbc_capture_win as fbc_capture #@UnresolvedImport @UnusedImport elif LINUX: from xpra.codecs.nvfbc import fbc_capture_linux as fbc_capture #@UnresolvedImport @Reimport else: raise Exception("nvfbc is not support on %s" % sys.platform) fbc_capture.init_module() log.info("Info:") print_nested_dict(fbc_capture.get_info(), print_fn=log.info) log.info("Status:") print_nested_dict(fbc_capture.get_status(), print_fn=log.info) try: log("creating test capture class") if USE_NVFBC_CUDA: c = fbc_capture.NvFBC_CUDACapture() #@UndefinedVariable else: c = fbc_capture.NvFBC_SysCapture() #@UndefinedVariable log("Capture=%s", c) c.init_context() assert c.refresh() except Exception as e: log("Capture()", exc_info=True) log.error("Error: failed to create test capture instance:") log.error(" %s", e) return 1 image = c.get_image() assert image from PIL import Image w = image.get_width() h = image.get_height() pixels = memoryview_to_bytes(image.get_pixels()) stride = image.get_rowstride() rgb_format = image.get_pixel_format() try: img = Image.frombuffer("RGB", (w, h), pixels, "raw", rgb_format, stride, 1) filename = os.path.join( os.path.expanduser(get_download_dir()), "screenshot-%s-%i.png" % (rgb_format, time.time())) img.save(filename, "png") log.info("screenshot saved to %s", filename) return 0 except Exception as e: log.warn("Error: failed to save %s:", rgb_format) log.warn(" %s", e) return 1
def set_tray_icon(self, tray_data):
    enc, w, h, rowstride, pixels, options = tray_data
    log("%s.set_tray_icon(%s, %s, %s, %s, %s bytes)", self, enc, w, h, rowstride, len(pixels))
    has_alpha = enc=="rgb32"
    tw = self.tray_widget
    if tw:
        #some tray implementations can't deal with memoryviews..
        if isinstance(pixels, (memoryview, bytearray)):
            pixels = memoryview_to_bytes(pixels)
        tw.set_icon_from_data(pixels, has_alpha, w, h, rowstride, options)
def set_icon_from_data(self, pixels, has_alpha, w, h, rowstride, options={}):
    #this is convoluted but it works..
    log("set_icon_from_data%s", ("%s pixels" % len(pixels), has_alpha, w, h, rowstride, options))
    from PIL import Image   #@UnresolvedImport
    if has_alpha:
        img_format = "RGBA"
    else:
        img_format = "RGBX"
    rgb_format = options.get("rgb_format", "RGBA")
    img = Image.frombuffer(img_format, (w, h), pixels, "raw", rgb_format, rowstride, 1)
    assert img, "failed to load image from buffer (%i bytes for %ix%i %s)" % (len(pixels), w, h, rgb_format)
    #apparently, we have to use SM_CXSMICON (small icon) and not SM_CXICON (regular size):
    icon_w = GetSystemMetrics(win32con.SM_CXSMICON)
    icon_h = GetSystemMetrics(win32con.SM_CYSMICON)
    if w != icon_w or h != icon_h:
        log("resizing tray icon to %ix%i", icon_w, icon_h)
        img = img.resize((icon_w, icon_h), Image.ANTIALIAS)
    bitmap = 0
    mask = 0
    try:
        from xpra.codecs.argb.argb import rgba_to_bgra  #@UnresolvedImport
        bgra = memoryview_to_bytes(rgba_to_bgra(img.tobytes()))
        bitmap = rgba_to_bitmap(bgra, icon_w, icon_h)
        mask = CreateBitmap(icon_w, icon_h, 1, 1, None)
        iconinfo = ICONINFO()
        iconinfo.fIcon = True
        iconinfo.hbmMask = mask
        iconinfo.hbmColor = bitmap
        hicon = CreateIconIndirect(byref(iconinfo))
        log("CreateIconIndirect()=%s", hicon)
        if not hicon:
            raise ctypes.WinError(ctypes.get_last_error())
    except Exception:
        log.error("Error: failed to set tray icon", exc_info=True)
        hicon = FALLBACK_ICON
    finally:
        if mask:
            DeleteObject(mask)
        if bitmap:
            DeleteObject(bitmap)
    self.do_set_icon(hicon)
    UpdateWindow(self.hwnd)
    self.reset_function = (self.set_icon_from_data, pixels, has_alpha, w, h, rowstride)
def _do_paint_rgb32(self, img_data, x, y, width, height, rowstride, options):
    #log.info("do_paint_rgb32(%s bytes, %s, %s, %s, %s, %s, %s) backing depth=%s", len(img_data), x, y, width, height, rowstride, options, self._backing.get_depth())
    #log.info("data head=%s", [hex(ord(v))[2:] for v in list(img_data[:500])])
    rgba = memoryview_to_bytes(self.unpremultiply(img_data))
    pixbuf = gdk.pixbuf_new_from_data(rgba, gtk.gdk.COLORSPACE_RGB, True, 8, width, height, rowstride)
    cr = self._backing.cairo_create()
    cr.rectangle(x, y, width, height)
    cr.set_source_pixbuf(pixbuf, x, y)
    cr.set_operator(cairo.OPERATOR_SOURCE)
    cr.paint()
    return True
def _process_ws_close(self, payload):
    log("_process_ws_close(%r)", payload)
    if len(payload)<2:
        self._connection_lost("unknown reason")
        return
    code = struct.unpack(">H", payload[:2])[0]
    try:
        reason = memoryview_to_bytes(payload[2:]).decode("utf8")
    except UnicodeDecodeError:
        #fall back to a raw representation of the undecodable reason:
        reason = repr(memoryview_to_bytes(payload[2:]))
    self._connection_lost("code %i: %s" % (code, reason))
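# Hypothetical usage sketch (not part of the original code): a websocket close payload is a
# 2-byte big-endian status code followed by an optional utf8 reason, which is exactly what
# the struct.unpack(">H", payload[:2]) call above expects to find.
import struct

def make_ws_close_payload(code=1000, reason="bye"):
    return struct.pack(">H", code) + reason.encode("utf8")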
def gendigest(digest, password, salt): assert digest and password and salt salt = memoryview_to_bytes(salt) password = strtobytes(password) if digest == "des": from xpra.net.d3des import generate_response password = password.ljust(8, "\x00")[:8] salt = salt.ljust(16, "\x00")[:16] v = generate_response(password, salt) return hexstr(v) elif digest == "xor": salt = salt.ljust(16, "\x00")[:len(password)] return memoryview_to_bytes(xor(password, salt)) digestmod = get_digest_module(digest) if not digestmod: log("invalid digest module '%s': %s", digest) return None #warn_server_and_exit(EXIT_UNSUPPORTED, "server requested digest '%s' but it is not supported" % digest, "invalid digest") v = hmac.HMAC(strtobytes(password), strtobytes(salt), digestmod=digestmod).hexdigest() return v
def read(self, n):
    #FIXME: we should try to honour n
    while self.is_active():
        if self.pending_read.qsize():
            buf = self.pending_read.get()
            log("read() returning pending read buffer, len=%i", len(buf))
            self.input_bytecount += len(buf)
            return memoryview_to_bytes(buf)
        bufs, closed_string = self.ws_handler.recv_frames()
        if closed_string:
            log("read() closed_string: %s", memoryview_to_bytes(closed_string))
            self.active = False
        log("read() got %i ws frames", len(bufs))
        if bufs:
            buf = bufs[0]
            if len(bufs) > 1:
                for v in bufs[1:]:
                    self.pending_read.put(v)
            self.input_bytecount += len(buf)
            return memoryview_to_bytes(buf)
def check(self, str_value):
    b = strtobytes(str_value)
    assert b
    s = bytestostr(b)
    assert s
    assert s==str_value
    if not _memoryview:
        return
    mv = _memoryview(b)
    mvb = memoryview_to_bytes(mv)
    mvs = bytestostr(mvb)
    assert mvs==str_value
def check(self, str_value):
    b = strtobytes(str_value)
    assert b
    s = bytestostr(b)
    assert s
    assert s == str_value
    if not memoryview:
        return
    mv = memoryview(b)
    mvb = memoryview_to_bytes(mv)
    mvs = bytestostr(mvb)
    assert mvs == str_value
def _do_paint_rgb24(self, img_data, x, y, width, height, rowstride, options):
    if isinstance(img_data, (memoryview, _buffer, bytearray)):
        img_data = memoryview_to_bytes(img_data)
    if INDIRECT_BGR:
        img_data, rowstride = self.bgr_to_rgb(img_data, width, height, rowstride, options.strget("rgb_format", ""), "RGB")
    gc = self._backing.new_gc()
    self._backing.draw_rgb_image(gc, x, y, width, height, gdk.RGB_DITHER_NONE, img_data, rowstride)
    if self.paint_box_line_width > 0:
        self.paint_box(x, y, width, height, options)
    return True
def compressed_wrapper(datatype, data, level=5, zlib=False, lz4=False, lzo=False, can_inline=True):
    bdata = memoryview_to_bytes(data)
    if lz4:
        assert use_lz4, "cannot use lz4"
        algo = "lz4"
        cl, cdata = lz4_compress(bdata, level)
    elif lzo:
        assert use_lzo, "cannot use lzo"
        algo = "lzo"
        cl, cdata = lzo_compress(bdata, level)
    else:
        assert use_zlib, "cannot use zlib"
        algo = "zlib"
        cl, cdata = zcompress(bdata, level)
    return LevelCompressed(datatype, cdata, cl, algo, can_inline=can_inline)
def get_sub_image(self, x, y, w, h):
    #raise NotImplementedError("no sub-images for %s" % type(self))
    assert w>0 and h>0, "invalid sub-image size: %ix%i" % (w, h)
    if x+w>self.width:
        raise Exception("invalid sub-image width: %i+%i greater than image width %i" % (x, w, self.width))
    if y+h>self.height:
        raise Exception("invalid sub-image height: %i+%i greater than image height %i" % (y, h, self.height))
    assert self.planes==0, "cannot sub-divide planar images!"
    #copy to local variables:
    pixels = self.pixels
    oldstride = self.rowstride
    pos = y*oldstride + x*4
    newstride = w*4
    lines = []
    for _ in range(h):
        lines.append(memoryview_to_bytes(pixels[pos:pos+newstride]))
        pos += oldstride
    return ImageWrapper(self.x+x, self.y+y, w, h, b"".join(lines), self.pixel_format, self.depth, newstride,
                        planes=self.planes, thread_safe=True)
def update_icon(self, width, height, coding, data):
    coding = bytestostr(coding)
    iconlog("%s.update_icon(%s, %s, %s, %s bytes)", self, width, height, coding, len(data))
    if PYTHON3 and WIN32:
        iconlog("not setting icon to prevent crashes..")
        return
    if coding == "premult_argb32":
        #we usually cannot do in-place and this is not performance critical
        data = unpremultiply_argb(data)
        rgba = memoryview_to_bytes(bgra_to_rgba(data))
        pixbuf = get_pixbuf_from_data(rgba, True, width, height, width*4)
    else:
        loader = PixbufLoader()
        loader.write(data)
        loader.close()
        pixbuf = loader.get_pixbuf()
    #for debugging, save to a file so we can see it:
    #pixbuf.save("C-%s-%s.png" % (self._id, int(time.time())), "png")
    iconlog("%s.set_icon(%s)", self, pixbuf)
    self.set_icon(pixbuf)
def _do_paint_rgb32(self, img_data, x, y, width, height, rowstride, options): has_alpha = options.boolget("has_alpha", False) or options.get("rgb_format", "").find("A")>=0 if has_alpha: img_data = self.unpremultiply(img_data) img_data = memoryview_to_bytes(img_data) if INDIRECT_BGR: img_data, rowstride = self.bgr_to_rgb(img_data, width, height, rowstride, options.strget("rgb_format", ""), "RGBA") if has_alpha: #draw_rgb_32_image does not honour alpha, we have to use pixbuf: pixbuf = gdk.pixbuf_new_from_data(img_data, gdk.COLORSPACE_RGB, True, 8, width, height, rowstride) cr = self._backing.cairo_create() cr.rectangle(x, y, width, height) cr.set_source_pixbuf(pixbuf, x, y) cr.set_operator(cairo.OPERATOR_SOURCE) cr.paint() else: #no alpha or scaling is easier: gc = self._backing.new_gc() self._backing.draw_rgb_32_image(gc, x, y, width, height, gdk.RGB_DITHER_NONE, img_data, rowstride) return True
def rgb_reformat(image, rgb_formats, supports_transparency):
    """ convert the RGB pixel data into a format supported by the client """
    #need to convert to a supported format!
    global PIL
    pixel_format = image.get_pixel_format()
    pixels = image.get_pixels()
    if not PIL:
        #try to fallback to argb module
        return argb_swap(image, rgb_formats, supports_transparency)
    if supports_transparency:
        modes = PIL_conv.get(pixel_format)
    else:
        modes = PIL_conv_noalpha.get(pixel_format)
    target_rgb = [(im,om) for (im,om) in modes if om in rgb_formats]
    if len(target_rgb)==0:
        #try argb module:
        if argb_swap(image, rgb_formats, supports_transparency):
            return True
        warning_key = "rgb_reformat(%s, %s, %s)" % (pixel_format, rgb_formats, supports_transparency)
        warn_encoding_once(warning_key, "cannot convert %s to one of: %s" % (pixel_format, rgb_formats))
        return False
    input_format, target_format = target_rgb[0]
    start = time.time()
    w = image.get_width()
    h = image.get_height()
    #PIL cannot use the memoryview directly:
    pixels = memoryview_to_bytes(pixels)
    img = PIL.Image.frombuffer(target_format, (w, h), pixels, "raw", input_format, image.get_rowstride())
    rowstride = w*len(target_format)    #number of characters is number of bytes per pixel!
    #use tobytes() if present, fallback to tostring():
    data_fn = getattr(img, "tobytes", getattr(img, "tostring", None))
    data = data_fn("raw", target_format)
    assert len(data)==rowstride*h, "expected %s bytes in %s format but got %s" % (rowstride*h, target_format, len(data))
    image.set_pixels(data)
    image.set_rowstride(rowstride)
    image.set_pixel_format(target_format)
    end = time.time()
    log("rgb_reformat(%s, %s, %s) converted from %s (%s bytes) to %s (%s bytes) in %.1fms, rowstride=%s",
        image, rgb_formats, supports_transparency, pixel_format, len(pixels), target_format, len(data), (end-start)*1000.0, rowstride)
    return True
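# Minimal standalone sketch (hypothetical values, not from the original) of the PIL
# frombuffer/tobytes round-trip that rgb_reformat() relies on, converting one BGRX pixel
# to packed RGB:
from PIL import Image
pixels = bytes([0x01, 0x02, 0x03, 0x00])    #one BGRX pixel: B=1, G=2, R=3, padding
img = Image.frombuffer("RGB", (1, 1), pixels, "raw", "BGRX", 4)
assert img.tobytes("raw", "RGB") == bytes([0x03, 0x02, 0x01])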
def compress_image(self, image, quality=-1, speed=-1, options={}):
    log("compress_image(%s, %s)", image, options)
    #pass the pixels as they are
    assert image.get_planes()==ImageWrapper.PACKED, "invalid number of planes: %s" % image.get_planes()
    self.quality = quality
    self.speed = speed
    pixels = image.get_pixels()
    assert pixels, "failed to get pixels from %s" % image
    #info used by proxy encoder:
    client_options = {
        "proxy"     : True,
        "frame"     : self.frames,
        "pts"       : image.get_timestamp()-self.first_frame_timestamp,
        #pass-through encoder options:
        "options"   : options,
        #redundant metadata:
        #"width"    : image.get_width(),
        #"height"   : image.get_height(),
        "quality"   : quality,
        "speed"     : speed,
        "timestamp" : image.get_timestamp(),
        "rowstride" : image.get_rowstride(),
        "depth"     : image.get_depth(),
        "rgb_format": image.get_pixel_format(),
        }
    if self.frames==0:
        self.first_frame_timestamp = image.get_timestamp()
    #must pass dst_formats so the proxy can instantiate the video encoder
    #with the correct CSC config:
    client_options["dst_formats"] = self.dst_formats
    if self.scaling!=(1,1):
        client_options["scaling"] = self.scaling
    log("compress_image(%s, %s) returning %s bytes and options=%s", image, options, len(pixels), client_options)
    self.last_frame_times.append(time.time())
    self.frames += 1
    return memoryview_to_bytes(pixels[:]), client_options
def pixels_to_bytes(v):
    if type(v) in btypes:
        return memoryview_to_bytes(v)
    l = CFDataGetLength(v)
    return CFDataGetBytes(v, (0, l), None)
def rgb_encode(coding, image, rgb_formats, supports_transparency, speed, rgb_zlib=True, rgb_lz4=True, rgb_lzo=False):
    pixel_format = image.get_pixel_format()
    #log("rgb_encode%s pixel_format=%s, rgb_formats=%s", (coding, image, rgb_formats, supports_transparency, speed, rgb_zlib, rgb_lz4), pixel_format, rgb_formats)
    if pixel_format not in rgb_formats:
        if not rgb_reformat(image, rgb_formats, supports_transparency):
            raise Exception("cannot find compatible rgb format to use for %s! (supported: %s)" % (pixel_format, rgb_formats))
        #get the new format:
        pixel_format = image.get_pixel_format()
        #switch encoding if necessary:
        if len(pixel_format)==4 and coding=="rgb24":
            coding = "rgb32"
        elif len(pixel_format)==3 and coding=="rgb32":
            coding = "rgb24"
    #always tell client which pixel format we are sending:
    options = {"rgb_format" : pixel_format}
    #we may want to re-stride:
    restride_image(image)
    #compress here and return a wrapper so network code knows it is already zlib compressed:
    pixels = image.get_pixels()
    width = image.get_width()
    height = image.get_height()
    stride = image.get_rowstride()
    #compression stage:
    #by default, wire=raw:
    raw_data = pixels
    level = 0
    algo = "not"
    if len(pixels)>=256 and (rgb_zlib and compression.use_zlib) or (rgb_lz4 and compression.use_lz4) or (rgb_lzo and compression.use_lzo):
        level = max(0, min(5, int(115-speed)/20))
        if len(pixels)<1024:
            #fewer pixels, make it more likely we won't bother compressing:
            level = level // 2
    if level>0:
        if rgb_lz4 and compression.use_lz4:
            cwrapper = compression.compressed_wrapper(coding, raw_data, lz4=True)
            algo = "lz4"
            level = 1
        elif rgb_lzo and compression.use_lzo:
            cwrapper = compression.compressed_wrapper(coding, raw_data, lzo=True)
            algo = "lzo"
            level = 1
        elif rgb_zlib and compression.use_zlib:
            cwrapper = compression.compressed_wrapper(coding, raw_data, zlib=True, level=level)
            algo = "zlib"
        else:
            cwrapper = None
        if cwrapper is None or len(cwrapper)>=(len(raw_data)-32):
            #compressed is actually bigger! (fall through to uncompressed)
            level = 0
        else:
            #add compressed marker:
            options[algo] = level
            #remove network layer compression marker
            #so that this data will be decompressed by the decode thread client side:
            cwrapper.level = 0
    if level==0:
        #can't pass a raw buffer to bencode / rencode:
        cwrapper = compression.Compressed(coding, memoryview_to_bytes(raw_data), True)
    if pixel_format.upper().find("A")>=0 or pixel_format.upper().find("X")>=0:
        bpp = 32
    else:
        bpp = 24
    log("rgb_encode using level=%s, %s compressed %sx%s in %s/%s: %s bytes down to %s",
        level, algo, image.get_width(), image.get_height(), coding, pixel_format, len(pixels), len(cwrapper.data))
    #wrap it using "Compressed" so the network layer receiving it
    #won't decompress it (leave it to the client's draw thread)
    return coding, cwrapper, options, width, height, stride, bpp
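# Illustrative only (not from the original source): the speed-to-level mapping used above,
# written with integer division so the level stays an int. speed=0 gives level 5,
# speed=50 gives 3, speed=95 gives 1 and speed=100 gives 0 (before the small-buffer halving).
def speed_to_compression_level(speed, pixel_count):
    level = max(0, min(5, int(115-speed)//20))
    if pixel_count < 1024:
        #small buffers are rarely worth compressing as much:
        level = level // 2
    return level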
def encode(coding, image, quality, speed, supports_transparency):
    pixel_format = image.get_pixel_format()
    w = image.get_width()
    h = image.get_height()
    rgb = {
        "XRGB"  : "RGB",
        "BGRX"  : "RGB",
        "RGBA"  : "RGBA",
        "BGRA"  : "RGBA",
        }.get(pixel_format, pixel_format)
    bpp = 32
    #remove transparency if it cannot be handled:
    try:
        pixels = image.get_pixels()
        assert pixels, "failed to get pixels from %s" % image
        #PIL cannot use the memoryview directly:
        if type(pixels)!=_buffer:
            pixels = memoryview_to_bytes(pixels)
        #it is safe to use frombuffer() here since the convert()
        #calls below will not convert and modify the data in place
        #and we save the compressed data then discard the image
        im = PIL.Image.frombuffer(rgb, (w, h), pixels, "raw", pixel_format, image.get_rowstride())
        if coding.startswith("png") and not supports_transparency and rgb=="RGBA":
            im = im.convert("RGB")
            rgb = "RGB"
            bpp = 24
    except Exception:
        log.error("PIL_encode%s converting %s pixels from %s to %s failed", (w, h, coding, "%s bytes" % image.get_size(), pixel_format, image.get_rowstride()), type(pixels), pixel_format, rgb, exc_info=True)
        raise
    buf = BytesIOClass()
    client_options = {}
    #only optimize with Pillow>=2.2 and when speed is zero
    if coding in ("jpeg", "webp"):
        q = int(min(99, max(1, quality)))
        kwargs = im.info
        kwargs["quality"] = q
        client_options["quality"] = q
        if coding=="jpeg" and PIL_can_optimize and speed<70:
            #(optimizing jpeg is pretty cheap and worth doing)
            kwargs["optimize"] = True
            client_options["optimize"] = True
        im.save(buf, coding.upper(), **kwargs)
    else:
        assert coding in ("png", "png/P", "png/L"), "unsupported png encoding: %s" % coding
        if coding in ("png/L", "png/P") and supports_transparency and rgb=="RGBA":
            #grab alpha channel (the last one):
            #we use the last channel because we know it is RGBA,
            #otherwise we should do: alpha_index= image.getbands().index('A')
            alpha = im.split()[-1]
            #convert to simple on or off mask:
            #set all pixel values below 128 to 255, and the rest to 0
            def mask_value(a):
                if a<=128:
                    return 255
                return 0
            mask = PIL.Image.eval(alpha, mask_value)
        else:
            #no transparency
            mask = None
        if coding=="png/L":
            im = im.convert("L", palette=PIL.Image.ADAPTIVE, colors=255)
            bpp = 8
        elif coding=="png/P":
            #I wanted to use the "better" adaptive method,
            #but this does NOT work (produces a black image instead):
            #im.convert("P", palette=Image.ADAPTIVE)
            im = im.convert("P", palette=PIL.Image.WEB, colors=255)
            bpp = 8
        if mask:
            #paste the alpha mask to the color of index 255
            im.paste(255, mask)
        kwargs = im.info
        if mask is not None:
            client_options["transparency"] = 255
            kwargs["transparency"] = 255
        if PIL_can_optimize and speed==0:
            #optimizing png is very rarely worth doing
            kwargs["optimize"] = True
            client_options["optimize"] = True
        #level can range from 0 to 9, but anything above 5 is way too slow for small gains:
        #76-100 -> 1
        #51-76  -> 2
        #etc
        level = max(1, min(5, (125-speed)//25))
        kwargs["compress_level"] = level
        client_options["compress_level"] = level
        #default is good enough, no need to override, other options:
        #DEFAULT_STRATEGY, FILTERED, HUFFMAN_ONLY, RLE, FIXED
        #kwargs["compress_type"] = PIL.Image.DEFAULT_STRATEGY
        im.save(buf, "PNG", **kwargs)
    log("sending %sx%s %s as %s, mode=%s, options=%s", w, h, pixel_format, coding, im.mode, kwargs)
    data = buf.getvalue()
    buf.close()
    return coding, compression.Compressed(coding, data), client_options, image.get_width(), image.get_height(), 0, bpp
def mmap_write(mmap_area, mmap_size, data):
    """
    Sends 'data' to the client via the mmap shared memory region,
    returns the chunks of the mmap area used (or None if it failed)
    and the mmap area's free memory.
    """
    #This is best explained using diagrams:
    #mmap_area=[&S&E-------------data-------------]
    #The first pair of 4 bytes are occupied by:
    #S=data_start index is only updated by the client and tells us where it has read up to
    #E=data_end index is only updated here and marks where we have written up to (matches current seek)
    # '-' denotes unused/available space
    # '+' is for data we have written
    # '*' is for data we have just written in this call
    # E and S show the location pointed to by data_start/data_end
    mmap_data_start = int_from_buffer(mmap_area, 0)
    mmap_data_end = int_from_buffer(mmap_area, 4)
    start = max(8, mmap_data_start.value)
    end = max(8, mmap_data_end.value)
    l = len(data)
    log("mmap: start=%i, end=%i, size of data to write=%i", start, end, l)
    if end<start:
        #we have wrapped around but the client hasn't yet:
        #[++++++++E--------------------S+++++]
        #so there is one chunk available (from E to S) which we will use:
        #[++++++++************E--------S+++++]
        available = start-end
        chunk = available
    else:
        #we have not wrapped around yet, or the client has wrapped around too:
        #[------------S++++++++++++E---------]
        #so there are two chunks available (from E to the end, from the start to S):
        #[****--------S++++++++++++E*********]
        chunk = mmap_size-end
        available = chunk+(start-8)
    #update global mmap stats:
    mmap_free_size = available-l
    if l>(mmap_size-8):
        log.warn("Warning: mmap area is too small!")
        log.warn(" we need to store %s bytes but the mmap area is limited to %i", l, (mmap_size-8))
        return None, mmap_free_size
    elif mmap_free_size<=0:
        log.warn("Warning: mmap area is full!")
        log.warn(" we need to store %s bytes but only have %s free space left", l, available)
        return None, mmap_free_size
    if l<chunk:
        """ data fits in the first chunk """
        #ie: initially:
        #[----------------------------------]
        #[*********E------------------------]
        #or if data already existed:
        #[+++++++++E------------------------]
        #[+++++++++**********E--------------]
        mmap_area.seek(end)
        mmap_area.write(memoryview_to_bytes(data))
        data = [(end, l)]
        mmap_data_end.value = end+l
    else:
        """ data does not fit in first chunk alone """
        if available>=(mmap_size/2) and available>=(l*3) and l<(start-8):
            """ still plenty of free space, don't wrap around: just start again """
            #[------------------S+++++++++E------]
            #[*******E----------S+++++++++-------]
            mmap_area.seek(8)
            mmap_area.write(memoryview_to_bytes(data))
            data = [(8, l)]
            mmap_data_end.value = 8+l
        else:
            """ split in 2 chunks: wrap around the end of the mmap buffer """
            #[------------------S+++++++++E------]
            #[******E-----------S+++++++++*******]
            mmap_area.seek(end)
            mmap_area.write(memoryview_to_bytes(data[:chunk]))
            mmap_area.seek(8)
            mmap_area.write(memoryview_to_bytes(data[chunk:]))
            l2 = l-chunk
            data = [(end, chunk), (8, l2)]
            mmap_data_end.value = 8+l2
    log("sending damage with mmap: %s", data)
    return data, mmap_free_size
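# Worked example (illustrative numbers only): how the chunking above plays out for a
# wrap-around write with mmap_size=64, data_start=20, data_end=60 and a 10 byte payload:
# only 4 bytes fit before the end of the area, so the write is split in two chunks.
mmap_size, start, end, l = 64, 20, 60, 10
chunk = mmap_size - end                     #4 bytes left before the end of the area
available = chunk + (start - 8)             #4 + 12 = 16 bytes free in total
assert chunk <= l < available
chunks = [(end, chunk), (8, l - chunk)]     #[(60, 4), (8, 6)]
new_data_end = 8 + (l - chunk)              #14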
def pixels_to_bytes(v):
    if isinstance(v, NSCFData):
        l = CFDataGetLength(v)
        return CFDataGetBytes(v, (0, l), None)
    return memoryview_to_bytes(v)
def rgb_encode(coding, image, rgb_formats, supports_transparency, speed, rgb_zlib=True, rgb_lz4=True, rgb_lzo=False,
               encoding_client_options=True, supports_rgb24zlib=True):
    pixel_format = image.get_pixel_format()
    #log("rgb_encode%s pixel_format=%s, rgb_formats=%s", (coding, image, rgb_formats, supports_transparency, speed, rgb_zlib, rgb_lz4, encoding_client_options, supports_rgb24zlib), pixel_format, rgb_formats)
    if pixel_format not in rgb_formats:
        if not rgb_reformat(image, rgb_formats, supports_transparency):
            raise Exception("cannot find compatible rgb format to use for %s! (supported: %s)" % (pixel_format, rgb_formats))
        #get the new format:
        pixel_format = image.get_pixel_format()
        #switch encoding if necessary:
        if len(pixel_format)==4 and coding=="rgb24":
            coding = "rgb32"
        elif len(pixel_format)==3 and coding=="rgb32":
            coding = "rgb24"
    #always tell client which pixel format we are sending:
    options = {"rgb_format" : pixel_format}
    #compress here and return a wrapper so network code knows it is already zlib compressed:
    pixels = image.get_pixels()
    #special case for when rowstride is so much bigger than the width
    #that we would end up sending large chunks of padding with each row of pixels
    #this happens with XShm pixel data (the default)
    stride = image.get_rowstride()
    width = image.get_width()
    rstride = roundup(width*len(pixel_format), 4)   #a reasonable stride: rounded up to 4
    height = image.get_height()
    if stride>8 and rstride<stride:
        al = len(pixels)                #current buffer size
        el = rstride*height             #desirable size we could have
        if al-el>1024 and el*110/100<al:
            #is it worth re-striding to save space?
            #we'll save at least 1KB and 10%, do it
            #Note: we could also change the pixel format whilst we're at it
            # and convert BGRX to RGB for example (assuming RGB is also supported by the client)
            rows = []
            for y in range(height):
                rows.append(memoryview_to_bytes(pixels[stride*y:stride*y+rstride]))
            pixels = "".join(rows)
            log("rgb_encode: %s pixels re-stride saving %i%% from %s (%s bytes) to %s (%s bytes)",
                pixel_format, 100-100*el/al, stride, al, rstride, el)
            stride = rstride
    #compression
    #by default, wire=raw:
    raw_data = str(pixels)
    wire_data = raw_data
    level = 0
    algo = "not"
    if len(pixels)>=256 and (rgb_zlib and compression.use_zlib) or (rgb_lz4 and compression.use_lz4) or (rgb_lzo and compression.use_lzo):
        level = max(0, min(5, int(115-speed)/20))
        if len(pixels)<1024:
            #fewer pixels, make it more likely we won't bother compressing:
            level = level / 2
    if level>0:
        if rgb_lz4 and compression.use_lz4:
            wire_data = compression.compressed_wrapper(coding, pixels, lz4=True)
            algo = "lz4"
            level = 1
        elif rgb_lzo and compression.use_lzo:
            wire_data = compression.compressed_wrapper(coding, pixels, lzo=True)
            algo = "lzo"
            level = 1
        else:
            assert rgb_zlib and compression.use_zlib
            wire_data = compression.compressed_wrapper(coding, pixels, zlib=True, level=level)
            algo = "zlib"
        raw_data = wire_data.data
        #log("%s/%s data compressed from %s bytes down to %s (%s%%) with lz4=%s",
        #    coding, pixel_format, len(pixels), len(raw_data), int(100.0*len(raw_data)/len(pixels)), self.rgb_lz4)
        if len(raw_data)>=(len(pixels)-32):
            #compressed is actually bigger! (use uncompressed)
            level = 0
            wire_data = str(pixels)
            raw_data = wire_data
        else:
            #add compressed marker:
            options[algo] = level
    if pixel_format.upper().find("A")>=0 or pixel_format.upper().find("X")>=0:
        bpp = 32
    else:
        bpp = 24
    log("rgb_encode using level=%s, %s compressed %sx%s in %s/%s: %s bytes down to %s",
        level, algo, image.get_width(), image.get_height(), coding, pixel_format, len(pixels), len(raw_data))
    if not encoding_client_options or not supports_rgb24zlib:
        return coding, wire_data, {}, width, height, stride, bpp
    #wrap it using "Compressed" so the network layer receiving it
    #won't decompress it (leave it to the client's draw thread)
    return coding, compression.Compressed(coding, raw_data, True), options, width, height, stride, bpp
def _do_paint_rgb(self, bpp, img_data, x, y, width, height, rowstride, options):
    log("%s._do_paint_rgb(%s, %s bytes, x=%d, y=%d, width=%d, height=%d, rowstride=%d, options=%s)",
        self, bpp, len(img_data), x, y, width, height, rowstride, options)
    context = self.gl_context()
    if not context:
        log("%s._do_paint_rgb(..) no context!", self)
        return False
    #TODO: move this code up to the decode thread section
    #prepare the pixel buffer for upload:
    t = type(img_data)
    if t==memoryview_type:
        if not zerocopy_upload:
            #not safe, make a copy :(
            img_data = memoryview_to_bytes(img_data)
            upload = "copy:memoryview_to_bytes"
        else:
            upload = "zerocopy:memoryview"
    elif t in (str, buffer_type) and zerocopy_upload:
        #we can zerocopy if we wrap it:
        img_data = memoryview_type(img_data)
        upload = "zerocopy:memoryview", t
    elif t!=str:
        if hasattr(img_data, "raw"):
            img_data = img_data.raw
            upload = "zerocopy:mmap"
        else:
            #everything else.. copy to bytes (aka str):
            img_data = str(img_data)
            upload = "copy:str", t
    else:
        upload = "copy:str"
    with context:
        self.gl_init()
        self.set_rgb_paint_state()
        rgb_format = options.get(b"rgb_format")
        if not rgb_format:
            #Older servers may not tell us the pixel format, so we must infer it:
            if bpp==24:
                rgb_format = "RGB"
            else:
                assert bpp==32
                rgb_format = "RGBA"
        else:
            rgb_format = rgb_format.decode()
        #convert it to a GL constant:
        pformat = PIXEL_FORMAT_TO_CONSTANT.get(rgb_format)
        assert pformat is not None, "could not find pixel format for %s (bpp=%s)" % (rgb_format, bpp)
        bytes_per_pixel = len(rgb_format)       #ie: BGRX -> 4
        # Compute alignment and row length
        row_length = 0
        alignment = 1
        for a in [2, 4, 8]:
            # Check if we are a-aligned - ! (var & 0x1) means 2-aligned or better, 0x3 - 4-aligned and so on
            if (rowstride & a-1) == 0:
                alignment = a
        # If number of extra bytes is greater than the alignment value,
        # then we also have to set row_length
        # Otherwise it remains at 0 (= width implicitely)
        if (rowstride - width * bytes_per_pixel) >= alignment:
            row_length = width + (rowstride - width * bytes_per_pixel) // bytes_per_pixel
        self.gl_marker("%s %sbpp update at (%d,%d) size %dx%d (%s bytes), stride=%d, row length %d, alignment %d, using GL %s format=%s",
                       rgb_format, bpp, x, y, width, height, len(img_data), rowstride, row_length, alignment, upload, CONSTANT_TO_PIXEL_FORMAT.get(pformat))
        # Upload data as temporary RGB texture
        glBindTexture(GL_TEXTURE_RECTANGLE_ARB, self.textures[TEX_RGB])
        glPixelStorei(GL_UNPACK_ROW_LENGTH, row_length)
        glPixelStorei(GL_UNPACK_ALIGNMENT, alignment)
        glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)
        glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, self.texture_pixel_format, width, height, 0, pformat, GL_UNSIGNED_BYTE, img_data)
        # Draw textured RGB quad at the right coordinates
        glBegin(GL_QUADS)
        glTexCoord2i(0, 0)
        glVertex2i(x, y)
        glTexCoord2i(0, height)
        glVertex2i(x, y+height)
        glTexCoord2i(width, height)
        glVertex2i(x+width, y+height)
        glTexCoord2i(width, 0)
        glVertex2i(x+width, y)
        glEnd()
        # Present update to screen
        self.present_fbo(options.get("encoding"), options.get("delta", -1)>=0, x, y, width, height)
        # present_fbo has reset state already
        return True
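# Standalone sketch (illustrative, not part of the original file) of the unpack alignment
# logic used above: pick the largest power of two dividing the rowstride, and only set a
# GL_UNPACK_ROW_LENGTH when the per-row padding is at least one alignment unit.
def unpack_params(width, rowstride, bytes_per_pixel):
    alignment = 1
    for a in (2, 4, 8):
        if rowstride % a == 0:
            alignment = a
    row_length = 0
    if (rowstride - width * bytes_per_pixel) >= alignment:
        row_length = width + (rowstride - width * bytes_per_pixel) // bytes_per_pixel
    return alignment, row_length

#ie: a 30 pixel wide RGBA row padded to a 128 byte stride: alignment=8, row_length=32
assert unpack_params(30, 128, 4) == (8, 32)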
def convert_image_yuv(self, image):
    start = time.time()
    iplanes = image.get_planes()
    width = image.get_width()
    height = image.get_height()
    strides = image.get_rowstride()
    pixels = image.get_pixels()
    assert iplanes==ImageWrapper._3_PLANES, "we only handle planar data as input!"
    assert image.get_pixel_format()==self.src_format, "invalid source format: %s (expected %s)" % (image.get_pixel_format(), self.src_format)
    assert len(strides)==len(pixels)==3, "invalid number of planes or strides (should be 3)"
    assert width>=self.src_width and height>=self.src_height, "expected source image with dimensions of at least %sx%s but got %sx%s" % (self.src_width, self.src_height, width, height)
    #adjust work dimensions for subsampling:
    #(we process N pixels at a time in each dimension)
    divs = get_subsampling_divs(self.src_format)
    wwidth = dimdiv(self.dst_width, max(x_div for x_div, _ in divs))
    wheight = dimdiv(self.dst_height, max(y_div for _, y_div in divs))
    globalWorkSize, localWorkSize = self.get_work_sizes(wwidth, wheight)
    kernelargs = [self.queue, globalWorkSize, localWorkSize]
    iformat = pyopencl.ImageFormat(pyopencl.channel_order.R, pyopencl.channel_type.UNSIGNED_INT8)
    input_images = []
    for i in range(3):
        _, y_div = divs[i]
        plane = pixels[i]
        if type(plane)==_memoryview:
            plane = memoryview_to_bytes(plane)
        if type(plane)==str:
            flags = mem_flags.READ_ONLY | mem_flags.COPY_HOST_PTR
        else:
            flags = mem_flags.READ_ONLY | mem_flags.USE_HOST_PTR
        shape = strides[i], self.src_height//y_div
        iimage = pyopencl.Image(self.context, flags, iformat, shape=shape, hostbuf=plane)
        input_images.append(iimage)
    #output image:
    oformat = pyopencl.ImageFormat(self.channel_order, pyopencl.channel_type.UNORM_INT8)
    oimage = pyopencl.Image(self.context, mem_flags.WRITE_ONLY, oformat, shape=(self.dst_width, self.dst_height))
    kernelargs += input_images + [numpy.int32(self.src_width), numpy.int32(self.src_height),
                                  numpy.int32(self.dst_width), numpy.int32(self.dst_height),
                                  self.sampler, oimage]
    kstart = time.time()
    log("convert_image(%s) calling %s%s after upload took %.1fms", image, self.kernel_function_name, tuple(kernelargs), 1000.0*(kstart-start))
    self.kernel_function(*kernelargs)
    self.queue.finish()
    #free input images:
    for iimage in input_images:
        iimage.release()
    kend = time.time()
    log("%s took %.1fms", self.kernel_function, 1000.0*(kend-kstart))
    out_array = numpy.empty(self.dst_width*self.dst_height*4, dtype=numpy.byte)
    pyopencl.enqueue_read_image(self.queue, oimage, (0, 0), (self.dst_width, self.dst_height), out_array)
    self.queue.finish()
    log("readback using %s took %.1fms", CHANNEL_ORDER_TO_STR.get(self.channel_order), 1000.0*(time.time()-kend))
    self.time += time.time()-start
    self.frames += 1
    return ImageWrapper(0, 0, self.dst_width, self.dst_height, out_array.data, self.dst_format, 24, self.dst_width*4,
                        planes=ImageWrapper.PACKED)
def convert_image_rgb(self, image):
    start = time.time()
    iplanes = image.get_planes()
    width = image.get_width()
    height = image.get_height()
    stride = image.get_rowstride()
    pixels = image.get_pixels()
    #log("convert_image(%s) planes=%s, pixels=%s, size=%s", image, iplanes, type(pixels), len(pixels))
    assert iplanes==ImageWrapper.PACKED, "we only handle packed data as input!"
    assert image.get_pixel_format()==self.src_format, "invalid source format: %s (expected %s)" % (image.get_pixel_format(), self.src_format)
    assert width>=self.src_width and height>=self.src_height, "expected source image with dimensions of at least %sx%s but got %sx%s" % (self.src_width, self.src_height, width, height)
    #adjust work dimensions for subsampling:
    #(we process N pixels at a time in each dimension)
    divs = get_subsampling_divs(self.dst_format)
    wwidth = dimdiv(self.dst_width, max([x_div for x_div, _ in divs]))
    wheight = dimdiv(self.dst_height, max([y_div for _, y_div in divs]))
    globalWorkSize, localWorkSize = self.get_work_sizes(wwidth, wheight)
    #input image:
    iformat = pyopencl.ImageFormat(self.channel_order, pyopencl.channel_type.UNSIGNED_INT8)
    shape = (stride//4, self.src_height)
    log("convert_image() type=%s, input image format=%s, shape=%s, work size: local=%s, global=%s", type(pixels), iformat, shape, localWorkSize, globalWorkSize)
    idata = pixels
    if type(idata)==_memoryview:
        idata = memoryview_to_bytes(idata)
    if type(idata)==str:
        #str is not a buffer, so we have to copy the data
        #alternatively, we could copy it first ourselves using this:
        #pixels = numpy.fromstring(pixels, dtype=numpy.byte).data
        #but I think this would be even slower
        flags = mem_flags.READ_ONLY | mem_flags.COPY_HOST_PTR
    else:
        flags = mem_flags.READ_ONLY | mem_flags.USE_HOST_PTR
    iimage = pyopencl.Image(self.context, flags, iformat, shape=shape, hostbuf=idata)
    kernelargs = [self.queue, globalWorkSize, localWorkSize,
                  iimage, numpy.int32(self.src_width), numpy.int32(self.src_height),
                  numpy.int32(self.dst_width), numpy.int32(self.dst_height),
                  self.sampler]
    #calculate plane strides and allocate output buffers:
    strides = []
    out_buffers = []
    out_sizes = []
    for i in range(3):
        x_div, y_div = divs[i]
        p_stride = roundup(self.dst_width // x_div, max(2, localWorkSize[0]))
        p_height = roundup(self.dst_height // y_div, 2)
        p_size = p_stride * p_height
        #log("output buffer for channel %s: stride=%s, height=%s, size=%s", i, p_stride, p_height, p_size)
        out_buf = pyopencl.Buffer(self.context, mem_flags.WRITE_ONLY, p_size)
        out_buffers.append(out_buf)
        kernelargs += [out_buf, numpy.int32(p_stride)]
        strides.append(p_stride)
        out_sizes.append(p_size)
    kstart = time.time()
    log("convert_image(%s) calling %s%s after %.1fms", image, self.kernel_function_name, tuple(kernelargs), 1000.0*(kstart-start))
    self.kernel_function(*kernelargs)
    self.queue.finish()
    #free input image:
    iimage.release()
    kend = time.time()
    log("%s took %.1fms", self.kernel_function_name, 1000.0*(kend-kstart))
    #read back:
    pixels = []
    for i in range(3):
        out_array = numpy.empty(out_sizes[i], dtype=numpy.byte)
        pixels.append(out_array.data)
        pyopencl.enqueue_read_buffer(self.queue, out_buffers[i], out_array, is_blocking=False)
    readstart = time.time()
    log("queue read events took %.1fms (3 planes of size %s, with strides=%s)", 1000.0*(readstart-kend), out_sizes, strides)
    self.queue.finish()
    readend = time.time()
    log("wait for read events took %.1fms", 1000.0*(readend-readstart))
    #free output buffers:
    for out_buf in out_buffers:
        out_buf.release()
    return ImageWrapper(0, 0, self.dst_width, self.dst_height, pixels,
                        self.dst_format, 24, strides, planes=ImageWrapper._3_PLANES)