def process_client_packet(self, proto, packet):
    """Dispatch a packet received from the client connection.

    Control packets (connection-lost, set_deflate, hello) are handled
    locally and not forwarded; "disconnect", "send-file" and
    "send-file-chunk" fall through to queue_server_packet() below,
    with file payloads re-wrapped as Compressed so the protocol layer
    does not try to recompress them.
    """
    packet_type = packet[0]
    log("process_client_packet: %s", packet_type)
    # NOTE(review): packet_type is compared against both str ("disconnect")
    # and bytes (b"send-file") literals below; only one kind can ever match
    # depending on how the protocol layer decodes packet[0] - verify which
    # branches are actually reachable.
    if packet_type == Protocol.CONNECTION_LOST:
        self.stop("client connection lost", proto)
        return
    elif packet_type == "disconnect":
        log("got disconnect from client: %s", packet[1])
        if self.exit:
            #already exiting: just close our end of the client connection
            self.client_protocol.close()
        else:
            self.stop("disconnect from client: %s" % packet[1])
    elif packet_type == "set_deflate":
        #echo it back to the client:
        self.client_packets.put(packet)
        self.client_protocol.source_has_more()
        return
    elif packet_type == b"send-file":
        if packet[6]:
            #wrap so the protocol layer forwards the data as-is:
            packet[6] = Compressed("file-data", packet[6])
    elif packet_type == b"send-file-chunk":
        if packet[3]:
            packet[3] = Compressed("file-chunk-data", packet[3])
    elif packet_type == "hello":
        #a second hello after authentication is a protocol violation:
        log.warn(
            "Warning: invalid hello packet received after initial authentication (dropped)"
        )
        return
    self.queue_server_packet(packet)
def process_client_packet(self, proto, packet):
    """Dispatch a packet received from the client connection.

    Handles control packets locally (connection-lost, set_deflate,
    hello re-authentication, our own ping echoes) and forwards
    everything else to the server, re-wrapping file payloads as
    Compressed so the protocol layer does not recompress them.
    """
    packet_type = bytestostr(packet[0])
    log("process_client_packet: %s", packet_type)
    if packet_type == CONNECTION_LOST:
        self.stop(proto, "client connection lost")
        return
    if packet_type == "set_deflate":
        #echo it back to the client:
        self.client_packets.put(packet)
        self.client_protocol.source_has_more()
        return
    if packet_type == "hello":
        #a hello is only valid here as a response to our challenge:
        if not self.client_challenge_packet:
            log.warn("Warning: invalid hello packet from client")
            log.warn(" received after initial authentication (dropped)")
            return
        log("forwarding client hello")
        log(" for challenge packet %s", self.client_challenge_packet)
        #update caps with latest hello caps from client:
        self.caps = typedict(packet[1])
        #keep challenge data in the hello response:
        hello = self.filter_client_caps(self.caps, CLIENT_REMOVE_CAPS_CHALLENGE)
        self.queue_server_packet(("hello", hello))
        return
    if packet_type == "ping_echo" and self.client_ping_timer and len(
            packet) >= 7 and strtobytes(packet[6]) == strtobytes(self.uuid):
        #this is one of our ping packets:
        self.client_last_ping_echo = packet[1]
        self.client_last_ping_latency = 1000 * monotonic(
        ) - self.client_last_ping_echo
        log("ping-echo: client latency=%.1fms",
            self.client_last_ping_latency)
        return
    #the packet types below are forwarded:
    if packet_type == "disconnect":
        reasons = tuple(bytestostr(x) for x in packet[1:])
        log("got disconnect from client: %s", csv(reasons))
        if self.exit:
            #already exiting: just close our end
            self.client_protocol.close()
        else:
            self.stop(None, "disconnect from client", *reasons)
    elif packet_type == "send-file":
        if packet[6]:
            #wrap so the protocol layer forwards the data as-is:
            packet[6] = Compressed("file-data", packet[6])
    elif packet_type == "send-file-chunk":
        if packet[3]:
            packet[3] = Compressed("file-chunk-data", packet[3])
    self.queue_server_packet(packet)
def send_sound_data(self, sound_source, data, metadata, packet_metadata=None):
    """Emit one "sound-data" packet for the given source.

    The raw audio buffer is wrapped as Compressed so the network layer
    will not recompress it; bundled packet metadata is only appended
    when the peer advertised support for it.
    """
    fmt = sound_source.codec
    payload = [fmt, Compressed(fmt, data), metadata]
    if packet_metadata:
        #bundling must have been negotiated:
        assert self.server_sound_bundle_metadata
        payload.append(packet_metadata)
    self.send("sound-data", *payload)
def passthrough(strip_alpha=True):
    """Forward the rgb pixel data to the client without re-encoding.

    When strip_alpha is set and the 4-byte format has a padding byte
    ("X"), that channel is overwritten with 255 since its contents may
    be uninitialized garbage.
    """
    enclog(
        "proxy draw: %s passthrough (rowstride: %s vs %s, strip alpha=%s)",
        rgb_format, rowstride, client_options.get("rowstride", 0),
        strip_alpha)
    if strip_alpha:
        #passthrough as plain RGB:
        Xindex = rgb_format.upper().find("X")
        if Xindex >= 0 and len(rgb_format) == 4:
            #force clear alpha (which may be garbage):
            newdata = bytearray(pixels)
            #fix: use integer division (len(pixels)/4 is a float on python 3)
            #and assign an int: bytearray items are ints, chr(255) would raise
            for i in range(len(pixels) // 4):
                newdata[i * 4 + Xindex] = 255
            packet[9] = client_options.get("rowstride", 0)
            cdata = bytes(newdata)
        else:
            cdata = pixels
        new_client_options = {"rgb_format": rgb_format}
    else:
        #preserve
        cdata = pixels
        new_client_options = client_options
    wrapped = Compressed("%s pixels" % encoding, cdata)
    #FIXME: we should not assume that rgb32 is supported here...
    #(we may have to convert to rgb24..)
    return send_updated("rgb32", wrapped, new_client_options)
def new_sound_buffer(self, sound_source, data, metadata, packet_metadata=None):
    """Validate and forward a freshly captured sound buffer.

    Buffers from a stale source, a closed connection, or an old
    sequence number are dropped. When the client does not support
    metadata bundling, each metadata chunk is sent as its own packet
    first; otherwise the (already compressed) metadata is wrapped and
    bundled with the main packet.
    """
    #fix: packet_metadata defaults to None, which is not iterable -
    #guard with "or ()" so the log call cannot raise a TypeError:
    log("new_sound_buffer(%s, %s, %s, %s) info=%s", sound_source,
        len(data or []), metadata,
        [len(x) for x in (packet_metadata or ())], sound_source.info)
    if self.sound_source != sound_source or self.is_closed():
        log("sound buffer dropped: from old source or closed")
        return
    if sound_source.sequence < self.sound_source_sequence:
        log("sound buffer dropped: old sequence number: %s (current is %s)",
            sound_source.sequence, self.sound_source_sequence)
        return
    if packet_metadata:
        if not self.sound_bundle_metadata:
            #client does not support bundling,
            #send packet metadata as individual packets before the main packet:
            for x in packet_metadata:
                self.send_sound_data(sound_source, x, {})
            packet_metadata = ()
        else:
            #the packet metadata is compressed already:
            packet_metadata = Compressed("packet metadata",
                                         packet_metadata,
                                         can_inline=True)
    #don't drop the first 10 buffers
    can_drop_packet = (sound_source.info or {}).get("buffer_count", 0) > 10
    self.send_sound_data(sound_source, data, metadata, packet_metadata,
                         can_drop_packet)
def send_sound_data(self, sound_source, data, metadata, packet_metadata=None, can_drop_packet=False):
    """Send one "sound-data" packet, optionally marked as droppable.

    Droppable packets are sent with a failure callback instead of
    being resent, since re-sending stale audio is useless.
    """
    wrapped = Compressed(sound_source.codec, data)
    packet_data = [sound_source.codec, wrapped, metadata]
    if packet_metadata:
        #bundling must have been negotiated:
        assert self.sound_bundle_metadata
        packet_data.append(packet_metadata)
    seq = sound_source.sequence
    if seq >= 0:
        metadata["sequence"] = seq
    fail_cb = None
    if can_drop_packet:
        def sound_data_fail_cb():
            #ideally we would tell gstreamer to send an audio "key frame"
            #or synchronization point to ensure the stream recovers
            log("a sound data buffer was not received and will not be resent")
        fail_cb = sound_data_fail_cb
    self.send("sound-data", *packet_data,
              synchronous=False, fail_cb=fail_cb, will_have_more=True)
def make_test_packets(self, pixel_data_size=2**18):
    """Return a tuple of representative packets for testing:
    a tiny "test" packet, a "ping" and a "draw" carrying
    pixel_data_size random bytes wrapped as Compressed.
    """
    blob = os.urandom(pixel_data_size)
    test_packet = ("test", 1, 2, 3)
    ping_packet = ("ping", 100, 200, 300, 0)
    draw_packet = ("draw", 100, 100, 640, 480,
                   Compressed("pixel-data", blob), {})
    return (test_packet, ping_packet, draw_packet)
def do_send_cursor(self, delay):
    """Fetch the current cursor and send it to the client.

    Skips the send if the cursor is identical to the last one sent.
    Pixel data is encoded as png when the client supports it,
    wrapped with compressed_wrapper for "raw", or sent as-is for
    small cursors. An empty cursor packet clears the client cursor.
    """
    self.cursor_timer = None
    cd = self.get_cursor_data_cb()
    if cd and cd[0]:
        cursor_data = list(cd[0])
        cursor_sizes = cd[1]
        #skip first two fields (if present) as those are coordinates:
        if self.last_cursor_sent and self.last_cursor_sent[2:9] == cursor_data[2:9]:
            cursorlog("do_send_cursor(..) cursor identical to the last one we sent, nothing to do")
            return
        self.last_cursor_sent = cursor_data[:9]
        w, h, _xhot, _yhot, serial, pixels, name = cursor_data[2:9]
        #compress pixels if needed:
        encoding = None
        if pixels is not None:
            #convert bytearray to string:
            cpixels = strtobytes(pixels)
            if "png" in self.cursor_encodings:
                from xpra.codecs.loader import get_codec
                PIL = get_codec("PIL")
                assert PIL
                cursorlog("do_send_cursor() loading %i bytes of cursor pixel data for %ix%i cursor named '%s'",
                          len(cpixels), w, h, name)
                #cursor pixel data is BGRA, re-encode it as a PNG blob:
                img = PIL.Image.frombytes("RGBA", (w, h), cpixels, "raw", "BGRA", w * 4, 1)
                buf = BytesIOClass()
                img.save(buf, "PNG")
                pngdata = buf.getvalue()
                buf.close()
                cpixels = Compressed("png cursor", pngdata, can_inline=True)
                encoding = "png"
                if SAVE_CURSORS:
                    #debug option: keep a copy of every cursor on disk
                    with open("raw-cursor-%#x.png" % serial, "wb") as f:
                        f.write(pngdata)
            elif len(cpixels) >= 256 and ("raw" in self.cursor_encodings or not self.cursor_encodings):
                #large enough to be worth compressing:
                cpixels = self.compressed_wrapper("cursor", pixels)
                cursorlog("do_send_cursor(..) pixels=%s ", cpixels)
                encoding = "raw"
            cursor_data[7] = cpixels
        cursorlog("do_send_cursor(..) %sx%s %s cursor name='%s', serial=%#x with delay=%s (cursor_encodings=%s)",
                  w, h, (encoding or "empty"), name, serial, delay, self.cursor_encodings)
        args = list(cursor_data[:9]) + [cursor_sizes[0]] + list(cursor_sizes[1])
        if self.cursor_encodings and encoding:
            #newer clients expect the encoding as the first field:
            args = [encoding] + args
    else:
        #no cursor data: tell the client to clear its cursor
        cursorlog("do_send_cursor(..) sending empty cursor with delay=%s", delay)
        args = [""]
        self.last_cursor_sent = None
    self.send_more("cursor", *args)
def new_sound_buffer(self, sound_source, data, metadata, packet_metadata=()):
    """Account for and forward a freshly captured sound buffer.

    Buffers with an old sequence number are dropped. When the server
    does not support metadata bundling, each metadata chunk is sent
    as its own packet first; otherwise the (already compressed)
    metadata is wrapped and bundled with the main packet.
    """
    log("new_sound_buffer(%s, %s, %s, %s)", sound_source,
        len(data or ()), metadata, packet_metadata)
    if sound_source.sequence < self.sound_source_sequence:
        #stale buffer from a previous source generation:
        log("sound buffer dropped: old sequence number: %s (current is %s)",
            sound_source.sequence, self.sound_source_sequence)
        return
    self.sound_out_bytecount += len(data) + sum(len(x) for x in packet_metadata)
    metadata["sequence"] = sound_source.sequence
    if packet_metadata:
        if self.server_sound_bundle_metadata:
            #the packet metadata is compressed already, just wrap it:
            packet_metadata = Compressed("packet metadata",
                                         packet_metadata, can_inline=True)
        else:
            #server does not support bundling:
            #send each metadata chunk as an individual packet first
            for chunk in packet_metadata:
                self.send_sound_data(sound_source, chunk, metadata)
            packet_metadata = ()
    self.send_sound_data(sound_source, data, metadata, packet_metadata)
def passthrough(strip_alpha=True):
    """Forward the rgb pixel data to the client without re-encoding.

    When strip_alpha is set and the 4-byte format has a padding byte
    ("X"), that channel is overwritten with 255 since its contents may
    be uninitialized garbage.
    """
    enclog(
        "proxy draw: %s passthrough (rowstride: %s vs %s, strip alpha=%s)",
        rgb_format, rowstride, client_options.intget("rowstride", 0),
        strip_alpha)
    if strip_alpha:
        #passthrough as plain RGB:
        Xindex = rgb_format.upper().find("X")
        if Xindex >= 0 and len(rgb_format) == 4:
            #force clear alpha (which may be garbage):
            newdata = bytearray(pixels)
            #fix: use integer division (len(pixels)/4 is a float on python 3)
            #and assign an int: bytearray items are ints, chr(255) would raise
            for i in range(len(pixels) // 4):
                newdata[i * 4 + Xindex] = 255
            packet[9] = client_options.intget("rowstride", 0)
            cdata = bytes(newdata)
        else:
            cdata = pixels
        new_client_options = {"rgb_format": rgb_format}
    else:
        #preserve
        cdata = pixels
        new_client_options = client_options
    wrapped = Compressed("%s pixels" % encoding, cdata)
    #rgb32 is always supported by all clients:
    return send_updated(encoding, wrapped, new_client_options)
def do_make_screenshot_packet(self):
    """Capture the root window and wrap it into a "screenshot" packet."""
    shot = self.root_window_model.take_screenshot()
    w, h, encoding, rowstride, data = shot
    assert encoding == "png"  #use fixed encoding for now
    wrapped = Compressed(encoding, data)
    return ["screenshot", w, h, encoding, rowstride, wrapped]
def compressed_wrapper(self, datatype, data, min_saving=128):
    """Compress data when a compressor is enabled and it actually helps.

    The compressed form is only kept when it saves at least
    min_saving bytes; otherwise (or when no compressor is enabled)
    the data is wrapped uncompressed to silence protocol warnings.
    """
    if any((self.zlib, self.lz4, self.lzo)):
        cw = compressed_wrapper(datatype, data,
                                zlib=self.zlib, lz4=self.lz4, lzo=self.lzo,
                                can_inline=False)
        #only worthwhile if it shrinks the payload enough:
        if len(data) >= len(cw) + min_saving:
            return cw
        #skip compressed version: fall through
    #we can't compress, so at least avoid warnings in the protocol layer:
    return Compressed(datatype, data, can_inline=True)
def do_make_screenshot_packet(self):
    """Grab the root window via GTK and wrap it into a "screenshot" packet."""
    from xpra.net.compression import Compressed
    capture = GTKImageCapture(self.root)
    w, h, encoding, rowstride, data = capture.take_screenshot()
    assert encoding == "png"  #use fixed encoding for now
    return ["screenshot", w, h, encoding, rowstride, Compressed(encoding, data)]
def compressed_wrapper(self, datatype, data, level=5):
    """Dummy utility method for compressing data.

    Actual client implementations will provide compression based on
    the client and server capabilities (lz4, lzo, zlib); this default
    just wraps the data uncompressed. Sub-classes should override it.
    """
    assert level >= 0
    from xpra.net.compression import Compressed
    wrapped = Compressed("raw %s" % datatype, data, can_inline=True)
    return wrapped
def do_make_screenshot_packet(self):
    """Screenshot the single managed root window as a "screenshot" packet.

    Only a single root window is supported for now.
    """
    assert len(self._id_to_window) == 1, "multi root window screenshot not implemented yet"
    #fix: dict views are not subscriptable on python 3,
    #use an iterator instead of .values()[0]:
    rwm = next(iter(self._id_to_window.values()))
    w, h, encoding, rowstride, data = rwm.take_screenshot()
    assert encoding == "png"  #use fixed encoding for now
    return ["screenshot", w, h, encoding, rowstride, Compressed(encoding, data)]
def send_sound_data(self, sound_source, data, metadata, packet_metadata=None):
    """Wrap the audio buffer as Compressed and send one "sound-data" packet.

    A missing packet_metadata is sent as an empty tuple.
    """
    codec = sound_source.codec
    self.send("sound-data",
              codec,
              Compressed(codec, data),
              metadata,
              packet_metadata or ())
def do_command(self):
    """Send the loaded file to the server for printing, then detach.

    Quits with EXIT_UNSUPPORTED when the server does not advertise
    printing support.
    """
    if not self.server_capabilities.boolget("printing"):
        self.warn_and_quit(EXIT_UNSUPPORTED, "server does not support printing")
        return
    #TODO: compress file data? (this should run locally most of the time anyway)
    from xpra.net.compression import Compressed
    blob = Compressed("print", self.file_data)
    self.send("print", self.filename, blob, *self.command)
    log("print: sending %s as %s for printing", self.filename, blob)
    self.idle_add(self.send, "disconnect", DONE, "detaching")
def do_command(self, caps : typedict):
    """Send the loaded file to the server for printing, then detach.

    Quits with EXIT_UNSUPPORTED when the server capabilities do not
    include printing support.
    """
    if not caps.boolget("printing"):
        self.warn_and_quit(EXIT_UNSUPPORTED, "server does not support printing")
        return
    #we don't compress file data
    #(this should run locally most of the time anyway)
    from xpra.net.compression import Compressed  #pylint: disable=import-outside-toplevel
    blob = Compressed("print", self.file_data)
    self.send("print", self.filename, blob, *self.command)
    log("print: sending %s as %s for printing", self.filename, blob)
    self.idle_add(self.send, "disconnect", DONE, "detaching")
def make_screenshot_packet_from_regions(self, regions):
    """Composite per-window images into a single png "screenshot" packet.

    regions = array of (wid, x, y, PIL.Image)
    The bounding box of all regions defines the output size; windows
    are pasted back-to-front (regions is ordered front-to-back).
    Returns ["screenshot", w, h, "png", rowstride, Compressed(...)],
    or an empty 0x0 packet when there is nothing to capture.
    """
    if not regions:
        log("screenshot: no regions found, returning empty 0x0 image!")
        return ["screenshot", 0, 0, "png", -1, ""]
    #in theory, we could run the rest in a non-UI thread since we're done with GTK..
    minx = min(x for (_, x, _, _) in regions)
    miny = min(y for (_, _, y, _) in regions)
    maxx = max((x + img.get_width()) for (_, x, _, img) in regions)
    maxy = max((y + img.get_height()) for (_, _, y, img) in regions)
    width = maxx - minx
    height = maxy - miny
    log("screenshot: %sx%s, min x=%s y=%s", width, height, minx, miny)
    from PIL import Image  #@UnresolvedImport pylint: disable=import-outside-toplevel
    screenshot = Image.new("RGBA", (width, height))
    for wid, x, y, img in reversed(regions):
        pixel_format = img.get_pixel_format()
        #map X11-style formats to what PIL understands:
        target_format = {
            "XRGB": "RGB",
            "BGRX": "RGB",
            "BGRA": "RGBA"
        }.get(pixel_format, pixel_format)
        pixels = img.get_pixels()
        w = img.get_width()
        h = img.get_height()
        #PIL cannot use the memoryview directly:
        if isinstance(pixels, memoryview):
            pixels = pixels.tobytes()
        try:
            window_image = Image.frombuffer(target_format, (w, h), pixels,
                                            "raw", pixel_format,
                                            img.get_rowstride())
        except Exception:
            log.error("Error parsing window pixels in %s format for window %i",
                      pixel_format, wid, exc_info=True)
            continue
        #paste at the window's position relative to the bounding box:
        tx = x - minx
        ty = y - miny
        screenshot.paste(window_image, (tx, ty))
    from io import BytesIO
    buf = BytesIO()
    screenshot.save(buf, "png")
    data = buf.getvalue()
    buf.close()
    packet = [
        "screenshot", width, height, "png", width * 4,
        Compressed("png", data)
    ]
    log("screenshot: %sx%s %s", packet[1], packet[2], packet[-1])
    return packet
def _packet_recompress(self, packet, index, name):
    """Re-wrap the payload at packet[index] for the onward connection.

    Small payloads (<512 bytes) are sent as plain strings; larger ones
    are recompressed with whichever algorithms both sides support, or
    wrapped as Compressed to avoid large-uncompressed-data warnings.
    """
    if len(packet)>index:
        data = packet[index]
        if len(data)<512:
            #NOTE(review): str(data) on a bytes value yields "b'...'" on
            #python 3 - presumably this variant targets python 2; verify.
            packet[index] = str(data)
            return
        #FIXME: this is ugly and not generic!
        #only use an algorithm if both our build and the peer support it:
        zlib = compression.use_zlib and self.caps.boolget("zlib", True)
        lz4 = compression.use_lz4 and self.caps.boolget("lz4", False)
        lzo = compression.use_lzo and self.caps.boolget("lzo", False)
        if zlib or lz4 or lzo:
            packet[index] = compressed_wrapper(name, data,
                                               zlib=zlib, lz4=lz4, lzo=lzo,
                                               can_inline=False)
        else:
            #prevent warnings about large uncompressed data
            packet[index] = Compressed("raw %s" % name, data,
                                       can_inline=True)
def new_sound_buffer(self, sound_source, data, metadata, packet_metadata=()):
    """Account for and forward a freshly captured sound buffer.

    Dropped when the sound source has been stopped. When the server
    does not support metadata bundling, each metadata chunk is sent as
    its own packet first; otherwise the (already compressed) metadata
    is wrapped and bundled with the main packet.
    """
    log("new_sound_buffer(%s, %s, %s, %s)", sound_source,
        len(data or ()), metadata, packet_metadata)
    if not self.sound_source:
        #capture has been stopped, drop the buffer:
        return
    self.sound_out_bytecount += len(data) + sum(len(x) for x in packet_metadata)
    if packet_metadata:
        if self.server_sound_bundle_metadata:
            #the packet metadata is compressed already, just wrap it:
            packet_metadata = Compressed("packet metadata",
                                         packet_metadata, can_inline=True)
        else:
            #server does not support bundling:
            #send each metadata chunk as an individual packet first
            for chunk in packet_metadata:
                self.send_sound_data(sound_source, chunk)
            packet_metadata = ()
    self.send_sound_data(sound_source, data, metadata, packet_metadata)
def new_sound_buffer(self, sound_source, data, metadata, packet_metadata=()):
    """Account for and forward a freshly captured sound buffer.

    Buffers with an old sequence number are dropped; metadata chunks
    (already compressed) are always bundled with the main packet.
    """
    log("new_sound_buffer(%s, %s, %s, %s)", sound_source,
        len(data or ()), metadata, packet_metadata)
    if sound_source.sequence < self.sound_source_sequence:
        #stale buffer from a previous source generation:
        log("sound buffer dropped: old sequence number: %s (current is %s)",
            sound_source.sequence, self.sound_source_sequence)
        return
    self.sound_out_bytecount += len(data) + sum(len(x) for x in packet_metadata)
    metadata["sequence"] = sound_source.sequence
    if packet_metadata:
        #the packet metadata is already compressed, just wrap it:
        packet_metadata = Compressed("packet metadata",
                                     packet_metadata, can_inline=True)
    self.send_sound_data(sound_source, data, metadata, packet_metadata)
def do_make_screenshot_packet(self):
    """Capture all managed windows and composite them into one png screenshot.

    Tray, unmanaged and uncapturable windows are skipped; override-redirect
    windows are drawn on top, and the focused window is drawn last among
    regular windows. Returns ["screenshot", w, h, "png", rowstride,
    Compressed(...)], or an empty 0x0 packet when nothing was captured.
    """
    log("grabbing screenshot")
    regions = []
    OR_regions = []
    for wid in reversed(sorted(self._id_to_window.keys())):
        window = self._id_to_window.get(wid)
        log("screenshot: window(%s)=%s", wid, window)
        if window is None:
            continue
        if window.is_tray():
            log("screenshot: skipping tray window %s", wid)
            continue
        if not window.is_managed():
            log("screenshot: window %s is not/no longer managed", wid)
            continue
        if window.is_OR():
            x, y = window.get_property("geometry")[:2]
        else:
            x, y = self._desktop_manager.window_geometry(window)[:2]
        log("screenshot: position(%s)=%s,%s", window, x, y)
        w, h = window.get_dimensions()
        log("screenshot: size(%s)=%sx%s", window, w, h)
        try:
            with xsync:
                img = window.get_image(0, 0, w, h)
        except:
            log.warn("screenshot: window %s could not be captured", wid)
            continue
        if img is None:
            log.warn("screenshot: no pixels for window %s", wid)
            continue
        log("screenshot: image=%s, size=%s", (img, img.get_size()))
        if img.get_pixel_format() not in ("RGB", "RGBA", "XRGB", "BGRX", "ARGB", "BGRA"):
            log.warn("window pixels for window %s using an unexpected rgb format: %s",
                     wid, img.get_pixel_format())
            continue
        item = (wid, x, y, img)
        if window.is_OR():
            OR_regions.append(item)
        elif self._has_focus == wid:
            #window with focus first (drawn last)
            regions.insert(0, item)
        else:
            regions.append(item)
    all_regions = OR_regions + regions
    if len(all_regions) == 0:
        log("screenshot: no regions found, returning empty 0x0 image!")
        return ["screenshot", 0, 0, "png", -1, ""]
    log("screenshot: found regions=%s, OR_regions=%s", len(regions), len(OR_regions))
    #in theory, we could run the rest in a non-UI thread since we're done with GTK..
    minx = min([x for (_, x, _, _) in all_regions])
    miny = min([y for (_, _, y, _) in all_regions])
    maxx = max([(x + img.get_width()) for (_, x, _, img) in all_regions])
    maxy = max([(y + img.get_height()) for (_, _, y, img) in all_regions])
    width = maxx - minx
    height = maxy - miny
    log("screenshot: %sx%s, min x=%s y=%s", width, height, minx, miny)
    from PIL import Image  #@UnresolvedImport
    screenshot = Image.new("RGBA", (width, height))
    for wid, x, y, img in reversed(all_regions):
        pixel_format = img.get_pixel_format()
        target_format = {
            "XRGB": "RGB",
            "BGRX": "RGB",
            "BGRA": "RGBA"
        }.get(pixel_format, pixel_format)
        pixels = img.get_pixels()
        #fix: use this image's own dimensions -
        #previously the leftover w/h values from the capture loop above
        #were re-used here, corrupting every window but the last one:
        w = img.get_width()
        h = img.get_height()
        #PIL cannot use the memoryview directly:
        if _memoryview and isinstance(pixels, _memoryview):
            pixels = pixels.tobytes()
        try:
            window_image = Image.frombuffer(target_format, (w, h), pixels,
                                            "raw", pixel_format,
                                            img.get_rowstride())
        except:
            log.error("Error parsing window pixels in %s format",
                      pixel_format, exc_info=True)
            continue
        tx = x - minx
        ty = y - miny
        screenshot.paste(window_image, (tx, ty))
    buf = StringIOClass()
    screenshot.save(buf, "png")
    data = buf.getvalue()
    buf.close()
    packet = [
        "screenshot", width, height, "png", width * 4,
        Compressed("png", data)
    ]
    log("screenshot: %sx%s %s", packet[1], packet[2], packet[-1])
    return packet
def encode(coding: str, image, options: dict):
    """Encode an image as rgb24/rgb32 pixel data, compressing when worthwhile.

    Reformats the image if its pixel format is not among the client's
    supported rgb formats, then optionally compresses the pixel data
    (based on size and the "speed" option) and wraps it so the network
    layer will not recompress it.

    Returns (coding, wrapper, client_options, width, height, stride, bpp).
    """
    pixel_format = image.get_pixel_format()
    #log("rgb_encode%s pixel_format=%s, rgb_formats=%s",
    #    (coding, image, rgb_formats, supports_transparency, speed, rgb_zlib, rgb_lz4), pixel_format, rgb_formats)
    if pixel_format in ("BGRX", "BGRA", "RGBA"):
        rgb_formats = options.get("rgb_formats", ("BGRX", "BGRA", "RGBA"))
    elif pixel_format in ("RGB", "BGR"):
        rgb_formats = options.get("rgb_formats", ("RGB", "BGR"))
    else:
        raise Exception("unsupported pixel format %s" % pixel_format)
    supports_transparency = options.get("alpha", True)
    if pixel_format not in rgb_formats:
        log("rgb_encode reformatting because %s not in %s, supports_transparency=%s",
            pixel_format, rgb_formats, supports_transparency)
        if not rgb_reformat(image, rgb_formats, supports_transparency):
            raise Exception(
                "cannot find compatible rgb format to use for %s! (supported: %s)"
                % (pixel_format, rgb_formats))
        #get the new format:
        pixel_format = image.get_pixel_format()
        #switch encoding if necessary:
        if len(pixel_format) == 4:
            coding = "rgb32"
        elif len(pixel_format) == 3:
            coding = "rgb24"
        else:
            raise Exception("invalid pixel format %s" % pixel_format)
    #we may still want to re-stride:
    image.may_restride()
    #always tell client which pixel format we are sending:
    client_options = {"rgb_format": pixel_format}
    #compress here and return a wrapper so network code knows it is already zlib compressed:
    pixels = image.get_pixels()
    assert pixels, "failed to get pixels from %s" % image
    width = image.get_width()
    height = image.get_height()
    stride = image.get_rowstride()
    speed = options.get("speed", 50)
    #compression stage:
    level = 0
    algo = "not"
    l = len(pixels)
    lz4 = options.get("lz4", False)
    #only bother compressing larger buffers, and skip it entirely at top speed
    #unless lz4 (which is cheap) is available:
    if l >= 512 and (lz4 or speed < 100):
        if l >= 4096:
            #lower speed -> higher compression level (1..8):
            level = 1 + max(0, min(7, int(100 - speed) // 14))
        else:
            #fewer pixels, make it more likely we won't bother compressing
            #and use a lower level (max=3)
            level = max(0, min(3, int(125 - speed) // 35))
    if level > 0:
        zlib = options.get("zlib", False)
        can_inline = l <= 32768
        cwrapper = compressed_wrapper(coding, pixels, level=level,
                                      zlib=zlib, lz4=lz4,
                                      brotli=False, none=False,
                                      can_inline=can_inline)
        if isinstance(cwrapper, LevelCompressed):
            #add compressed marker:
            client_options[cwrapper.algorithm] = cwrapper.level & 0xF
            #remove network layer compression marker
            #so that this data will be decompressed by the decode thread client side:
            cwrapper.level = 0
        elif can_inline and isinstance(pixels, memoryview):
            assert isinstance(cwrapper, Compressed)
            assert cwrapper.data == pixels
            #compression either did not work or was not enabled
            #and memoryview pixel data cannot be handled by the packet encoders
            #so we convert it to bytes so it can still be inlined with the packet data:
            cwrapper.data = rgb_transform.pixels_to_bytes(pixels)
    else:
        #can't pass a raw buffer to bencode / rencode,
        #and even if we could, the image containing those pixels may be freed by the time we get to the encoder
        algo = "not"
        cwrapper = Compressed(coding, rgb_transform.pixels_to_bytes(pixels), True)
    if pixel_format.find("A") >= 0 or pixel_format.find("X") >= 0:
        bpp = 32
    else:
        bpp = 24
    log("rgb_encode using level=%s for %5i bytes at %3i speed, %s compressed %4sx%-4s in %s/%s: %5s bytes down to %5s",
        level, l, speed, algo, width, height, coding, pixel_format,
        len(pixels), len(cwrapper.data))
    #wrap it using "Compressed" so the network layer receiving it
    #won't decompress it (leave it to the client's draw thread)
    return coding, cwrapper, client_options, width, height, stride, bpp
def encode(coding: str, image, quality: int, speed: int, supports_transparency: bool, grayscale: bool = False, resize=None):
    """Encode an image with pillow as jpeg, webp, png, png/P or png/L.

    Handles non 24/32-bit input formats (r210, BGR565, RLE8 palettes),
    strips transparency when it cannot be handled, optionally converts
    to grayscale and/or resizes, then saves with quality/speed-derived
    encoder options.

    Returns (coding, Compressed(...), client_options, width, height, 0, bpp).
    """
    log("pillow.encode%s", (coding, image, quality, speed,
                            supports_transparency, grayscale, resize))
    assert coding in ("jpeg", "webp", "png", "png/P", "png/L"), "unsupported encoding: %s" % coding
    assert image, "no image to encode"
    pixel_format = bytestostr(image.get_pixel_format())
    palette = None
    w = image.get_width()
    h = image.get_height()
    #map our pixel formats to PIL modes:
    rgb = {
        "RLE8": "P",
        "XRGB": "RGB",
        "BGRX": "RGB",
        "RGBX": "RGB",
        "RGBA": "RGBA",
        "BGRA": "RGBA",
        "BGR": "RGB",
    }.get(pixel_format, pixel_format)
    bpp = 32
    pixels = image.get_pixels()
    assert pixels, "failed to get pixels from %s" % image
    #remove transparency if it cannot be handled,
    #and deal with non 24-bit formats:
    if pixel_format == "r210":
        stride = image.get_rowstride()
        from xpra.codecs.argb.argb import r210_to_rgba, r210_to_rgb  #@UnresolvedImport
        if supports_transparency:
            pixels = r210_to_rgba(pixels, w, h, stride, w * 4)
            pixel_format = "RGBA"
            rgb = "RGBA"
        else:
            image.set_rowstride(image.get_rowstride() * 3 // 4)
            pixels = r210_to_rgb(pixels, w, h, stride, w * 3)
            pixel_format = "RGB"
            rgb = "RGB"
            bpp = 24
    elif pixel_format == "BGR565":
        from xpra.codecs.argb.argb import bgr565_to_rgbx, bgr565_to_rgb  #@UnresolvedImport
        if supports_transparency:
            image.set_rowstride(image.get_rowstride() * 2)
            pixels = bgr565_to_rgbx(pixels)
            pixel_format = "RGBA"
            rgb = "RGBA"
        else:
            image.set_rowstride(image.get_rowstride() * 3 // 2)
            pixels = bgr565_to_rgb(pixels)
            pixel_format = "RGB"
            rgb = "RGB"
            bpp = 24
    elif pixel_format == "RLE8":
        pixel_format = "P"
        palette = []
        #pillow requires 8 bit palette values,
        #but we get 16-bit values from the image wrapper (X11 palettes are 16-bit):
        for r, g, b in image.get_palette():
            palette.append((r >> 8) & 0xFF)
            palette.append((g >> 8) & 0xFF)
            palette.append((b >> 8) & 0xFF)
        bpp = 8
    else:
        assert pixel_format in ("RGBA", "RGBX", "BGRA", "BGRX", "BGR", "RGB"), \
            "invalid pixel format '%s'" % pixel_format
    try:
        #PIL cannot use the memoryview directly:
        if isinstance(pixels, memoryview):
            pixels = pixels.tobytes()
        #it is safe to use frombuffer() here since the convert()
        #calls below will not convert and modify the data in place
        #and we save the compressed data then discard the image
        im = Image.frombuffer(rgb, (w, h), pixels, "raw", pixel_format,
                              image.get_rowstride(), 1)
        if palette:
            im.putpalette(palette)
            im.palette = ImagePalette.ImagePalette("RGB", palette=palette,
                                                   size=len(palette))
        if coding != "png/L" and grayscale:
            if rgb.find("A") >= 0 and supports_transparency and coding != "jpeg":
                #keep the alpha channel alongside the luminance:
                im = im.convert("LA")
            else:
                im = im.convert("L")
            rgb = "L"
            bpp = 8
        elif coding.startswith("png") and not supports_transparency and rgb == "RGBA":
            im = im.convert("RGB")
            rgb = "RGB"
            bpp = 24
    except Exception:
        log.error("pillow.encode%s converting %s pixels from %s to %s failed",
                  (coding, image, speed, supports_transparency, grayscale, resize),
                  type(pixels), pixel_format, rgb, exc_info=True)
        raise
    client_options = {}
    if resize:
        #pick a resampling filter to match the speed setting:
        if speed >= 95:
            resample = "NEAREST"
        elif speed > 80:
            resample = "BILINEAR"
        elif speed >= 30:
            resample = "BICUBIC"
        else:
            resample = "LANCZOS"
        resample_value = getattr(Image, resample, 0)
        im = im.resize(resize, resample=resample_value)
        client_options["resample"] = resample
    if coding in ("jpeg", "webp"):
        #newer versions of pillow require explicit conversion to non-alpha:
        if pixel_format.find("A") >= 0:
            im = im.convert("RGB")
        q = int(min(100, max(1, quality)))
        kwargs = im.info
        kwargs["quality"] = q
        client_options["quality"] = q
        if coding == "jpeg" and speed < 50:
            #(optimizing jpeg is pretty cheap and worth doing)
            kwargs["optimize"] = True
            client_options["optimize"] = True
        elif coding == "webp" and q >= 100:
            kwargs["lossless"] = 1
        pil_fmt = coding.upper()
    else:
        assert coding in ("png", "png/P", "png/L"), "unsupported encoding: %s" % coding
        if coding in ("png/L", "png/P") and supports_transparency and rgb == "RGBA":
            #grab alpha channel (the last one):
            #we use the last channel because we know it is RGBA,
            #otherwise we should do: alpha_index= image.getbands().index('A')
            alpha = im.split()[-1]
            #convert to simple on or off mask:
            #set all pixel values below 128 to 255, and the rest to 0
            def mask_value(a):
                if a <= 128:
                    return 255
                return 0
            mask = Image.eval(alpha, mask_value)
        else:
            #no transparency
            mask = None
        if coding == "png/L":
            im = im.convert("L", palette=Image.ADAPTIVE, colors=255)
            bpp = 8
        elif coding == "png/P":
            #convert to 255 indexed colour if:
            # * we're not in palette mode yet (source is >8bpp)
            # * we need space for the mask (256 -> 255)
            if palette is None or mask:
                #I wanted to use the "better" adaptive method,
                #but this does NOT work (produces a black image instead):
                #im.convert("P", palette=Image.ADAPTIVE)
                im = im.convert("P", palette=Image.WEB, colors=255)
            bpp = 8
        kwargs = im.info
        if mask:
            # paste the alpha mask to the color of index 255
            im.paste(255, mask)
            client_options["transparency"] = 255
            kwargs["transparency"] = 255
        if speed == 0:
            #optimizing png is very rarely worth doing
            kwargs["optimize"] = True
            client_options["optimize"] = True
        #level can range from 0 to 9, but anything above 5 is way too slow for small gains:
        #76-100 -> 1
        #51-76  -> 2
        #etc
        level = max(1, min(5, (125 - speed) // 25))
        kwargs["compress_level"] = level
        #no need to expose to the client:
        #client_options["compress_level"] = level
        #default is good enough, no need to override, other options:
        #DEFAULT_STRATEGY, FILTERED, HUFFMAN_ONLY, RLE, FIXED
        #kwargs["compress_type"] = Image.DEFAULT_STRATEGY
        pil_fmt = "PNG"
    buf = BytesIO()
    im.save(buf, pil_fmt, **kwargs)
    if SAVE_TO_FILE:  # pragma: no cover
        #debug option: keep a copy of every encoded image on disk
        filename = "./%s.%s" % (time.time(), pil_fmt)
        im.save(filename, pil_fmt)
        log.info("saved %s to %s", coding, filename)
    log("sending %sx%s %s as %s, mode=%s, options=%s", w, h, pixel_format,
        coding, im.mode, kwargs)
    data = buf.getvalue()
    buf.close()
    return coding, Compressed(
        coding,
        data), client_options, image.get_width(), image.get_height(), 0, bpp
def compressed_wrapper(self, datatype, data, level=5): #sub-classes should override this assert level >= 0 from xpra.net.compression import Compressed return Compressed("raw %s" % datatype, data, can_inline=True)
def encode(coding, image, quality, speed, supports_transparency):
    """Encode an image with pillow as jpeg, png, png/P or png/L.

    Handles non 24/32-bit input formats (r210, BGR565, RLE8 palettes),
    strips transparency when it cannot be handled, then saves with
    quality/speed-derived encoder options.

    Returns (coding, Compressed(...), client_options, width, height, 0, bpp).
    """
    log("pillow.encode%s", (coding, image, quality, speed, supports_transparency))
    pixel_format = image.get_pixel_format()
    palette = None
    w = image.get_width()
    h = image.get_height()
    #map our pixel formats to PIL modes:
    rgb = {
        "RLE8": "P",
        "XRGB": "RGB",
        "BGRX": "RGB",
        "RGBX": "RGB",
        "RGBA": "RGBA",
        "BGRA": "RGBA",
        "BGR": "RGB",
    }.get(pixel_format, pixel_format)
    bpp = 32
    #remove transparency if it cannot be handled,
    #and deal with non 24-bit formats:
    try:
        pixels = image.get_pixels()
        assert pixels, "failed to get pixels from %s" % image
        if pixel_format == "r210":
            from xpra.codecs.argb.argb import r210_to_rgba, r210_to_rgb  #@UnresolvedImport
            if supports_transparency:
                pixels = r210_to_rgba(pixels)
                pixel_format = "RGBA"
                rgb = "RGBA"
            else:
                image.set_rowstride(image.get_rowstride() * 3 // 4)
                pixels = r210_to_rgb(pixels)
                pixel_format = "RGB"
                rgb = "RGB"
                bpp = 24
        elif pixel_format == "BGR565":
            from xpra.codecs.argb.argb import bgr565_to_rgbx, bgr565_to_rgb  #@UnresolvedImport
            if supports_transparency:
                image.set_rowstride(image.get_rowstride() * 2)
                pixels = bgr565_to_rgbx(pixels)
                pixel_format = "RGBA"
                rgb = "RGBA"
            else:
                image.set_rowstride(image.get_rowstride() * 3 // 2)
                pixels = bgr565_to_rgb(pixels)
                pixel_format = "RGB"
                rgb = "RGB"
                bpp = 24
        elif pixel_format == "RLE8":
            pixel_format = "P"
            palette = image.get_palette()
            #pillow wants 8-bit palette values, flattened as R+G+B planes,
            #but we get 16-bit (r, g, b) tuples from the image wrapper:
            rp, gp, bp = [], [], []
            for r, g, b in palette:
                rp.append((r >> 8) & 0xFF)
                gp.append((g >> 8) & 0xFF)
                bp.append((b >> 8) & 0xFF)
            palette = rp + gp + bp
        #PIL cannot use the memoryview directly:
        if type(pixels) != _buffer:
            pixels = memoryview_to_bytes(pixels)
        #it is safe to use frombuffer() here since the convert()
        #calls below will not convert and modify the data in place
        #and we save the compressed data then discard the image
        im = Image.frombuffer(rgb, (w, h), pixels, "raw", pixel_format,
                              image.get_rowstride(), 1)
        if palette:
            im.putpalette(palette)
            im.palette = ImagePalette.ImagePalette("RGB", palette=palette,
                                                   size=len(palette))
        if coding.startswith("png") and not supports_transparency and rgb == "RGBA":
            im = im.convert("RGB")
            rgb = "RGB"
            bpp = 24
    except Exception:
        log.error("PIL_encode%s converting %s pixels from %s to %s failed",
                  (w, h, coding, "%s bytes" % image.get_size(),
                   pixel_format, image.get_rowstride()),
                  type(pixels), pixel_format, rgb, exc_info=True)
        raise
    buf = BytesIOClass()
    client_options = {}
    if coding == "jpeg":
        #newer versions of pillow require explicit conversion to non-alpha:
        if pixel_format.find("A") >= 0:
            im = im.convert("RGB")
        q = int(min(99, max(1, quality)))
        kwargs = im.info
        kwargs["quality"] = q
        client_options["quality"] = q
        if speed < 50:
            #(optimizing jpeg is pretty cheap and worth doing)
            kwargs["optimize"] = True
            client_options["optimize"] = True
        pil_fmt = coding.upper()
    else:
        assert coding in ("png", "png/P", "png/L"), "unsupported encoding: %s" % coding
        if coding in ("png/L", "png/P") and supports_transparency and rgb == "RGBA":
            #grab alpha channel (the last one):
            #we use the last channel because we know it is RGBA,
            #otherwise we should do: alpha_index= image.getbands().index('A')
            alpha = im.split()[-1]
            #convert to simple on or off mask:
            #set all pixel values below 128 to 255, and the rest to 0
            def mask_value(a):
                if a <= 128:
                    return 255
                return 0
            mask = Image.eval(alpha, mask_value)
        else:
            #no transparency
            mask = None
        if coding == "png/L":
            im = im.convert("L", palette=Image.ADAPTIVE, colors=255)
            bpp = 8
        elif coding == "png/P":
            #I wanted to use the "better" adaptive method,
            #but this does NOT work (produces a black image instead):
            #im.convert("P", palette=Image.ADAPTIVE)
            im = im.convert("P", palette=Image.WEB, colors=255)
            bpp = 8
        if mask:
            # paste the alpha mask to the color of index 255
            im.paste(255, mask)
        kwargs = im.info
        if mask is not None:
            client_options["transparency"] = 255
            kwargs["transparency"] = 255
        if speed == 0:
            #optimizing png is very rarely worth doing
            kwargs["optimize"] = True
            client_options["optimize"] = True
        #level can range from 0 to 9, but anything above 5 is way too slow for small gains:
        #76-100 -> 1
        #51-76  -> 2
        #etc
        level = max(1, min(5, (125 - speed) // 25))
        kwargs["compress_level"] = level
        #no need to expose to the client:
        #client_options["compress_level"] = level
        #default is good enough, no need to override, other options:
        #DEFAULT_STRATEGY, FILTERED, HUFFMAN_ONLY, RLE, FIXED
        #kwargs["compress_type"] = Image.DEFAULT_STRATEGY
        pil_fmt = "PNG"
    im.save(buf, pil_fmt, **kwargs)
    if SAVE_TO_FILE:
        #debug option: keep a copy of every encoded image on disk
        filename = "./%s.%s" % (time.time(), pil_fmt)
        im.save(filename, pil_fmt)
        log.info("saved %s to %s", coding, filename)
    log("sending %sx%s %s as %s, mode=%s, options=%s", w, h, pixel_format,
        coding, im.mode, kwargs)
    data = buf.getvalue()
    buf.close()
    return coding, Compressed(
        coding,
        data), client_options, image.get_width(), image.get_height(), 0, bpp
def process_server_packet(self, proto, packet):
    """Process a packet received from the real server.

    Some packet types are consumed here (connection-lost, draw, challenge),
    some are rewritten before forwarding (hello, info-response, cursor,
    window-icon, send-file*), everything else is forwarded to the client
    untouched via queue_client_packet().

    proto: the server protocol object the packet arrived on
    packet: list/tuple whose first element is the packet type
    """
    packet_type = packet[0]
    log("process_server_packet: %s", packet_type)
    if packet_type == Protocol.CONNECTION_LOST:
        self.stop("server connection lost", proto)
        return
    elif packet_type == "disconnect":
        log("got disconnect from server: %s", packet[1])
        if self.exit:
            self.server_protocol.close()
        else:
            self.stop("disconnect from server: %s" % packet[1])
    elif packet_type == "hello":
        c = typedict(packet[1])
        maxw, maxh = c.intpair("max_desktop_size", (4096, 4096))
        caps = self.filter_server_caps(c)
        #add new encryption caps:
        if self.cipher:
            from xpra.net.crypto import crypto_backend_init, new_cipher_caps, DEFAULT_PADDING
            crypto_backend_init()
            padding_options = self.caps.strlistget("cipher.padding.options", [DEFAULT_PADDING])
            auth_caps = new_cipher_caps(self.client_protocol, self.cipher,
                                        self.encryption_key, padding_options)
            caps.update(auth_caps)
        #may need to bump packet size:
        proto.max_packet_size = maxw * maxh * 4 * 4
        #file transfer is only enabled if both ends support it,
        #and the packet size must accommodate the largest file allowed:
        file_transfer = self.caps.boolget("file-transfer") and c.boolget("file-transfer")
        file_size_limit = max(self.caps.intget("file-size-limit"), c.intget("file-size-limit"))
        file_max_packet_size = int(file_transfer) * (1024 + file_size_limit * 1024 * 1024)
        self.client_protocol.max_packet_size = max(self.client_protocol.max_packet_size,
                                                   file_max_packet_size)
        self.server_protocol.max_packet_size = max(self.server_protocol.max_packet_size,
                                                   file_max_packet_size)
        packet = ("hello", caps)
    elif packet_type == "info-response":
        #adds proxy info:
        #note: this is only seen by the client application
        #"xpra info" is a new connection, which talks to the proxy server...
        info = packet[1]
        info.update(self.get_proxy_info(proto))
    elif packet_type == "lost-window":
        wid = packet[1]
        #mark it as lost so we can drop any current/pending frames
        self.lost_windows.add(wid)
        #queue it so it gets cleaned safely (for video encoders mostly):
        self.encode_queue.put(packet)
        #and fall through so tell the client immediately
    elif packet_type == "draw":
        #use encoder thread:
        self.encode_queue.put(packet)
        #which will queue the packet itself when done:
        return
    #we do want to reformat cursor packets...
    #as they will have been uncompressed by the network layer already:
    elif packet_type == "cursor":
        #packet = ["cursor", x, y, width, height, xhot, yhot, serial, pixels, name]
        #or:
        #packet = ["cursor", "png", x, y, width, height, xhot, yhot, serial, pixels, name]
        #or:
        #packet = ["cursor", ""]
        if len(packet) >= 8:
            #hard to distinguish png cursors from normal cursors...
            try:
                #plain cursor: packet[1] is the x coordinate
                int(packet[1])
                self._packet_recompress(packet, 8, "cursor")
            except (TypeError, ValueError):
                #png cursor: packet[1] is the encoding string,
                #so the pixel data is one index further:
                self._packet_recompress(packet, 9, "cursor")
    elif packet_type == "window-icon":
        self._packet_recompress(packet, 5, "icon")
    elif packet_type == "send-file":
        if packet[6]:
            packet[6] = Compressed("file-data", packet[6])
    elif packet_type == "send-file-chunk":
        if packet[3]:
            packet[3] = Compressed("file-chunk-data", packet[3])
    elif packet_type == "challenge":
        password = self.session_options.get("password")
        if not password:
            self.stop("authentication requested by the server, but no password available for this session")
            return
        from xpra.net.crypto import get_salt, gendigest
        #client may have already responded to the challenge,
        #so we have to handle authentication from this end
        server_salt = bytestostr(packet[1])
        salt_len = len(server_salt)
        digest = bytestostr(packet[3])
        salt_digest = "xor"
        if len(packet) >= 5:
            salt_digest = bytestostr(packet[4])
        if salt_digest in ("xor", "des"):
            if not LEGACY_SALT_DIGEST:
                self.stop("server uses legacy salt digest '%s'" % salt_digest)
                return
            log.warn("Warning: server using legacy support for '%s' salt digest", salt_digest)
        if salt_digest == "xor":
            #with xor, we have to match the size
            assert salt_len >= 16, "server salt is too short: only %i bytes, minimum is 16" % salt_len
            assert salt_len <= 256, "server salt is too long: %i bytes, maximum is 256" % salt_len
        else:
            #other digest, 32 random bytes is enough:
            salt_len = 32
        client_salt = get_salt(salt_len)
        salt = gendigest(salt_digest, client_salt, server_salt)
        challenge_response = gendigest(digest, password, salt)
        if not challenge_response:
            #(fixed: the format string had two placeholders for one argument)
            log("invalid digest module '%s'", digest)
            self.stop("server requested '%s' digest but it is not supported" % digest)
            return
        log.info("sending %s challenge response", digest)
        self.send_hello(challenge_response, client_salt)
        return
    self.queue_client_packet(packet)
def process_draw(self, packet):
    """Process a "draw" packet from the server (runs in the encoder thread).

    Depending on the encoding and client options, the pixel data is either
    passed through unchanged, re-wrapped as plain RGB, or re-compressed
    with a proxy video encoder before being sent on to the client.

    Returns True if the (possibly modified) packet should be forwarded,
    False if it must be dropped (ie: the window is already lost).
    """
    wid, x, y, width, height, encoding, pixels, _, rowstride, client_options = packet[1:11]
    #never modify mmap packets
    if encoding in ("mmap", "scroll"):
        return True

    #we have a proxy video packet:
    rgb_format = client_options.get("rgb_format", "")
    enclog("proxy draw: client_options=%s", client_options)

    def send_updated(encoding, compressed_data, updated_client_options):
        #update the packet with actual encoding data used:
        packet[6] = encoding
        packet[7] = compressed_data
        packet[10] = updated_client_options
        enclog("returning %s bytes from %s, options=%s",
               len(compressed_data), len(pixels), updated_client_options)
        return (wid not in self.lost_windows)

    def passthrough(strip_alpha=True):
        enclog("proxy draw: %s passthrough (rowstride: %s vs %s, strip alpha=%s)",
               rgb_format, rowstride, client_options.get("rowstride", 0), strip_alpha)
        if strip_alpha:
            #passthrough as plain RGB:
            Xindex = rgb_format.upper().find("X")
            if Xindex >= 0 and len(rgb_format) == 4:
                #force clear alpha (which may be garbage):
                newdata = bytearray(pixels)
                #(fixed: use integer division and an int value -
                # "/" yields a float and chr() a str, both TypeErrors on Python 3)
                for i in range(len(pixels) // 4):
                    newdata[i * 4 + Xindex] = 0xff
                packet[9] = client_options.get("rowstride", 0)
                cdata = bytes(newdata)
            else:
                cdata = pixels
            new_client_options = {"rgb_format": rgb_format}
        else:
            #preserve
            cdata = pixels
            new_client_options = client_options
        wrapped = Compressed("%s pixels" % encoding, cdata)
        #FIXME: we should not assume that rgb32 is supported here...
        #(we may have to convert to rgb24..)
        return send_updated("rgb32", wrapped, new_client_options)

    proxy_video = client_options.get("proxy", False)
    if PASSTHROUGH and (encoding in ("rgb32", "rgb24") or proxy_video):
        #we are dealing with rgb data, so we can pass it through:
        return passthrough(proxy_video)
    elif not self.video_encoder_types or not client_options or not proxy_video:
        #ensure we don't try to re-compress the pixel data in the network layer:
        #(re-add the "compressed" marker that gets lost when we re-assemble packets)
        packet[7] = Compressed("%s pixels" % encoding, packet[7])
        return True

    #video encoding: find existing encoder
    ve = self.video_encoders.get(wid)
    if ve:
        if ve in self.lost_windows:
            #we cannot clean the video encoder here, there may be more frames queue up
            #"lost-window" in encode_loop will take care of it safely
            return False
        #we must verify that the encoder is still valid
        #and scrap it if not (ie: when window is resized)
        if ve.get_width() != width or ve.get_height() != height:
            enclog("closing existing video encoder %s because dimensions have changed from %sx%s to %sx%s",
                   ve, ve.get_width(), ve.get_height(), width, height)
            ve.clean()
            ve = None
        elif ve.get_encoding() != encoding:
            #(fixed: the encoder instance was missing from the log arguments)
            enclog("closing existing video encoder %s because encoding has changed from %s to %s",
                   ve, ve.get_encoding(), encoding)
            ve.clean()
            ve = None

    #scaling and depth are proxy-encoder attributes:
    scaling = client_options.get("scaling", (1, 1))
    depth = client_options.get("depth", 24)
    rowstride = client_options.get("rowstride", rowstride)
    quality = client_options.get("quality", -1)
    speed = client_options.get("speed", -1)
    timestamp = client_options.get("timestamp")

    image = ImageWrapper(x, y, width, height, pixels, rgb_format,
                         depth, rowstride, planes=ImageWrapper.PACKED)
    if timestamp is not None:
        image.set_timestamp(timestamp)

    #the encoder options are passed through:
    encoder_options = client_options.get("options", {})
    if not ve:
        #make a new video encoder:
        spec = self._find_video_encoder(encoding, rgb_format)
        if spec is None:
            #no video encoder!
            enc_pillow = get_codec("enc_pillow")
            if not enc_pillow:
                from xpra.server.picture_encode import warn_encoding_once
                warn_encoding_once("no-video-no-PIL",
                                   "no video encoder found for rgb format %s, sending as plain RGB!" % rgb_format)
                return passthrough(True)
            enclog("no video encoder available: sending as jpeg")
            coding, compressed_data, client_options, _, _, _, _ = \
                enc_pillow.encode("jpeg", image, quality, speed, False)
            return send_updated(coding, compressed_data, client_options)

        enclog("creating new video encoder %s for window %s", spec, wid)
        ve = spec.make_instance()
        #dst_formats is specified with first frame only:
        dst_formats = client_options.get("dst_formats")
        if dst_formats is not None:
            #save it in case we timeout the video encoder,
            #so we can instantiate it again, even from a frame no>1
            self.video_encoders_dst_formats = dst_formats
        else:
            assert self.video_encoders_dst_formats, "BUG: dst_formats not specified for proxy and we don't have it either"
            dst_formats = self.video_encoders_dst_formats
        ve.init_context(width, height, rgb_format, dst_formats, encoding,
                        quality, speed, scaling, {})
        self.video_encoders[wid] = ve
        self.video_encoders_last_used_time[wid] = time.time()  #just to make sure this is always set

    #actual video compression:
    enclog("proxy compression using %s with quality=%s, speed=%s", ve, quality, speed)
    data, out_options = ve.compress_image(image, quality, speed, encoder_options)
    #pass through some options if we don't have them from the encoder
    #(maybe we should also use the "pts" from the real server?)
    for k in ("timestamp", "rgb_format", "depth", "csc"):
        if k not in out_options and k in client_options:
            out_options[k] = client_options[k]
    self.video_encoders_last_used_time[wid] = time.time()
    return send_updated(ve.get_encoding(), Compressed(encoding, data), out_options)
def process_server_packet(self, proto, packet):
    """Process a packet received from the real server (legacy hmac variant).

    Consumes connection-lost, draw and challenge packets, rewrites
    hello / info-response / cursor / window-icon / send-file* packets
    in place, and forwards everything else to the client via
    queue_client_packet().
    """
    packet_type = packet[0]
    log("process_server_packet: %s", packet_type)
    if packet_type == Protocol.CONNECTION_LOST:
        self.stop("server connection lost", proto)
        return
    elif packet_type == "disconnect":
        log("got disconnect from server: %s", packet[1])
        if self.exit:
            self.server_protocol.close()
        else:
            self.stop("disconnect from server: %s" % packet[1])
    elif packet_type == "hello":
        c = typedict(packet[1])
        maxw, maxh = c.intpair("max_desktop_size", (4096, 4096))
        caps = self.filter_server_caps(c)
        #add new encryption caps:
        if self.cipher:
            from xpra.net.crypto import crypto_backend_init, new_cipher_caps, DEFAULT_PADDING
            crypto_backend_init()
            padding_options = self.caps.strlistget("cipher.padding.options", [DEFAULT_PADDING])
            auth_caps = new_cipher_caps(self.client_protocol, self.cipher,
                                        self.encryption_key, padding_options)
            caps.update(auth_caps)
        #may need to bump packet size:
        proto.max_packet_size = maxw * maxh * 4 * 4
        #file transfer is only enabled if both ends support it,
        #and the packet size must accommodate the largest file allowed:
        file_transfer = self.caps.boolget("file-transfer") and c.boolget("file-transfer")
        file_size_limit = max(self.caps.intget("file-size-limit"), c.intget("file-size-limit"))
        file_max_packet_size = int(file_transfer) * (1024 + file_size_limit * 1024 * 1024)
        self.client_protocol.max_packet_size = max(self.client_protocol.max_packet_size,
                                                   file_max_packet_size)
        self.server_protocol.max_packet_size = max(self.server_protocol.max_packet_size,
                                                   file_max_packet_size)
        packet = ("hello", caps)
    elif packet_type == "info-response":
        #adds proxy info:
        #note: this is only seen by the client application
        #"xpra info" is a new connection, which talks to the proxy server...
        info = packet[1]
        info.update(self.get_proxy_info(proto))
    elif packet_type == "lost-window":
        wid = packet[1]
        #mark it as lost so we can drop any current/pending frames
        self.lost_windows.add(wid)
        #queue it so it gets cleaned safely (for video encoders mostly):
        self.encode_queue.put(packet)
        #and fall through so tell the client immediately
    elif packet_type == "draw":
        #use encoder thread:
        self.encode_queue.put(packet)
        #which will queue the packet itself when done:
        return
    #we do want to reformat cursor packets...
    #as they will have been uncompressed by the network layer already:
    elif packet_type == "cursor":
        #packet = ["cursor", x, y, width, height, xhot, yhot, serial, pixels, name]
        #or:
        #packet = ["cursor", "png", x, y, width, height, xhot, yhot, serial, pixels, name]
        #or:
        #packet = ["cursor", ""]
        if len(packet) >= 8:
            #hard to distinguish png cursors from normal cursors...
            try:
                #plain cursor: packet[1] is the x coordinate
                int(packet[1])
                self._packet_recompress(packet, 8, "cursor")
            except (TypeError, ValueError):
                #png cursor: packet[1] is the encoding string,
                #so the pixel data is one index further:
                self._packet_recompress(packet, 9, "cursor")
    elif packet_type == "window-icon":
        self._packet_recompress(packet, 5, "icon")
    elif packet_type == "send-file":
        if packet[6]:
            packet[6] = Compressed("file-data", packet[6])
    elif packet_type == "send-file-chunk":
        if packet[3]:
            packet[3] = Compressed("file-chunk-data", packet[3])
    elif packet_type == "challenge":
        from xpra.net.crypto import get_salt
        #client may have already responded to the challenge,
        #so we have to handle authentication from this end
        salt = packet[1]
        digest = packet[3]
        client_salt = get_salt(len(salt))
        salt = xor_str(salt, client_salt)
        if digest != b"hmac":
            #(fixed: the message was never %-formatted,
            # std(digest) was passed as a stray extra argument to stop())
            self.stop("digest mode '%s' not supported" % std(digest))
            return
        password = self.disp_desc.get("password", self.session_options.get("password"))
        #NOTE(review): this logs the plaintext password at debug level -
        #consider redacting it
        log("password from %s / %s = %s", self.disp_desc, self.session_options, password)
        if not password:
            self.stop("authentication requested by the server, but no password available for this session")
            return
        import hmac, hashlib
        password = strtobytes(password)
        salt = strtobytes(salt)
        #md5 is what this legacy "hmac" digest mode mandates on the wire,
        #changing it here would break authentication with the server:
        challenge_response = hmac.HMAC(password, salt, digestmod=hashlib.md5).hexdigest()
        log.info("sending %s challenge response", digest)
        self.send_hello(challenge_response, client_salt)
        return
    self.queue_client_packet(packet)