Example #1
0
def webm_encode(image, quality):
    """ Encode an image as webp using the enc_webm codec.

        Picks the lossless encoder when quality is maxed out (>=100) and the
        lossless codec is available, otherwise falls back to the lossy one.
        Returns the standard encode tuple:
        (coding, compressed wrapper, client options, width, height, stride, bpp)
    """
    assert enc_webm and webp_handlers, "webp components are missing"

    BitmapHandler = webp_handlers.BitmapHandler
    #maps the input pixel format to:
    #(bitmap handler constant, lossy encoder name, lossless encoder name, has alpha)
    handler_encs = {
                "RGB" : (BitmapHandler.RGB,     "EncodeRGB",  "EncodeLosslessRGB",  False),
                "BGR" : (BitmapHandler.BGR,     "EncodeBGR",  "EncodeLosslessBGR",  False),
                "RGBA": (BitmapHandler.RGBA,    "EncodeRGBA", "EncodeLosslessRGBA", True),
                "RGBX": (BitmapHandler.RGBA,    "EncodeRGBA", "EncodeLosslessRGBA", False),
                "BGRA": (BitmapHandler.BGRA,    "EncodeBGRA", "EncodeLosslessBGRA", True),
                "BGRX": (BitmapHandler.BGRA,    "EncodeBGRA", "EncodeLosslessBGRA", False),
                }
    pixel_format = image.get_pixel_format()
    h_e = handler_encs.get(pixel_format)
    assert h_e is not None, "cannot handle rgb format %s with webp!" % pixel_format
    bh, lossy_enc, lossless_enc, has_alpha = h_e
    q = max(1, quality)
    enc = None
    if q>=100 and has_codec("enc_webm_lossless"):
        enc = getattr(enc_webm, lossless_enc)
        kwargs = {}
        client_options = {}
        #fixed: the argument list now matches the format string placeholders
        log("webm_encode(%s, %s) using lossless encoder=%s for %s", image, q, enc, pixel_format)
    if enc is None:
        enc = getattr(enc_webm, lossy_enc)
        kwargs = {"quality" : q}
        client_options = {"quality" : q}
        #fixed: the argument list now matches the format string placeholders
        log("webm_encode(%s, %s) using lossy encoder=%s with quality=%s for %s", image, q, enc, q, pixel_format)
    handler = BitmapHandler(image.get_pixels(), bh, image.get_width(), image.get_height(), image.get_rowstride())
    bpp = 24
    if has_alpha:
        client_options["has_alpha"] = True
        bpp = 32
    return "webp", Compressed("webp", str(enc(handler, **kwargs).data)), client_options, image.get_width(), image.get_height(), 0, bpp
Example #2
0
def webp_encode(coding, image, supports_transparency, quality, speed, options):
    """ Encode an image as webp, preferring the Cython enc_webp codec,
        then the enc_webm codec, and finally falling back to PIL.

        Returns the standard encode tuple:
        (coding, compressed data, client options, width, height, stride, bpp)
    """
    stride = image.get_rowstride()
    enc_webp = get_codec("enc_webp")
    #the Cython codec only handles 4-byte-aligned BGRA/BGRX input:
    if enc_webp and stride>0 and stride%4==0 and image.get_pixel_format() in ("BGRA", "BGRX"):
        #prefer Cython module:
        alpha = supports_transparency and image.get_pixel_format().find("A")>=0
        w = image.get_width()
        h = image.get_height()
        if quality==100:
            #webp lossless is unbearably slow for only marginal compression improvements,
            #so force max speed:
            speed = 100
        else:
            #normalize speed for webp: avoid low speeds!
            speed = int(sqrt(speed) * 10)
        speed = max(0, min(100, speed))
        #fixed: use integer division - stride%4==0 is checked above, so this is exact
        #(plain "/" would pass a float under Python 3)
        cdata = enc_webp.compress(image.get_pixels(), w, h, stride=stride//4, quality=quality, speed=speed, has_alpha=alpha)
        client_options = {"speed" : speed}
        if quality>=0 and quality<=100:
            client_options["quality"] = quality
        if alpha:
            client_options["has_alpha"] = True
        return "webp", Compressed("webp", cdata), client_options, image.get_width(), image.get_height(), 0, 24
    enc_webm = get_codec("enc_webm")
    webp_handlers = get_codec("webm_bitmap_handlers")
    if enc_webm and webp_handlers:
        return webm_encode(image, quality)
    #fallback to PIL
    return PIL_encode(coding, image, quality, speed, supports_transparency)
Example #3
0
 def make_screenshot_packet(self):
     """ Capture the root window and wrap the result in a "screenshot" packet. """
     screenshot = self.root_window_model.take_screenshot()
     w, h, encoding, rowstride, data = screenshot
     #only png is supported for now:
     assert encoding == "png"
     return ["screenshot", w, h, encoding, rowstride, Compressed(encoding, data)]
Example #4
0
    def process_server_packet(self, proto, packet):
        """ Filter and mangle a packet received from the server
            before relaying it on to the client.
        """
        packet_type = packet[0]
        debug("process_server_packet: %s", packet_type)
        if packet_type == Protocol.CONNECTION_LOST:
            self.stop("server connection lost", proto)
            return
        if packet_type == "hello":
            server_caps = typedict(packet[1])
            maxw, maxh = server_caps.intpair("max_desktop_size", (4096, 4096))
            #allow for a full desktop worth of uncompressed pixels:
            proto.max_packet_size = maxw * maxh * 4
            caps = self.filter_server_caps(server_caps)
            #add new encryption caps:
            if self.cipher:
                caps.update(new_cipher_caps(self.client_protocol, self.cipher,
                                            self.encryption_key))
            packet = ("hello", caps)
        elif packet_type == "info-response":
            #adds proxy info:
            info = packet[1]
            info.update(get_server_info("proxy."))
            info.update(get_thread_info("proxy.", proto))
        elif packet_type == "draw":
            #packet = ["draw", wid, x, y, outw, outh, coding, data, self._damage_packet_sequence, outstride, client_options]
            #ensure we don't try to re-compress the pixel data in the network layer:
            #(re-add the "compressed" marker that gets lost when we re-assemble packets)
            coding = packet[6]
            if coding != "mmap":
                packet[7] = Compressed("%s pixels" % coding, packet[7])
        elif packet_type == "cursor":
            #packet = ["cursor", x, y, width, height, xhot, yhot, serial, pixels, name]
            #or:
            #packet = ["cursor", ""]
            if len(packet) >= 9:
                pixels = packet[8]
                if len(pixels) < 64:
                    #tiny cursors travel uncompressed:
                    packet[8] = str(pixels)
                else:
                    packet[8] = compressed_wrapper("cursor", pixels)
        self.queue_client_packet(packet)
Example #5
0
 def do_make_screenshot_packet(self):
     """ Compose a screenshot of all the windows we manage into a single
         png image and return it as a packet:
         ["screenshot", width, height, "png", rowstride, Compressed("png", data)]
     """
     debug = log.debug
     debug("grabbing screenshot")
     regions = []
     OR_regions = []
     for wid in reversed(sorted(self._id_to_window.keys())):
         window = self._id_to_window.get(wid)
         debug("screenshot: window(%s)=%s", wid, window)
         if window is None:
             continue
         if window.is_tray():
             debug("screenshot: skipping tray window %s", wid)
             continue
         if not window.is_managed():
             debug("screenshot: window %s is not/no longer managed", wid)
             continue
         if window.is_OR():
             x, y = window.get_property("geometry")[:2]
         else:
             x, y = self._desktop_manager.window_geometry(window)[:2]
         debug("screenshot: position(%s)=%s,%s", window, x, y)
         w, h = window.get_dimensions()
         debug("screenshot: size(%s)=%sx%s", window, w, h)
         try:
             img = trap.call_synced(window.get_image, 0, 0, w, h)
         except Exception:
             #narrowed from a bare except: screenshot stays best-effort per window
             log.warn("screenshot: window %s could not be captured", wid)
             continue
         if img is None:
             log.warn("screenshot: no pixels for window %s", wid)
             continue
         #fixed: the arguments were wrapped in a tuple,
         #which does not match the two format placeholders
         debug("screenshot: image=%s, size=%s", img, img.get_size())
         if img.get_pixel_format() not in ("RGB", "RGBA", "XRGB", "BGRX", "ARGB", "BGRA"):
             log.warn("window pixels for window %s using an unexpected rgb format: %s", wid, img.get_pixel_format())
             continue
         item = (wid, x, y, img)
         if window.is_OR():
             OR_regions.append(item)
         elif self._has_focus==wid:
             #window with focus first (drawn last)
             regions.insert(0, item)
         else:
             regions.append(item)
     all_regions = OR_regions+regions
     if len(all_regions)==0:
         debug("screenshot: no regions found, returning empty 0x0 image!")
         return ["screenshot", 0, 0, "png", -1, ""]
     debug("screenshot: found regions=%s, OR_regions=%s", len(regions), len(OR_regions))
     #in theory, we could run the rest in a non-UI thread since we're done with GTK..
     minx = min([x for (_,x,_,_) in all_regions])
     miny = min([y for (_,_,y,_) in all_regions])
     maxx = max([(x+img.get_width()) for (_,x,_,img) in all_regions])
     maxy = max([(y+img.get_height()) for (_,_,y,img) in all_regions])
     width = maxx-minx
     height = maxy-miny
     debug("screenshot: %sx%s, min x=%s y=%s", width, height, minx, miny)
     from PIL import Image                           #@UnresolvedImport
     screenshot = Image.new("RGBA", (width, height))
     for wid, x, y, img in reversed(all_regions):
         pixel_format = img.get_pixel_format()
         target_format = {
                  "XRGB"   : "RGB",
                  "BGRX"   : "RGB",
                  "BGRA"   : "RGBA"}.get(pixel_format, pixel_format)
         try:
             #fixed: use this image's own dimensions -
             #(w, h) were stale leftovers from the last iteration of the capture loop above
             window_image = Image.frombuffer(target_format, (img.get_width(), img.get_height()), img.get_pixels(), "raw", pixel_format, img.get_rowstride())
         except Exception:
             log.warn("failed to parse window pixels in %s format", pixel_format)
             continue
         tx = x-minx
         ty = y-miny
         screenshot.paste(window_image, (tx, ty))
     buf = StringIOClass()
     screenshot.save(buf, "png")
     data = buf.getvalue()
     buf.close()
     packet = ["screenshot", width, height, "png", width*4, Compressed("png", data)]
     debug("screenshot: %sx%s %s", packet[1], packet[2], packet[-1])
     return packet
Example #6
0
def rgb_encode(coding, image, rgb_formats, supports_transparency, speed,
               rgb_zlib, rgb_lz4, encoding_client_options, supports_rgb24zlib):
    """ Send the image as raw RGB pixels, optionally re-strided and
        optionally compressed with zlib or lz4.

        Returns the standard encode tuple:
        (coding, wire data, client options, width, height, stride, bpp)
    """
    pixel_format = image.get_pixel_format()
    #log("rgb_encode%s pixel_format=%s, rgb_formats=%s", (coding, image, rgb_formats, supports_transparency, speed, rgb_zlib, rgb_lz4, encoding_client_options, supports_rgb24zlib), pixel_format, rgb_formats)
    if pixel_format not in rgb_formats:
        if not rgb_reformat(image, rgb_formats, supports_transparency):
            raise Exception(
                "cannot find compatible rgb format to use for %s! (supported: %s)"
                % (pixel_format, rgb_formats))
        #get the new format:
        pixel_format = image.get_pixel_format()
        #switch encoding if necessary:
        if len(pixel_format) == 4 and coding == "rgb24":
            coding = "rgb32"
        elif len(pixel_format) == 3 and coding == "rgb32":
            coding = "rgb24"
    #always tell client which pixel format we are sending:
    options = {"rgb_format": pixel_format}
    #compress here and return a wrapper so network code knows it is already zlib compressed:
    pixels = image.get_pixels()

    #special case for when rowstride is so much bigger than the width
    #that we would end up sending large chunks of padding with each row of pixels
    #this happens with XShm pixel data (the default)
    stride = image.get_rowstride()
    width = image.get_width()
    rstride = roundup(width * len(pixel_format),
                      4)  #a reasonable stride: rounded up to 4
    height = image.get_height()
    if stride > 8 and rstride < stride:
        al = len(pixels)  #current buffer size
        el = rstride * height  #desirable size we could have
        #fixed: integer division ("/" would yield a float under Python 3)
        if al - el > 1024 and el * 110 // 100 < al:  #is it worth re-striding to save space?
            #we'll save at least 1KB and 10%, do it
            #Note: we could also change the pixel format whilst we're at it
            # and convert BGRX to RGB for example (assuming RGB is also supported by the client)
            rows = []
            for y in range(height):
                rows.append(pixels[stride * y:stride * y + rstride])
            pixels = "".join(rows)
            log(
                "rgb_encode: %s pixels re-stride saving %i%% from %s (%s bytes) to %s (%s bytes)",
                pixel_format, 100 - 100 * el // al, stride, al, rstride, el)
            stride = rstride

    #compression
    #by default, wire=raw:
    raw_data = str(pixels)
    wire_data = raw_data
    level = 0
    algo = "not"
    if rgb_zlib or rgb_lz4:
        #fixed: integer division so level stays an int under Python 3
        level = max(0, min(5, int(115 - speed) // 20))
        if len(pixels) < 1024:
            #fewer pixels, make it more likely we won't bother compressing:
            level = level // 2
    if level > 0:
        lz4 = use_lz4 and rgb_lz4 and level <= 3
        wire_data = compressed_wrapper(coding, pixels, level=level, lz4=lz4)
        raw_data = wire_data.data
        #log("%s/%s data compressed from %s bytes down to %s (%s%%) with lz4=%s",
        #         coding, pixel_format, len(pixels), len(raw_data), int(100.0*len(raw_data)/len(pixels)), self.rgb_lz4)
        if len(raw_data) >= (len(pixels) - 32):
            #compressed is actually bigger! (use uncompressed)
            level = 0
            wire_data = str(pixels)
            raw_data = wire_data
        else:
            if lz4:
                options["lz4"] = True
                algo = "lz4"
            else:
                options["zlib"] = level
                algo = "zlib"
    if pixel_format.upper().find("A") >= 0 or pixel_format.upper().find(
            "X") >= 0:
        bpp = 32
    else:
        bpp = 24
    log(
        "rgb_encode using level=%s, %s compressed %sx%s in %s/%s: %s bytes down to %s",
        level, algo, image.get_width(), image.get_height(), coding,
        pixel_format, len(pixels), len(raw_data))
    if not encoding_client_options or not supports_rgb24zlib:
        #legacy clients: no options dict, raw wire data
        return coding, wire_data, {}, width, height, stride, bpp
    #wrap it using "Compressed" so the network layer receiving it
    #won't decompress it (leave it to the client's draw thread)
    return coding, Compressed(coding,
                              raw_data), options, width, height, stride, bpp
Example #7
0
            im = im.convert("P", palette=PIL.Image.WEB, colors=255)
            bpp = 8
        if mask:
            # paste the alpha mask to the color of index 255
            im.paste(255, mask)
        kwargs = im.info
        if mask is not None:
            client_options["transparency"] = 255
            kwargs["transparency"] = 255
        im.save(buf, "PNG", **kwargs)
    log("sending %sx%s %s as %s, mode=%s, options=%s", w, h, pixel_format,
        coding, im.mode, kwargs)
    data = buf.getvalue()
    buf.close()
    return coding, Compressed(
        coding,
        data), client_options, image.get_width(), image.get_height(), 0, bpp


def argb_swap(image, rgb_formats, supports_transparency):
    """ use the argb codec to do the RGB byte swapping """
    pixel_format = image.get_pixel_format()
    if None in (bgra_to_rgb, bgra_to_rgba, argb_to_rgb, argb_to_rgba):
        warn_encoding_once(
            "argb-module-missing",
            "no argb module, cannot convert %s to one of: %s" %
            (pixel_format, rgb_formats))
        return False

    #try to fallback to argb module
    #if we have one of the target pixel formats:
Example #8
0
    def video_encode(self, encoding, image, options):
        """
            This method is used by make_data_packet to encode frames using video encoders.
            Video encoders only deal with fixed dimensions,
            so we must clean and reinitialize the encoder if the window dimensions
            has changed.
            Since this runs in the non-UI thread 'data_to_packet', we must
            use the '_lock' to prevent races.
        """
        debug("video_encode%s", (encoding, image, options))
        x, y, w, h = image.get_geometry()[:4]
        assert x == 0 and y == 0, "invalid position: %s,%s" % (x, y)
        src_format = image.get_pixel_format()
        #fixed: use the lock as a context manager - the old acquire() inside
        #try/finally would release an unheld lock if acquire() itself raised
        with self._lock:
            if not self.check_pipeline(encoding, w, h, src_format):
                raise Exception(
                    "failed to setup a video pipeline for %s encoding with source format %s"
                    % (encoding, src_format))

            #dw and dh are the edges we don't handle here
            width = w & self.width_mask
            height = h & self.height_mask
            debug("video_encode%s wxh=%s-%s, widthxheight=%sx%s",
                  (encoding, image, options), w, h, width, height)

            csc_image, csc, enc_width, enc_height = self.csc_image(
                image, width, height)

            start = time.time()
            data, client_options = self._video_encoder.compress_image(
                csc_image, options)
            end = time.time()

            if csc_image is image:
                #no csc step, so the image comes from the UI server
                #and must be freed in the UI thread:
                self.idle_add(csc_image.free)
            else:
                #csc temporary images can be freed at will
                csc_image.free()
            del csc_image

            if data is None:
                log.error("video_encode: ouch, %s compression failed",
                          encoding)
                return None, None, 0
            if self.encoding_client_options:
                #tell the client which colour subsampling we used:
                #(note: see csc_equiv!)
                if self.uses_csc_atoms:
                    client_options["csc"] = self.csc_equiv(csc)
                else:
                    #ugly hack: expose internal ffmpeg/libav constant
                    #for old versions without the "csc_atoms" feature:
                    client_options[
                        "csc_pixel_format"] = get_avutil_enum_from_colorspace(
                            csc)
                #tell the client about scaling (the size of the encoded picture):
                #(unless the video encoder has already done so):
                if self._csc_encoder and ("scaled_size" not in client_options
                                          ) and (enc_width != width
                                                 or enc_height != height):
                    client_options["scaled_size"] = enc_width, enc_height
            debug(
                "video_encode encoder: %s %sx%s result is %s bytes (%.1f MPixels/s), client options=%s",
                encoding, enc_width, enc_height, len(data),
                (enc_width * enc_height /
                 (end - start + 0.000001) / 1024.0 / 1024.0), client_options)
            return self._video_encoder.get_type(), Compressed(
                encoding, data), client_options, width, height, 0, 24
    def process_draw(self, packet):
        """ Process a "draw" packet from the real server:
            either pass the pixels through (re-tagged so the network layer
            won't re-compress them), or re-encode a proxy video packet with
            one of our video encoders before relaying it to the client.
            Returns True if the packet should be forwarded.
        """
        wid, x, y, width, height, encoding, pixels, _, rowstride, client_options = packet[
            1:11]
        #never modify mmap packets
        if encoding == "mmap":
            return True

        if not self.video_encoder_types or not client_options or not client_options.get(
                "proxy", False):
            #ensure we don't try to re-compress the pixel data in the network layer:
            #(re-add the "compressed" marker that gets lost when we re-assemble packets)
            packet[7] = Compressed("%s pixels" % encoding, packet[7])
            return True

        #we have a proxy video packet:
        rgb_format = client_options.get("rgb_format", "")
        log("proxy draw: client_options=%s", client_options)

        def send_updated(encoding, compressed_data, client_options):
            #update the packet with actual encoding data used:
            packet[6] = encoding
            packet[7] = compressed_data
            packet[10] = client_options
            log("returning %s bytes from %s", len(compressed_data),
                len(pixels))
            return (wid not in self.lost_windows)

        def passthrough():
            #passthrough as plain RGB:
            newdata = bytearray(pixels)
            #force alpha (and assume BGRX..) for now:
            #fixed: integer division, and assign an int (not chr) to the bytearray
            #so this also works under Python 3
            for i in range(len(pixels) // 4):
                newdata[i * 4 + 3] = 255
            packet[9] = client_options.get("rowstride", 0)
            return send_updated("rgb32", str(newdata),
                                {"rgb_format": rgb_format})

        if PASSTHROUGH:
            passthrough()
            return

        #video encoding: find existing encoder
        ve = self.video_encoders.get(wid)
        if ve:
            if ve in self.lost_windows:
                #we cannot clean the video encoder here, there may be more frames queue up
                #"lost-window" in encode_loop will take care of it safely
                return False
            #we must verify that the encoder is still valid
            #and scrap it if not (ie: when window is resized)
            if ve.get_width() != width or ve.get_height() != height:
                log(
                    "closing existing video encoder %s because dimensions have changed from %sx%s to %sx%s",
                    ve, ve.get_width(), ve.get_height(), width, height)
                ve.clean()
                ve = None
            elif ve.get_encoding() != encoding:
                #fixed: the format string has 3 placeholders, the encoder itself was missing
                log(
                    "closing existing video encoder %s because encoding has changed from %s to %s",
                    ve, ve.get_encoding(), encoding)
                ve.clean()
                ve = None
        #scaling and depth are proxy-encoder attributes:
        scaling = client_options.get("scaling", (1, 1))
        depth = client_options.get("depth", 24)
        rowstride = client_options.get("rowstride", rowstride)
        quality = client_options.get("quality", -1)
        speed = client_options.get("speed", -1)
        timestamp = client_options.get("timestamp")

        image = ImageWrapper(x,
                             y,
                             width,
                             height,
                             pixels,
                             rgb_format,
                             depth,
                             rowstride,
                             planes=ImageWrapper.PACKED)
        if timestamp is not None:
            image.set_timestamp(timestamp)

        #the encoder options are passed through:
        encoder_options = client_options.get("options", {})
        if not ve:
            #make a new video encoder:
            spec = self._find_video_encoder(encoding, rgb_format)
            if spec is None:
                #no video encoder!
                from xpra.server.picture_encode import PIL_encode, PIL, warn_encoding_once
                if PIL is None:
                    warn_encoding_once(
                        "no-video-no-PIL",
                        "no video encoder found for rgb format %s, sending as plain RGB!"
                        % rgb_format)
                    passthrough()
                    return
                log("no video encoder available: sending as jpeg")
                coding, compressed_data, client_options, _, _, _, _ = PIL_encode(
                    "jpeg", image, quality, speed, False)
                return send_updated(coding, compressed_data, client_options)

            log("creating new video encoder %s for window %s", spec, wid)
            ve = spec.make_instance()
            #dst_formats is specified with first frame only:
            dst_formats = client_options.get("dst_formats")
            if dst_formats is not None:
                #save it in case we timeout the video encoder,
                #so we can instantiate it again, even from a frame no>1
                self.video_encoders_dst_formats = dst_formats
            else:
                assert self.video_encoders_dst_formats, "BUG: dst_formats not specified for proxy and we don't have it either"
                dst_formats = self.video_encoders_dst_formats
            ve.init_context(width, height, rgb_format, dst_formats, encoding,
                            quality, speed, scaling, {})
            self.video_encoders[wid] = ve
            self.video_encoders_last_used_time[wid] = time.time(
            )  #just to make sure this is always set
        else:
            if quality >= 0:
                ve.set_encoding_quality(quality)
            if speed >= 0:
                ve.set_encoding_speed(speed)
        #actual video compression:
        log("proxy compression using %s with quality=%s, speed=%s", ve,
            quality, speed)
        data, client_options = ve.compress_image(image, encoder_options)
        self.video_encoders_last_used_time[wid] = time.time()
        return send_updated(ve.get_encoding(), Compressed(encoding, data),
                            client_options)