Example No. 1
 def add_stats(self, info, suffix=""):
     WindowSource.add_stats(self, info, suffix)
     prefix = "window[%s]." % self.wid
     info[prefix+"client.csc_modes"] = self.csc_modes
     info[prefix+"client.uses_swscale"] = self.uses_swscale
     info[prefix+"client.uses_csc_atoms"] = self.uses_csc_atoms
     info[prefix+"client.supports_scaling"] = self.video_scaling
     info[prefix+"scaling"] = self.actual_scaling
     if self._csc_encoder:
         info[prefix+"csc"+suffix] = self._csc_encoder.get_type()
         ci = self._csc_encoder.get_info()
         for k,v in ci.items():
             info[prefix+"csc."+k+suffix] = v
     if self._video_encoder:
         info[prefix+"encoder"+suffix] = self._video_encoder.get_type()
         vi = self._video_encoder.get_info()
         for k,v in vi.items():
             info[prefix+"encoder."+k+suffix] = v
     if self.last_pipeline_params:
         encoding, width, height, src_format = self.last_pipeline_params
         info[prefix+"encoding.pipeline_param.encoding"+suffix] = encoding
         info[prefix+"encoding.pipeline_param.dimensions"+suffix] = width, height
         info[prefix+"encoding.pipeline_param.src_format"+suffix] = src_format
     if self.last_pipeline_scores:
         i = 0
         for score, csc_spec, enc_in_format, encoder_spec in self.last_pipeline_scores:
             info[prefix+("encoding.pipeline_option[%s].score" % i)+suffix] = score
             info[prefix+("encoding.pipeline_option[%s].csc" % i)+suffix] = repr(csc_spec)
             info[prefix+("encoding.pipeline_option[%s].format" % i)+suffix] = str(enc_in_format)
             info[prefix+("encoding.pipeline_option[%s].encoder" % i)+suffix] = repr(encoder_spec)
             i += 1
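
For reference, add_stats flattens everything into a single dict keyed by a per-window prefix. A minimal sketch of the resulting key layout, using an assumed window id and purely illustrative values (the real values come from self._csc_encoder and self._video_encoder):

    # Illustrative sketch only: the flat key scheme produced above, with assumed values.
    wid = 1
    prefix = "window[%s]." % wid
    info = {
        prefix + "client.csc_modes"                   : ("YUV420P", "YUV422P", "YUV444P"),
        prefix + "client.uses_swscale"                : True,
        prefix + "csc"                                : "swscale",      # from get_type()
        prefix + "encoder"                            : "x264",         # from get_type()
        prefix + "encoding.pipeline_param.dimensions" : (1920, 1080),
        prefix + "encoding.pipeline_option[0].score"  : 73,
    }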
Example No. 2
    def __init__(self, *args):
        WindowSource.__init__(self, *args)
        # client uses swscale (which has extra limits on sizes)
        self.uses_swscale = self.encoding_options.get("uses_swscale", True)
        self.uses_csc_atoms = self.encoding_options.get("csc_atoms", False)
        self.video_scaling = self.encoding_options.get("video_scaling", False)
        if not self.encoding_client_options:
            # old clients can only use 420P:
            def_csc_modes = ("YUV420P", )
        else:
            # default for newer clients that don't specify "csc_modes":
            def_csc_modes = ("YUV420P", "YUV422P", "YUV444P")
        # 0.10 onwards should have specified csc_modes:
        self.csc_modes = self.encoding_options.get("csc_modes", def_csc_modes)

        for x in ("vpx", "x264"):
            if x in self.SERVER_CORE_ENCODINGS:
                self._encoders[x] = self.video_encode

        self.width_mask = 0xFFFF
        self.height_mask = 0xFFFF
        self.actual_scaling = None

        self._csc_encoder = None
        self._video_encoder = None
        self._lock = Lock()  # to ensure we serialize access to the encoder and its internals

        self.last_pipeline_params = None
        self.last_pipeline_scores = []
        self._video_pipeline_helper.may_init()
Example No. 3
 def set_client_properties(self, properties):
     #client may restrict csc modes for specific windows
     self.csc_modes = properties.get("encoding.csc_modes", self.csc_modes)
     self.video_scaling = properties.get("encoding.video_scaling", self.video_scaling)
     self.uses_swscale = properties.get("encoding.uses_swscale", self.uses_swscale)
     WindowSource.set_client_properties(self, properties)
     debug("set_client_properties(%s) csc_modes=%s, video_scaling=%s, uses_swscale=%s", properties, self.csc_modes, self.video_scaling, self.uses_swscale)
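
A hedged sketch of the kind of per-window properties dict this method consumes; the keys are the ones looked up above, the values and the instance name are purely illustrative:

    # Illustrative per-window client properties using the keys read above:
    properties = {
        "encoding.csc_modes"     : ("YUV420P", ),
        "encoding.video_scaling" : True,
        "encoding.uses_swscale"  : False,
    }
    # window_source.set_client_properties(properties)   # hypothetical WindowVideoSource instance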
Example No. 4
    def __init__(self, *args):
        WindowSource.__init__(self, *args)
        #client uses swscale (which has extra limits on sizes)
        self.uses_swscale = self.encoding_options.get("uses_swscale", True)
        self.uses_csc_atoms = self.encoding_options.get("csc_atoms", False)
        self.video_scaling = self.encoding_options.get("video_scaling", False)
        if not self.encoding_client_options:
            #old clients can only use 420P:
            def_csc_modes = ("YUV420P", )
        else:
            #default for newer clients that don't specify "csc_modes":
            def_csc_modes = ("YUV420P", "YUV422P", "YUV444P")
        #0.10 onwards should have specified csc_modes:
        self.csc_modes = self.encoding_options.get("csc_modes", def_csc_modes)

        for x in ("vpx", "x264"):
            if x in self.SERVER_CORE_ENCODINGS:
                self._encoders[x] = self.video_encode

        self.width_mask = 0xFFFF
        self.height_mask = 0xFFFF
        self.actual_scaling = None

        self._csc_encoder = None
        self._video_encoder = None
        self._lock = Lock()  #to ensure we serialize access to the encoder and its internals

        self.last_pipeline_params = None
        self.last_pipeline_scores = []
        self._video_pipeline_helper.may_init()
Example No. 5
 def cancel_damage(self):
     WindowSource.cancel_damage(self)
     if self._last_sequence_queued<self._sequence:
         #we must clean the video encoder to ensure
         #we will resend a key frame because it looks like we will
         #drop a frame which is being processed
         self.cleanup_codecs()
Example No. 6
 def add_stats(self, info, suffix=""):
     WindowSource.add_stats(self, info, suffix)
     prefix = "window[%s]." % self.wid
     info[prefix + "client.csc_modes"] = self.csc_modes
     info[prefix + "client.uses_swscale"] = self.uses_swscale
     info[prefix + "client.uses_csc_atoms"] = self.uses_csc_atoms
     info[prefix + "client.supports_scaling"] = self.video_scaling
     info[prefix + "scaling"] = self.actual_scaling or (1, 1)
     if self._csc_encoder:
         info[prefix + "csc" + suffix] = self._csc_encoder.get_type()
         ci = self._csc_encoder.get_info()
         for k, v in ci.items():
             info[prefix + "csc." + k + suffix] = v
     if self._video_encoder:
         info[prefix + "encoder" + suffix] = self._video_encoder.get_type()
         vi = self._video_encoder.get_info()
         for k, v in vi.items():
             info[prefix + "encoder." + k + suffix] = v
     if self.last_pipeline_params:
         encoding, width, height, src_format = self.last_pipeline_params
         info[prefix + "pipeline_param.encoding" + suffix] = encoding
         info[prefix + "pipeline_param.dimensions" + suffix] = width, height
         info[prefix + "pipeline_param.src_format" + suffix] = src_format
     if self.last_pipeline_scores:
         i = 0
         for score, csc_spec, enc_in_format, encoder_spec in self.last_pipeline_scores:
             info[prefix + ("pipeline_option[%s].score" % i) +
                  suffix] = score
             info[prefix + ("pipeline_option[%s].csc" % i) +
                  suffix] = repr(csc_spec)
             info[prefix + ("pipeline_option[%s].format" % i) +
                  suffix] = str(enc_in_format)
             info[prefix + ("pipeline_option[%s].encoder" % i) +
                  suffix] = repr(encoder_spec)
             i += 1
Example No. 7
 def cancel_damage(self):
     WindowSource.cancel_damage(self)
     if self._last_sequence_queued < self._sequence:
         #we must clean the video encoder to ensure
         #we will resend a key frame because it looks like we will
         #drop a frame which is being processed
         self.cleanup_codecs()
Example No. 8
    def reconfigure(self, force_reload=False):
        """
            This is called when we want to force a full re-init (force_reload=True)
            or from the timer that allows to tune the quality and speed.
            (this tuning is done in WindowSource.reconfigure)
            Here we re-evaluate if the pipeline we are currently using
            is really the best one, and if not we switch to the best one.
            This uses get_video_pipeline_options() to get a list of pipeline
            options with a score for each.
        """
        debug("reconfigure(%s) csc_encoder=%s, video_encoder=%s", force_reload, self._csc_encoder, self._video_encoder)
        WindowSource.reconfigure(self, force_reload)
        if not self._video_encoder:
            return
        try:
            self._lock.acquire()
            ve = self._video_encoder
            if not ve or ve.is_closed():
                #could have been freed since we got the lock!
                return
            if force_reload:
                if self._csc_encoder:
                    self.do_csc_encoder_cleanup()
                self.do_video_encoder_cleanup()
                return

            pixel_format = None
            if self._csc_encoder:
                pixel_format = self._csc_encoder.get_src_format()
            else:
                pixel_format = ve.get_src_format()
            width = ve.get_width()
            height = ve.get_height()
            quality = self.get_current_quality()
            speed = self.get_current_speed()

            scores = self.get_video_pipeline_options(ve.get_encoding(), width, height, pixel_format)
            if len(scores)>0:
                debug("reconfigure(%s) best=%s", force_reload, scores[0])
                _, csc_spec, enc_in_format, encoder_spec = scores[0]
                if self._csc_encoder:
                    if csc_spec is None or \
                       type(self._csc_encoder)!=csc_spec.codec_class or \
                       self._csc_encoder.get_dst_format()!=enc_in_format:
                        debug("reconfigure(%s) found better csc encoder: %s", force_reload, scores[0])
                        self.do_csc_encoder_cleanup()
                if type(self._video_encoder)!=encoder_spec.codec_class or \
                   self._video_encoder.get_src_format()!=enc_in_format:
                    debug("reconfigure(%s) found better video encoder: %s", force_reload, scores[0])
                    self.do_video_encoder_cleanup()

            if self._video_encoder is None:
                self.setup_pipeline(scores, width, height, pixel_format)

            if self._video_encoder:
                self._video_encoder.set_encoding_speed(speed)
                self._video_encoder.set_encoding_quality(quality)
        finally:
            self._lock.release()
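
Each entry in the scores list returned by get_video_pipeline_options() is unpacked above as (score, csc_spec, enc_in_format, encoder_spec), with the best-scoring option first. A minimal sketch of that shape, with the spec objects stubbed out as hypothetical strings:

    # Hypothetical illustration of the pipeline score list consumed above:
    # (score, csc_spec, enc_in_format, encoder_spec), best score first.
    scores = [
        (73, None,           "YUV420P", "x264 encoder spec"),
        (51, "swscale spec", "YUV444P", "vpx encoder spec"),
    ]
    best_score, csc_spec, enc_in_format, encoder_spec = scores[0]
    # csc_spec is None when the source pixel format can be fed to the encoder directly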
Example No. 9
 def set_client_properties(self, properties):
     #client may restrict csc modes for specific windows
     self.csc_modes = properties.get("encoding.csc_modes", self.csc_modes)
     self.video_scaling = properties.get("encoding.video_scaling",
                                         self.video_scaling)
     self.uses_swscale = properties.get("encoding.uses_swscale",
                                        self.uses_swscale)
     WindowSource.set_client_properties(self, properties)
     debug(
         "set_client_properties(%s) csc_modes=%s, video_scaling=%s, uses_swscale=%s",
         properties, self.csc_modes, self.video_scaling, self.uses_swscale)
Example No. 10
    def __init__(self, *args):
        WindowSource.__init__(self, *args)
        #client uses swscale (which has extra limits on sizes)
        self.uses_swscale = self.encoding_options.get("uses_swscale", True)
        self.uses_csc_atoms = self.encoding_options.get("csc_atoms", False)
        self.video_scaling = self.encoding_options.get("video_scaling", False)
        self.video_reinit = self.encoding_options.get("video_reinit", False)
        if not self.encoding_client_options:
            #old clients can only use 420P:
            def_csc_modes = ("YUV420P", )
        else:
            #default for newer clients that don't specify "csc_modes":
            def_csc_modes = ("YUV420P", "YUV422P", "YUV444P")
        #0.10 onwards should have specified csc_modes:
        self.csc_modes = self.encoding_options.get("csc_modes", def_csc_modes)

        self.video_encodings = ("vp8", "vp9", "h264")
        for x in self.video_encodings:
            if x in self.server_core_encodings:
                self._encoders[x] = self.video_encode

        #these constraints get updated with real values
        #when we construct the video pipeline:
        self.min_w = 1
        self.min_h = 1
        self.max_w = 16384
        self.max_h = 16384
        self.width_mask = 0xFFFF
        self.height_mask = 0xFFFF
        self.actual_scaling = (1, 1)

        self._csc_encoder = None
        self._video_encoder = None
        self._lock = Lock()  #to ensure we serialize access to the encoder and its internals

        self.last_pipeline_params = None
        self.last_pipeline_scores = []
        self.video_helper = getVideoHelper()
        if self.encoding_options.get("proxy.video", False):
            #if we "proxy video", we will modify the video helper to add
            #new encoders, so we must make a deep copy to preserve the original:
            self.video_helper = getVideoHelper().clone(deep=True)
            #enabling video proxy:
            try:
                self.parse_proxy_video()
            except:
                log.error("failed to parse proxy video", exc_info=True)
Example No. 11
    def __init__(self, *args):
        WindowSource.__init__(self, *args)
        #client uses swscale (which has extra limits on sizes)
        self.uses_swscale = self.encoding_options.get("uses_swscale", True)
        self.uses_csc_atoms = self.encoding_options.get("csc_atoms", False)
        self.video_scaling = self.encoding_options.get("video_scaling", False)
        self.video_reinit = self.encoding_options.get("video_reinit", False)
        if not self.encoding_client_options:
            #old clients can only use 420P:
            def_csc_modes = ("YUV420P", )
        else:
            #default for newer clients that don't specify "csc_modes":
            def_csc_modes = ("YUV420P", "YUV422P", "YUV444P")
        #0.10 onwards should have specified csc_modes:
        self.csc_modes = self.encoding_options.get("csc_modes", def_csc_modes)

        self.video_encodings = ("vp8", "vp9", "h264")
        for x in self.video_encodings:
            if x in self.server_core_encodings:
                self._encoders[x] = self.video_encode

        #these constraints get updated with real values
        #when we construct the video pipeline:
        self.min_w = 1
        self.min_h = 1
        self.max_w = 16384
        self.max_h = 16384
        self.width_mask = 0xFFFF
        self.height_mask = 0xFFFF
        self.actual_scaling = (1, 1)

        self._csc_encoder = None
        self._video_encoder = None
        self._lock = Lock()               #to ensure we serialize access to the encoder and its internals

        self.last_pipeline_params = None
        self.last_pipeline_scores = []
        self.video_helper = getVideoHelper()
        if self.encoding_options.get("proxy.video", False):
            #if we "proxy video", we will modify the video helper to add
            #new encoders, so we must make a deep copy to preserve the original:
            self.video_helper = getVideoHelper().clone(deep=True)
            #enabling video proxy:
            try:
                self.parse_proxy_video()
            except:
                log.error("failed to parse proxy video", exc_info=True)
Example No. 12
    def do_get_best_encoding(self, batching, has_alpha, is_tray, is_OR,
                             pixel_count, ww, wh, current_encoding):
        """
            decide whether we send a full window update
            using the video encoder or if a small lossless region(s) is a better choice
        """
        encoding = WindowSource.do_get_best_encoding(self, batching, has_alpha,
                                                     is_tray, is_OR,
                                                     pixel_count, ww, wh,
                                                     current_encoding)
        if encoding is not None:
            #superclass knows best (usually a tray or transparent window):
            return encoding
        if current_encoding not in self.video_encodings:
            return None
        if ww < self.min_w or ww > self.max_w or wh < self.min_h or wh > self.max_h:
            #video encoder cannot handle this size!
            #(maybe this should be an 'assert' statement here?)
            return None

        def switch_to_lossless(reason):
            coding = self.find_common_lossless_encoder(has_alpha,
                                                       current_encoding,
                                                       ww * wh)
            debug(
                "do_get_best_encoding(..) temporarily switching to %s encoder for %s pixels: %s",
                coding, pixel_count, reason)
            return coding

        max_nvoip = MAX_NONVIDEO_OR_INITIAL_PIXELS
        max_nvp = MAX_NONVIDEO_PIXELS
        if not batching:
            max_nvoip *= 128
            max_nvp *= 128
        if self._sequence == 1 and is_OR and pixel_count < max_nvoip:
            #first frame of a small-ish OR window, those are generally short lived
            #so delay using a video encoder until the next frame:
            return switch_to_lossless("first small frame of an OR window")
        #ensure the dimensions we use for decision making are the ones actually used:
        ww = ww & self.width_mask
        wh = wh & self.height_mask
        if ww < self.min_w or ww > self.max_w or wh < self.min_h or wh > self.max_h:
            return switch_to_lossless(
                "window dimensions are unsuitable for this encoder/csc")
        if pixel_count < ww * wh * 0.01:
            #less than one percent of total area
            return switch_to_lossless("few pixels (%.2f%% of window)" %
                                      (100.0 * pixel_count / ww / wh))
        if pixel_count > max_nvp:
            #too many pixels, use current video encoder
            return self.get_core_encoding(has_alpha, current_encoding)
        if pixel_count < 0.5 * ww * wh and not batching:
            #less than 50% of the full window and we're not batching
            return switch_to_lossless("%i%% of image, not batching" %
                                      (100.0 * pixel_count / ww / wh))
        return self.get_core_encoding(has_alpha, current_encoding)
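
A quick worked instance of the area-based rules above (all numbers are illustrative, and MAX_NONVIDEO_PIXELS is assumed here rather than taken from the source):

    # Illustrative numbers for the percentage rules in the example above:
    ww, wh = 800, 600            # window dimensions
    pixel_count = 3000           # damaged pixels in this update
    # 3000 < 800*600*0.01 = 4800        -> "few pixels", switch to a lossless encoder
    # a 300000 pixel update (> max_nvp) -> keep using the current video encoder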
Example No. 13
    def do_get_best_encoding(self, batching, has_alpha, is_tray, is_OR, pixel_count, ww, wh, current_encoding):
        """
            decide whether we send a full window update
            using the video encoder or if a small lossless region(s) is a better choice
        """
        encoding = WindowSource.do_get_best_encoding(self, batching, has_alpha, is_tray, is_OR, pixel_count, ww, wh, current_encoding)
        if encoding is not None:
            #superclass knows best (usually a tray or transparent window):
            return encoding
        if current_encoding not in self.video_encodings:
            return None
        if ww<self.min_w or ww>self.max_w or wh<self.min_h or wh>self.max_h:
            #video encoder cannot handle this size!
            #(maybe this should be an 'assert' statement here?)
            return None

        def switch_to_lossless(reason):
            coding = self.find_common_lossless_encoder(has_alpha, current_encoding, ww*wh)
            debug("do_get_best_encoding(..) temporarily switching to %s encoder for %s pixels: %s", coding, pixel_count, reason)
            return coding

        #calculate the threshold for using video vs small regions:
        max_nvp = MAX_NONVIDEO_PIXELS
        if pixel_count<=max_nvp:
            #small window!
            return switch_to_lossless("small window: %sx%s" % (ww, wh))

        s = self.get_current_speed()
        if s>75:
            #if speed is high, assume we have bandwidth to spare
            #and prefer non-video:
            max_nvp *= (1.0+(s-75.0)/5.0)
        if is_OR:
            #OR windows tend to be static:
            max_nvp *= 4
        if self._sequence<=5:
            #discount the first frames, the window may be temporary:
            max_nvp *= 10-self._sequence
        if not batching:
            #if we're not batching, allow more pixels:
            max_nvp *= 4
        if self._video_encoder:
            #if we have a video encoder already, make it more likely we'll use it:
            max_nvp /= 2

        if pixel_count<=max_nvp:
            #below threshold
            return switch_to_lossless("frame number %s: %s pixels (threshold=%s)" % (self._sequence, pixel_count, max_nvp))

        #ensure the dimensions we use for decision making are the ones actually used:
        ww = ww & self.width_mask
        wh = wh & self.height_mask
        if ww<self.min_w or ww>self.max_w or wh<self.min_h or wh>self.max_h:
            #failsafe:
            return switch_to_lossless("window dimensions are unsuitable for this encoder/csc")
        return self.get_core_encoding(has_alpha, current_encoding)
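
The non-video threshold above is the product of several heuristics. A standalone sketch of that scaling, written as a hypothetical helper (MAX_NONVIDEO_PIXELS and the example inputs are assumptions, not values from the source):

    # Illustrative sketch of the max_nvp scaling applied in the example above.
    MAX_NONVIDEO_PIXELS = 2048   # assumed baseline

    def effective_max_nvp(speed, is_OR, sequence, batching, has_video_encoder):
        max_nvp = MAX_NONVIDEO_PIXELS
        if speed > 75:
            max_nvp *= (1.0 + (speed - 75.0) / 5.0)   # spare bandwidth: prefer non-video
        if is_OR:
            max_nvp *= 4                              # OR windows tend to be static
        if sequence <= 5:
            max_nvp *= 10 - sequence                  # discount the first few frames
        if not batching:
            max_nvp *= 4
        if has_video_encoder:
            max_nvp /= 2                              # bias towards the existing encoder
        return max_nvp

    # e.g. speed=80, OR window, 2nd frame, not batching, no encoder yet:
    # 2048 * 2.0 * 4 * 8 * 4 = 524288 pixels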
Example No. 14
    def __init__(self, *args):
        WindowSource.__init__(self, *args)
        #client uses swscale (which has extra limits on sizes)
        self.uses_swscale = self.encoding_options.get("uses_swscale", True)
        self.uses_csc_atoms = self.encoding_options.get("csc_atoms", False)
        self.video_scaling = self.encoding_options.get("video_scaling", False)
        self.video_reinit = self.encoding_options.get("video_reinit", False)
        if not self.encoding_client_options:
            #old clients can only use 420P:
            def_csc_modes = ("YUV420P", )
        else:
            #default for newer clients that don't specify "csc_modes":
            def_csc_modes = ("YUV420P", "YUV422P", "YUV444P")
        #0.10 onwards should have specified csc_modes:
        self.csc_modes = self.encoding_options.get("csc_modes", def_csc_modes)

        self.video_encodings = ("vp8", "vp9", "h264")
        for x in self.video_encodings:
            if x in self.server_core_encodings:
                self._encoders[x] = self.video_encode

        #these constraints get updated with real values
        #when we construct the video pipeline:
        self.min_w = 1
        self.min_h = 1
        self.max_w = 16384
        self.max_h = 16384
        self.width_mask = 0xFFFF
        self.height_mask = 0xFFFF
        self.actual_scaling = (1, 1)

        self._csc_encoder = None
        self._video_encoder = None
        self._lock = Lock()  #to ensure we serialize access to the encoder and its internals

        self.last_pipeline_params = None
        self.last_pipeline_scores = []
        WindowVideoSource._video_helper.may_init()
Example No. 15
    def __init__(self, *args):
        WindowSource.__init__(self, *args)
        #client uses swscale (which has extra limits on sizes)
        self.uses_swscale = self.encoding_options.get("uses_swscale", True)
        self.uses_csc_atoms = self.encoding_options.get("csc_atoms", False)
        self.video_scaling = self.encoding_options.get("video_scaling", False)
        self.video_reinit = self.encoding_options.get("video_reinit", False)
        if not self.encoding_client_options:
            #old clients can only use 420P:
            def_csc_modes = ("YUV420P", )
        else:
            #default for newer clients that don't specify "csc_modes":
            def_csc_modes = ("YUV420P", "YUV422P", "YUV444P")
        #0.10 onwards should have specified csc_modes:
        self.csc_modes = self.encoding_options.get("csc_modes", def_csc_modes)

        self.video_encodings = ("vp8", "vp9", "h264")
        for x in self.video_encodings:
            if x in self.server_core_encodings:
                self._encoders[x] = self.video_encode

        #these constraints get updated with real values
        #when we construct the video pipeline:
        self.min_w = 1
        self.min_h = 1
        self.max_w = 16384
        self.max_h = 16384
        self.width_mask = 0xFFFF
        self.height_mask = 0xFFFF
        self.actual_scaling = (1, 1)

        self._csc_encoder = None
        self._video_encoder = None
        self._lock = Lock()               #to ensure we serialize access to the encoder and its internals

        self.last_pipeline_params = None
        self.last_pipeline_scores = []
        WindowVideoSource._video_helper.may_init()
Example No. 16
 def process_damage_region(self, damage_time, window, x, y, w, h, coding, options):
     WindowSource.process_damage_region(self, damage_time, window, x, y, w, h, coding, options)
     #now figure out if we need to send edges separately:
     dw = w - (w & self.width_mask)
     dh = h - (h & self.height_mask)
     if coding in self.video_encodings and (dw>0 or dh>0):
         if dw>0:
             lossless = self.find_common_lossless_encoder(window.has_alpha(), coding, dw*h)
             WindowSource.process_damage_region(self, damage_time, window, x+w-dw, y, dw, h, lossless, options)
         if dh>0:
             lossless = self.find_common_lossless_encoder(window.has_alpha(), coding, w*dh)
             WindowSource.process_damage_region(self, damage_time, window, x, y+h-dh, w, dh, lossless, options)
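
The width and height masks round the video region down to dimensions the encoder/csc step accepts, and the leftover right and bottom strips are re-sent with a lossless encoder. A small sketch of that arithmetic, with assumed mask values (0xFFFE, i.e. even dimensions only):

    # Illustrative edge-strip arithmetic for a 1367x769 damage region,
    # assuming masks that force even dimensions:
    width_mask, height_mask = 0xFFFE, 0xFFFE
    w, h = 1367, 769
    dw = w - (w & width_mask)    # 1367 - 1366 = 1 pixel wide strip on the right
    dh = h - (h & height_mask)   # 769  - 768  = 1 pixel tall strip at the bottom
    # the video encoder handles 1366x768, the two strips go through a lossless encoder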
Example No. 17
    def do_get_best_encoding(self, batching, has_alpha, is_tray, is_OR, pixel_count, ww, wh, current_encoding):
        """
            decide whether we send a full window update
            using the video encoder or if a small lossless region(s) is a better choice
        """
        encoding = WindowSource.do_get_best_encoding(self, batching, has_alpha, is_tray, is_OR, pixel_count, ww, wh, current_encoding)
        if encoding is not None:
            #superclass knows best (usually a tray or transparent window):
            return encoding
        if current_encoding not in self.video_encodings:
            return None
        if ww<self.min_w or ww>self.max_w or wh<self.min_h or wh>self.max_h:
            #video encoder cannot handle this size!
            #(maybe this should be an 'assert' statement here?)
            return None

        def switch_to_lossless(reason):
            coding = self.find_common_lossless_encoder(has_alpha, current_encoding, ww*wh)
            debug("do_get_best_encoding(..) temporarily switching to %s encoder for %s pixels: %s", coding, pixel_count, reason)
            return coding

        max_nvoip = MAX_NONVIDEO_OR_INITIAL_PIXELS
        max_nvp = MAX_NONVIDEO_PIXELS
        if not batching:
            max_nvoip *= 128
            max_nvp *= 128
        if self._sequence==1 and is_OR and pixel_count<max_nvoip:
            #first frame of a small-ish OR window, those are generally short lived
            #so delay using a video encoder until the next frame:
            return switch_to_lossless("first small frame of an OR window")
        #ensure the dimensions we use for decision making are the ones actually used:
        ww = ww & self.width_mask
        wh = wh & self.height_mask
        if ww<self.min_w or ww>self.max_w or wh<self.min_h or wh>self.max_h:
            return switch_to_lossless("window dimensions are unsuitable for this encoder/csc")
        if pixel_count<ww*wh*0.01:
            #less than one percent of total area
            return switch_to_lossless("few pixels (%.2f%% of window)" % (100.0*pixel_count/ww/wh))
        if pixel_count>max_nvp:
            #too many pixels, use current video encoder
            return self.get_core_encoding(has_alpha, current_encoding)
        if pixel_count<0.5*ww*wh and not batching:
            #less than 50% of the full window and we're not batching
            return switch_to_lossless("%i%% of image, not batching" % (100.0*pixel_count/ww/wh))
        return self.get_core_encoding(has_alpha, current_encoding)
Example No. 18
 def process_damage_region(self, damage_time, window, x, y, w, h, coding,
                           options):
     WindowSource.process_damage_region(self, damage_time, window, x, y, w,
                                        h, coding, options)
     #now figure out if we need to send edges separately:
     dw = w - (w & self.width_mask)
     dh = h - (h & self.height_mask)
     if coding in self.video_encodings and (dw > 0 or dh > 0):
         if dw > 0:
             lossless = self.find_common_lossless_encoder(
                 window.has_alpha(), coding, dw * h)
             WindowSource.process_damage_region(self, damage_time, window,
                                                x + w - dw, y, dw, h,
                                                lossless, options)
         if dh > 0:
             lossless = self.find_common_lossless_encoder(
                 window.has_alpha(), coding, w * dh)
             WindowSource.process_damage_region(self, damage_time, window,
                                                x, y + h - dh, w, dh,
                                                lossless, options)
Example No. 19
 def must_encode_full_frame(self, window, encoding):
     return WindowSource.must_encode_full_frame(self, window, encoding) or (encoding in self.video_encodings)
Example No. 20
 def unmap(self):
     WindowSource.cancel_damage(self)
     self.cleanup_codecs()
Example No. 21
 def set_new_encoding(self, encoding):
     if self.encoding!=encoding:
         #ensure we re-init the codecs asap:
         self.cleanup_codecs()
     WindowSource.set_new_encoding(self, encoding)
Example No. 22
 def cleanup(self):
     WindowSource.cleanup(self)
     self.cleanup_codecs()
Example No. 23
 def cleanup(self):
     WindowSource.cleanup(self)
     self.cleanup_codecs()
Example No. 24
 def set_new_encoding(self, encoding):
     if self.encoding != encoding:
         #ensure we re-init the codecs asap:
         self.cleanup_codecs()
     WindowSource.set_new_encoding(self, encoding)
Example No. 25
 def must_encode_full_frame(self, window, encoding):
     return WindowSource.must_encode_full_frame(
         self, window, encoding) or (encoding in self.video_encodings)
Example No. 26
 def unmap(self):
     WindowSource.cancel_damage(self)
     self.cleanup_codecs()
Example No. 27
    def do_get_best_encoding(self, batching, has_alpha, is_tray, is_OR,
                             pixel_count, ww, wh, current_encoding):
        """
            decide whether we send a full window update
            using the video encoder or if a small lossless region(s) is a better choice
        """
        encoding = WindowSource.do_get_best_encoding(self, batching, has_alpha,
                                                     is_tray, is_OR,
                                                     pixel_count, ww, wh,
                                                     current_encoding)
        if encoding is not None:
            #superclass knows best (usually a tray or transparent window):
            return encoding
        if current_encoding not in self.video_encodings:
            return None
        if ww < self.min_w or ww > self.max_w or wh < self.min_h or wh > self.max_h:
            #video encoder cannot handle this size!
            #(maybe this should be an 'assert' statement here?)
            return None

        def switch_to_lossless(reason):
            coding = self.find_common_lossless_encoder(has_alpha,
                                                       current_encoding,
                                                       ww * wh)
            debug(
                "do_get_best_encoding(..) temporarily switching to %s encoder for %s pixels: %s",
                coding, pixel_count, reason)
            return coding

        #calculate the threshold for using video vs small regions:
        max_nvp = MAX_NONVIDEO_PIXELS
        if pixel_count <= max_nvp:
            #small window!
            return switch_to_lossless("small window: %sx%s" % (ww, wh))

        s = self.get_current_speed()
        if s > 75:
            #if speed is high, assume we have bandwidth to spare
            #and prefer non-video:
            max_nvp *= (1.0 + (s - 75.0) / 5.0)
        if is_OR:
            #OR windows tend to be static:
            max_nvp *= 4
        if self._sequence <= 5:
            #discount the first frames, the window may be temporary:
            max_nvp *= 10 - self._sequence
        if not batching:
            #if we're not batching, allow more pixels:
            max_nvp *= 4
        if self._video_encoder:
            #if we have a video encoder already, make it more likely we'll use it:
            max_nvp /= 2

        if pixel_count <= max_nvp:
            #below threshold
            return switch_to_lossless(
                "frame number %s: %s pixels (threshold=%s)" %
                (self._sequence, pixel_count, max_nvp))

        #ensure the dimensions we use for decision making are the ones actually used:
        ww = ww & self.width_mask
        wh = wh & self.height_mask
        if ww < self.min_w or ww > self.max_w or wh < self.min_h or wh > self.max_h:
            #failsafe:
            return switch_to_lossless(
                "window dimensions are unsuitable for this encoder/csc")
        return self.get_core_encoding(has_alpha, current_encoding)
Example No. 28
    def reconfigure(self, force_reload=False):
        """
            This is called when we want to force a full re-init (force_reload=True)
            or from the timer that allows to tune the quality and speed.
            (this tuning is done in WindowSource.reconfigure)
            Here we re-evaluate if the pipeline we are currently using
            is really the best one, and if not we switch to the best one.
            This uses get_video_pipeline_options() to get a list of pipeline
            options with a score for each.
        """
        debug("reconfigure(%s) csc_encoder=%s, video_encoder=%s", force_reload,
              self._csc_encoder, self._video_encoder)
        WindowSource.reconfigure(self, force_reload)
        if not self._video_encoder:
            return
        try:
            self._lock.acquire()
            ve = self._video_encoder
            if not ve or ve.is_closed():
                #could have been freed since we got the lock!
                return
            if force_reload:
                if self._csc_encoder:
                    self.do_csc_encoder_cleanup()
                self.do_video_encoder_cleanup()
                return

            pixel_format = None
            if self._csc_encoder:
                pixel_format = self._csc_encoder.get_src_format()
            else:
                pixel_format = ve.get_src_format()
            width = ve.get_width()
            height = ve.get_height()
            quality = self.get_current_quality()
            speed = self.get_current_speed()

            scores = self.get_video_pipeline_options(ve.get_encoding(), width,
                                                     height, pixel_format)
            if len(scores) > 0:
                debug("reconfigure(%s) best=%s", force_reload, scores[0])
                _, csc_spec, enc_in_format, encoder_spec = scores[0]
                if self._csc_encoder:
                    if csc_spec is None or \
                       type(self._csc_encoder)!=csc_spec.codec_class or \
                       self._csc_encoder.get_dst_format()!=enc_in_format:
                        debug("reconfigure(%s) found better csc encoder: %s",
                              force_reload, scores[0])
                        self.do_csc_encoder_cleanup()
                if type(self._video_encoder)!=encoder_spec.codec_class or \
                   self._video_encoder.get_src_format()!=enc_in_format:
                    debug("reconfigure(%s) found better video encoder: %s",
                          force_reload, scores[0])
                    self.do_video_encoder_cleanup()

            if self._video_encoder is None:
                self.setup_pipeline(scores, width, height, pixel_format)

            if self._video_encoder:
                self._video_encoder.set_encoding_speed(speed)
                self._video_encoder.set_encoding_quality(quality)
        finally:
            self._lock.release()