Beispiel #1
0
    def video_encode(self, wid, x, y, w, h, coding, data, rowstride, options):
        """
            Encode a frame for window `wid` using the x264 or vpx video encoder.

            This method is used by make_data_packet to encode frames.
            Video encoders only deal with fixed dimensions,
            so we must clean and reinitialize the encoder if the window
            dimensions have changed.
            Since this runs in the non-UI thread 'data_to_packet', we must
            use the 'video_encoder_lock' to prevent races.

            Returns (Compressed(coding, data), client_options),
            or (None, None) if the encoder reported a compression error.
        """
        assert x == 0 and y == 0, "invalid position: %sx%s" % (x, y)
        #acquire the lock *before* entering the try block: if acquire() were
        #to fail inside it, the finally clause would release a lock that was
        #never actually held.
        self._video_encoder_lock.acquire()
        try:
            if self._video_encoder:
                if self._video_encoder.get_type() != coding:
                    #the encoding changed (ie: x264 -> vpx): start from scratch
                    log("video_encode: switching from %s to %s",
                        self._video_encoder.get_type(), coding)
                    self.video_encoder_cleanup()
                elif (self._video_encoder.get_width() != w
                      or self._video_encoder.get_height() != h):
                    #same encoding but new dimensions: re-init the context
                    log(
                        "%s: window dimensions have changed from %sx%s to %sx%s",
                        coding, self._video_encoder.get_width(),
                        self._video_encoder.get_height(), w, h)
                    old_pc = (self._video_encoder.get_width()
                              * self._video_encoder.get_height())
                    self._video_encoder.clean()
                    self._video_encoder.init_context(
                        w, h, self.encoding_client_options)
                    #if we had an encoding speed set, restore it,
                    #scaled by the change in pixel count:
                    if len(self._video_encoder_speed):
                        _, recent_speed = calculate_time_weighted_average(
                            list(self._video_encoder_speed))
                        new_pc = w * h
                        new_speed = max(
                            0, min(100, recent_speed * new_pc / old_pc))
                        self._video_encoder.set_encoding_speed(new_speed)
            if self._video_encoder is None:
                #no encoder yet (first frame, or just cleaned up above):
                log("%s: new encoder for wid=%s %sx%s", coding, wid, w, h)
                self._video_encoder = self.make_video_encoder(coding)
                self._video_encoder.init_context(w, h,
                                                 self.encoding_client_options)
            err, _, data = self._video_encoder.compress_image(
                data, rowstride, options)
            if err != 0:
                log.error("%s: ouch, compression error %s", coding, err)
                return None, None
            client_options = self._video_encoder.get_client_options(options)
            #log directly instead of building an intermediate message tuple:
            log("compress_image(..) %s wid=%s, result is %s bytes, client options=%s",
                coding, wid, len(data), client_options)
            return Compressed(coding, data), client_options
        finally:
            self._video_encoder_lock.release()
Beispiel #2
0
def test_calculate_time_weighted_average():
    """Benchmark calculate_time_weighted_average on 100k (event_time, value) samples."""
    sample_size = 100000
    now = time.time()
    first_event = now - sample_size
    #one random sample per second, ending roughly "now":
    data = [(first_event + i, random.random()) for i in xrange(sample_size)]
    start = time.time()
    v = calculate_time_weighted_average(data)
    end = time.time()
    print("calculate_time_weighted_average(%s records)=%s" % (len(data), v))
    print("elapsed time: %sms" % dec1(1000*(end-start)))
Beispiel #3
0
def test_calculate_time_weighted_average():
    """Time calculate_time_weighted_average over a large synthetic series."""
    sample_size = 100000
    now = time.time()
    data = []
    #(event_time, value) pairs, one per second up to "now":
    event_time = now - sample_size
    while len(data) < sample_size:
        data.append((event_time, random.random()))
        event_time += 1
    start = time.time()
    result = calculate_time_weighted_average(data)
    elapsed = time.time() - start
    print("calculate_time_weighted_average(%s records)=%s" % (len(data), result))
    print("elapsed time: %sms" % dec1(1000 * elapsed))
Beispiel #4
0
 def video_encode(self, wid, x, y, w, h, coding, data, rowstride, options):
     """
         Encode a frame for window `wid` using x264 or vpx.

         This method is used by make_data_packet to encode frames.
         Video encoders only deal with fixed dimensions,
         so we must clean and reinitialize the encoder if the window dimensions
         have changed.
         Since this runs in the non-UI thread 'data_to_packet', we must
         use the 'video_encoder_lock' to prevent races.

         Returns (Compressed(coding, data), client_options),
         or (None, None) on compression error.
     """
     #encoders only handle full-frame updates at the origin:
     assert x==0 and y==0, "invalid position: %s,%s" % (x,y)
     #time_before = time.clock()
     try:
         #NOTE(review): acquire() inside the try block means that if it ever
         #raised, the finally clause would release an unheld lock.
         self._video_encoder_lock.acquire()
         if self._video_encoder:
             #an encoder exists: check whether it can be reused as-is
             if self._video_encoder.get_type()!=coding:
                 #encoding changed (ie: x264 -> vpx): drop the old encoder
                 log("video_encode: switching from %s to %s", self._video_encoder.get_type(), coding)
                 self.do_video_encoder_cleanup()
             elif self._video_encoder.get_width()!=w or self._video_encoder.get_height()!=h:
                 #same encoding but new dimensions: re-init the context
                 log("%s: window dimensions have changed from %sx%s to %sx%s", coding, self._video_encoder.get_width(), self._video_encoder.get_height(), w, h)
                 old_pc = self._video_encoder.get_width() * self._video_encoder.get_height()
                 self._video_encoder.clean()
                 self._video_encoder.init_context(w, h, self.encoding_client_options)
                 #if we had an encoding speed set, restore it (also scaled):
                 if len(self._video_encoder_speed):
                     _, recent_speed = calculate_time_weighted_average(list(self._video_encoder_speed))
                     new_pc = w * h
                     #scale the restored speed by the new/old pixel-count ratio:
                     new_speed = max(0, min(100, recent_speed*new_pc/old_pc))
                     self._video_encoder.set_encoding_speed(new_speed)
         if self._video_encoder is None:
             #no encoder yet (first frame, or just cleaned up above):
             log("%s: new encoder for wid=%s %sx%s", coding, wid, w, h)
             self._video_encoder = self.make_video_encoder(coding)
             self._video_encoder.init_context(w, h, self.encoding_client_options)
         err, _, data = self._video_encoder.compress_image(data, rowstride, options)
         if err!=0:
             log.error("%s: ouch, compression error %s", coding, err)
             return None, None
         client_options = self._video_encoder.get_client_options(options)
         log("compress_image(..) %s wid=%s, result is %s bytes, client options=%s", coding, wid, len(data), client_options)
         return Compressed(coding, data), client_options
     finally:
         self._video_encoder_lock.release()
def calculate_batch_delay(window, wid, batch, global_statistics, statistics,
                          video_encoder=None, video_encoder_lock=None, video_encoder_speed=None, video_encoder_quality=None):
    """
        Calculates a new batch delay.
        We first gather some statistics,
        then use them to calculate a number of factors,
        which are then used to adjust the batch delay in 'update_batch_delay'.

        window                : the window we calculate the delay for (may be None)
        wid                   : its window id, used to filter per-window queue data
        batch                 : the batch-delay state object that gets updated
        global_statistics     : statistics shared by all windows (latency, queues, mmap)
        statistics            : per-window statistics (damage latency, decode time, ...)
        video_encoder         : optional encoder whose speed/quality are auto-tuned
        video_encoder_lock    : lock serializing access to video_encoder
        video_encoder_speed   : recent (event_time, speed) samples; appended to here
        video_encoder_quality : recent (event_time, quality) samples; appended to here
    """
    #the number of pixels which can be considered 'low' in terms of backlog.
    #Generally, just one full frame, (more with mmap because it is so fast)
    low_limit = 1024*1024
    if window:
        ww, wh = window.get_dimensions()
        low_limit = max(8*8, ww*wh)
        if global_statistics.mmap_size>0:
            #mmap can accumulate much more as it is much faster
            low_limit *= 4
    #client latency: (how long it takes for a packet to get to the client and get the echo back)
    avg_client_latency, recent_client_latency = 0.1, 0.1    #assume 100ms until we get some data
    if len(global_statistics.client_latency)>0:
        data = [(when, latency) for _, when, _, latency in list(global_statistics.client_latency)]
        avg_client_latency, recent_client_latency = calculate_time_weighted_average(data)
    #damage "in" latency: (the time it takes for damage requests to be processed only)
    avg_damage_in_latency, recent_damage_in_latency = 0, 0
    if len(statistics.damage_in_latency)>0:
        data = [(when, latency) for when, _, _, latency in list(statistics.damage_in_latency)]
        avg_damage_in_latency, recent_damage_in_latency =  calculate_time_weighted_average(data)
    #damage "out" latency: (the time it takes for damage requests to be processed and sent out)
    avg_damage_out_latency, recent_damage_out_latency = 0, 0
    if len(statistics.damage_out_latency)>0:
        data = [(when, latency) for when, _, _, latency in list(statistics.damage_out_latency)]
        avg_damage_out_latency, recent_damage_out_latency = calculate_time_weighted_average(data)
    #client decode speed:
    avg_decode_speed, recent_decode_speed = None, None
    if len(statistics.client_decode_time)>0:
        #the elapsed time recorded is in microseconds, so multiply by 1000*1000 to get the real value:
        avg_decode_speed, recent_decode_speed = calculate_timesize_weighted_average(list(statistics.client_decode_time), sizeunit=1000*1000)
    #network send speed:
    avg_send_speed, recent_send_speed = None, None
    if len(statistics.damage_send_speed)>0:
        avg_send_speed, recent_send_speed = calculate_timesize_weighted_average(list(statistics.damage_send_speed))
    #client backlog: (packets and pixels that should have been processed by now - taking into account latency)
    packets_backlog, pixels_backlog = 0, 0
    if len(statistics.damage_ack_pending)>0:
        sent_before = time.time()-avg_client_latency
        for sent_at, pixels in statistics.damage_ack_pending.values():
            if sent_at>sent_before:
                continue
            packets_backlog += 1
            pixels_backlog += pixels
    #worst of the average/recent damage latencies:
    max_latency = max(avg_damage_in_latency, recent_damage_in_latency, avg_damage_out_latency, recent_damage_out_latency)

    #for each indicator: (description, factor, weight)
    factors = []

    #damage "in" latency factor:
    if len(statistics.damage_in_latency)>0:
        msg = "damage processing latency:"
        target_latency = 0.010 + (0.050*low_limit/1024.0/1024.0)
        factors.append(calculate_for_target(msg, target_latency, avg_damage_in_latency, recent_damage_in_latency, aim=0.8, slope=0.005, smoothing=sqrt))
    #damage "out" latency
    if len(statistics.damage_out_latency)>0:
        msg = "damage send latency:"
        target_latency = 0.025 + (0.060*low_limit/1024.0/1024.0)
        factors.append(calculate_for_target(msg, target_latency, avg_damage_out_latency, recent_damage_out_latency, aim=0.8, slope=0.010, smoothing=sqrt))
    #send speed:
    if avg_send_speed is not None and recent_send_speed is not None:
        #our calculate methods aims for lower values, so invert speed
        #this is how long it takes to send 1MB:
        avg1MB = 1.0*1024*1024/avg_send_speed
        recent1MB = 1.0*1024*1024/recent_send_speed
        #we only really care about this when the speed is quite low,
        #so adjust the weight accordingly:
        minspeed = float(128*1024)
        div = logp(max(recent_send_speed, minspeed)/minspeed)
        msg = "network send speed: avg=%s, recent=%s (KBytes/s), div=%s" % (int(avg_send_speed/1024), int(recent_send_speed/1024), div)
        factors.append(calculate_for_average(msg, avg1MB, recent1MB, weight_offset=1.0, weight_div=div))
    #client decode time:
    if avg_decode_speed is not None and recent_decode_speed is not None:
        msg = "client decode speed: avg=%s, recent=%s (MPixels/s)" % (dec1(avg_decode_speed/1000/1000), dec1(recent_decode_speed/1000/1000))
        #our calculate methods aims for lower values, so invert speed
        #this is how long it takes to send 1MB:
        avg1MB = 1.0*1024*1024/avg_decode_speed
        recent1MB = 1.0*1024*1024/recent_decode_speed
        factors.append(calculate_for_average(msg, avg1MB, recent1MB, weight_offset=0.0))
    #elapsed time without damage:
    if batch.last_updated>0:
        #If nothing happens for a while then we can reduce the batch delay,
        #however we must ensure this is not caused by a high damage latency
        #so we ignore short elapsed times.
        ignore_time = max(max_latency+batch.recalculate_delay, batch.delay+batch.recalculate_delay)
        ignore_count = 2 + ignore_time / batch.recalculate_delay
        elapsed = time.time()-batch.last_updated
        n_skipped_calcs = elapsed / batch.recalculate_delay
        #the longer the elapsed time, the more we slash:
        weight = logp(max(0, n_skipped_calcs-ignore_count))
        msg = "delay not updated for %s ms (skipped %s times - highest latency is %s)" % (dec1(1000*elapsed), int(n_skipped_calcs), dec1(1000*max_latency))
        factors.append((msg, 0, weight))
    #client latency: (we want to keep client latency as low as can be)
    if len(global_statistics.client_latency)>0 and avg_client_latency is not None and recent_client_latency is not None:
        target_latency = 0.005
        if global_statistics.min_client_latency:
            target_latency = max(target_latency, global_statistics.min_client_latency)
        msg = "client latency:"
        factors.append(calculate_for_target(msg, target_latency, avg_client_latency, recent_client_latency, aim=0.8, slope=0.005, smoothing=sqrt, weight_multiplier=4.0))
    #damage packet queue size: (includes packets from all windows)
    factors.append(queue_inspect("damage packet queue size:", global_statistics.damage_packet_qsizes, smoothing=sqrt))
    #damage pixels waiting in the packet queue: (extract data for our window id only)
    time_values = [(event_time, value) for event_time, dwid, value in list(global_statistics.damage_packet_qpixels) if dwid==wid]
    factors.append(queue_inspect("damage packet queue pixels:", time_values, div=low_limit, smoothing=sqrt))
    #damage data queue: (This is an important metric since each item will consume a fair amount of memory and each will later on go through the other queues.)
    factors.append(queue_inspect("damage data queue:", global_statistics.damage_data_qsizes))
    last_packets_backlog, last_pixels_backlog = 0, 0
    if statistics.last_packet_send_stats is not None:
        #packet and pixels backlog:
        last_packets_backlog, last_pixels_backlog = statistics.last_client_delta
        factors.append(calculate_for_target("client packets backlog:", 0, last_packets_backlog, packets_backlog, slope=1.0, smoothing=sqrt))
        factors.append(calculate_for_target("client pixels backlog:", 0, last_pixels_backlog, pixels_backlog, div=low_limit, slope=1.0, smoothing=sqrt))
    if global_statistics.mmap_size>0:
        #full: effective range is 0.0 to ~1.2
        full = 1.0-float(global_statistics.mmap_free_size)/global_statistics.mmap_size
        #aim for ~50%
        factors.append(("mmap area %s%% full" % int(100*full), logp(2*full), 2*full))
    #now use those factors to drive the delay change:
    update_batch_delay(batch, factors)
    #***************************************************************
    #special hook for video encoders
    if (not AUTO_QUALITY and not AUTO_SPEED) or video_encoder is None:
        return

    #***********************************************************
    # encoding speed:
    #    0    for highest compression/slower
    #    100  for lowest compression/fast
    # here we try to minimize damage-latency and client decoding speed
    min_damage_latency = 0.010 + (0.050*low_limit/1024.0/1024.0)
    target_damage_latency = min_damage_latency + batch.delay/1000.0
    #ratio of measured to target latency (>1 means we are too slow):
    dam_lat = (avg_damage_in_latency or 0)/target_damage_latency
    target_decode_speed = 1*1000*1000      #1 MPixels/s
    dec_lat = target_decode_speed/(avg_decode_speed or target_decode_speed)
    target = max(dam_lat, dec_lat, 0.0)
    target_speed = 100.0 * min(1.0, target)
    video_encoder_speed.append((time.time(), target_speed))
    _, new_speed = calculate_time_weighted_average(video_encoder_speed)
    #msg is a (format, *args) tuple, expanded into log() below:
    msg = "video encoder speed factors: min_damage_latency=%s, target_damage_latency=%s, batch.delay=%s, dam_lat=%s, dec_lat=%s, target=%s, new_speed=%s", \
             dec2(min_damage_latency), dec2(target_damage_latency), dec2(batch.delay), dec2(dam_lat), dec2(dec_lat), int(target_speed), int(new_speed)
    log(*msg)
    if DEBUG_DELAY:
        add_DEBUG_DELAY_MESSAGE(msg)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try minimize client-latency, packet-backlog and batch.delay
    #NOTE(review): this divides a packet count by a pixel limit - confirm intent
    packets_bl = 1.0 - logp(last_packets_backlog/low_limit)
    batch_q = 4.0 * batch.min_delay / batch.delay
    target = max(packets_bl, batch_q)
    latency_q = 0.0
    if len(global_statistics.client_latency)>0 and avg_client_latency is not None and recent_client_latency is not None:
        target_latency = 0.005
        if global_statistics.min_client_latency:
            target_latency = max(target_latency, global_statistics.min_client_latency)
        latency_q = 4.0 * target_latency / recent_client_latency
        target = min(target, latency_q)
    target_quality = 100.0*(min(1.0, max(0.0, target)))
    video_encoder_quality.append((time.time(), target_quality))
    new_quality, _ = calculate_time_weighted_average(video_encoder_quality)
    #msg is a (format, *args) tuple, expanded into log() below:
    msg = "video encoder quality factors: packets_bl=%s, batch_q=%s, latency_q=%s, target=%s, new_quality=%s", \
             dec2(packets_bl), dec2(batch_q), dec2(latency_q), int(target_quality), int(new_quality)
    log(*msg)
    if DEBUG_DELAY:
        add_DEBUG_DELAY_MESSAGE(msg)
    try:
        video_encoder_lock.acquire()
        if AUTO_SPEED:
            video_encoder.set_encoding_speed(new_speed)
        if AUTO_QUALITY:
            video_encoder.set_encoding_quality(new_quality)
    finally:
        video_encoder_lock.release()
Beispiel #6
0
def calculate_batch_delay(window, wid, batch, global_statistics, statistics,
                          video_encoder=None, video_encoder_lock=None, video_encoder_speed=None, video_encoder_quality=None,
                          fixed_quality=-1, fixed_speed=-1):
    """
        Calculates a new batch delay.
        We first gather some statistics,
        then use them to calculate a number of factors,
        which are then used to adjust the batch delay in 'update_batch_delay'.

        window                : the window we calculate the delay for (may be None)
        wid                   : its window id, used to filter per-window queue data
        batch                 : the batch-delay state object that gets updated
        global_statistics     : statistics shared by all windows (latency, queues, mmap)
        statistics            : per-window statistics (damage latency, decode time, ...)
        video_encoder         : optional encoder whose speed/quality get tuned
        video_encoder_lock    : lock serializing access to video_encoder
        video_encoder_speed   : recent (event_time, speed) samples; appended to here
        video_encoder_quality : recent (event_time, quality) samples; appended to here
        fixed_quality         : if >=0, use this quality instead of auto-tuning
        fixed_speed           : if >=0, use this speed instead of auto-tuning
    """
    #the number of pixels which can be considered 'low' in terms of backlog.
    #Generally, just one full frame, (more with mmap because it is so fast)
    low_limit = 1024*1024
    if window:
        ww, wh = window.get_dimensions()
        low_limit = max(8*8, ww*wh)
        if global_statistics.mmap_size>0:
            #mmap can accumulate much more as it is much faster
            low_limit *= 4
    #client latency: (how long it takes for a packet to get to the client and get the echo back)
    avg_client_latency, recent_client_latency = 0.1, 0.1    #assume 100ms until we get some data
    if len(global_statistics.client_latency)>0:
        data = [(when, latency) for _, when, _, latency in list(global_statistics.client_latency)]
        avg_client_latency, recent_client_latency = calculate_time_weighted_average(data)
        global_statistics.avg_client_latency = avg_client_latency
    #client ping latency: from ping packets
    avg_client_ping_latency, recent_client_ping_latency = 0.1, 0.1    #assume 100ms until we get some data
    if len(global_statistics.client_ping_latency)>0:
        avg_client_ping_latency, recent_client_ping_latency = calculate_time_weighted_average(list(global_statistics.client_ping_latency))
    #server ping latency: from ping packets
    avg_server_ping_latency, recent_server_ping_latency = 0.1, 0.1    #assume 100ms until we get some data
    if len(global_statistics.server_ping_latency)>0:
        avg_server_ping_latency, recent_server_ping_latency = calculate_time_weighted_average(list(global_statistics.server_ping_latency))
    #damage "in" latency: (the time it takes for damage requests to be processed only)
    avg_damage_in_latency, recent_damage_in_latency = 0, 0
    if len(statistics.damage_in_latency)>0:
        data = [(when, latency) for when, _, _, latency in list(statistics.damage_in_latency)]
        avg_damage_in_latency, recent_damage_in_latency =  calculate_time_weighted_average(data)
    #damage "out" latency: (the time it takes for damage requests to be processed and sent out)
    avg_damage_out_latency, recent_damage_out_latency = 0, 0
    if len(statistics.damage_out_latency)>0:
        data = [(when, latency) for when, _, _, latency in list(statistics.damage_out_latency)]
        avg_damage_out_latency, recent_damage_out_latency = calculate_time_weighted_average(data)
    #client decode speed:
    avg_decode_speed, recent_decode_speed = None, None
    if len(statistics.client_decode_time)>0:
        #the elapsed time recorded is in microseconds, so multiply by 1000*1000 to get the real value:
        avg_decode_speed, recent_decode_speed = calculate_timesize_weighted_average(list(statistics.client_decode_time), sizeunit=1000*1000)
    #network send speed:
    avg_send_speed, recent_send_speed = None, None
    if len(statistics.damage_send_speed)>0:
        avg_send_speed, recent_send_speed = calculate_timesize_weighted_average(list(statistics.damage_send_speed))
    #worst of the average/recent damage latencies:
    max_latency = max(avg_damage_in_latency, recent_damage_in_latency, avg_damage_out_latency, recent_damage_out_latency)

    #for each indicator: (description, factor, weight)
    factors = []

    #damage "in" latency factor:
    if len(statistics.damage_in_latency)>0:
        msg = "damage processing latency:"
        target_latency = 0.010 + (0.050*low_limit/1024.0/1024.0)
        factors.append(calculate_for_target(msg, target_latency, avg_damage_in_latency, recent_damage_in_latency, aim=0.8, slope=0.005, smoothing=sqrt))
    #damage "out" latency
    if len(statistics.damage_out_latency)>0:
        msg = "damage send latency:"
        target_latency = 0.025 + (0.060*low_limit/1024.0/1024.0)
        factors.append(calculate_for_target(msg, target_latency, avg_damage_out_latency, recent_damage_out_latency, aim=0.8, slope=0.010, smoothing=sqrt))
    #send speed:
    if avg_send_speed is not None and recent_send_speed is not None:
        #our calculate methods aims for lower values, so invert speed
        #this is how long it takes to send 1MB:
        avg1MB = 1.0*1024*1024/avg_send_speed
        recent1MB = 1.0*1024*1024/recent_send_speed
        #we only really care about this when the speed is quite low,
        #so adjust the weight accordingly:
        minspeed = float(128*1024)
        div = logp(max(recent_send_speed, minspeed)/minspeed)
        msg = "network send speed: avg=%s, recent=%s (KBytes/s), div=%s" % (int(avg_send_speed/1024), int(recent_send_speed/1024), div)
        factors.append(calculate_for_average(msg, avg1MB, recent1MB, weight_offset=1.0, weight_div=div))
    #client decode time:
    if avg_decode_speed is not None and recent_decode_speed is not None:
        msg = "client decode speed: avg=%s, recent=%s (MPixels/s)" % (dec1(avg_decode_speed/1000/1000), dec1(recent_decode_speed/1000/1000))
        #our calculate methods aims for lower values, so invert speed
        #this is how long it takes to send 1MB:
        avg1MB = 1.0*1024*1024/avg_decode_speed
        recent1MB = 1.0*1024*1024/recent_decode_speed
        factors.append(calculate_for_average(msg, avg1MB, recent1MB, weight_offset=0.0))
    #elapsed time without damage:
    if batch.last_updated>0:
        #If nothing happens for a while then we can reduce the batch delay,
        #however we must ensure this is not caused by a high damage latency
        #so we ignore short elapsed times.
        ignore_time = max(max_latency+batch.recalculate_delay, batch.delay+batch.recalculate_delay)
        ignore_count = 2 + ignore_time / batch.recalculate_delay
        elapsed = time.time()-batch.last_updated
        n_skipped_calcs = elapsed / batch.recalculate_delay
        #the longer the elapsed time, the more we slash:
        weight = logp(max(0, n_skipped_calcs-ignore_count))
        msg = "delay not updated for %s ms (skipped %s times - highest latency is %s)" % (dec1(1000*elapsed), int(n_skipped_calcs), dec1(1000*max_latency))
        factors.append((msg, 0, weight))

    target_latency = statistics.get_target_client_latency(global_statistics.min_client_latency, avg_client_latency)
    if len(global_statistics.client_latency)>0 and avg_client_latency is not None and recent_client_latency is not None:
        #client latency: (we want to keep client latency as low as can be)
        msg = "client latency:"
        factors.append(calculate_for_target(msg, target_latency, avg_client_latency, recent_client_latency, aim=0.8, slope=0.005, smoothing=sqrt))
    if len(global_statistics.client_ping_latency)>0:
        msg = "client ping latency:"
        factors.append(calculate_for_target(msg, target_latency, avg_client_ping_latency, recent_client_ping_latency, aim=0.95, slope=0.005, smoothing=sqrt, weight_multiplier=0.25))
    if len(global_statistics.server_ping_latency)>0:
        msg = "server ping latency:"
        factors.append(calculate_for_target(msg, target_latency, avg_server_ping_latency, recent_server_ping_latency, aim=0.95, slope=0.005, smoothing=sqrt, weight_multiplier=0.25))
    #damage packet queue size: (includes packets from all windows)
    factors.append(queue_inspect("damage packet queue size:", global_statistics.damage_packet_qsizes, smoothing=sqrt))
    #damage pixels waiting in the packet queue: (extract data for our window id only)
    time_values = [(event_time, value) for event_time, dwid, value in list(global_statistics.damage_packet_qpixels) if dwid==wid]
    factors.append(queue_inspect("damage packet queue pixels:", time_values, div=low_limit, smoothing=sqrt))
    #damage data queue: (This is an important metric since each item will consume a fair amount of memory and each will later on go through the other queues.)
    factors.append(queue_inspect("damage data queue:", global_statistics.damage_data_qsizes))
    if global_statistics.mmap_size>0:
        #full: effective range is 0.0 to ~1.2
        full = 1.0-float(global_statistics.mmap_free_size)/global_statistics.mmap_size
        #aim for ~50%
        factors.append(("mmap area %s%% full" % int(100*full), logp(2*full), 2*full))
    #now use those factors to drive the delay change:
    update_batch_delay(batch, factors)
    #***************************************************************
    #special hook for video encoders
    if video_encoder is None:
        return

    #***********************************************************
    # encoding speed:
    #    0    for highest compression/slower
    #    100  for lowest compression/fast
    # here we try to minimize damage-latency and client decoding speed
    if fixed_speed>=0:
        new_speed = fixed_speed
        msg = "video encoder using fixed speed: %s", fixed_speed
    else:
        min_damage_latency = 0.010 + (0.050*low_limit/1024.0/1024.0)
        target_damage_latency = min_damage_latency + batch.delay/1000.0
        #ratio of measured to target latency (>1 means we are too slow):
        dam_lat = (avg_damage_in_latency or 0)/target_damage_latency
        target_decode_speed = 1*1000*1000      #1 MPixels/s
        dec_lat = 0.0
        if avg_decode_speed:
            dec_lat = target_decode_speed/(avg_decode_speed or target_decode_speed)
        target = max(dam_lat, dec_lat, 0.0)
        target_speed = 100.0 * min(1.0, target)
        video_encoder_speed.append((time.time(), target_speed))
        _, new_speed = calculate_time_weighted_average(video_encoder_speed)
        #msg is a (format, *args) tuple, expanded into log() below:
        msg = "video encoder speed factors: min_damage_latency=%s, target_damage_latency=%s, batch.delay=%s, dam_lat=%s, dec_lat=%s, target=%s, new_speed=%s", \
                 dec2(min_damage_latency), dec2(target_damage_latency), dec2(batch.delay), dec2(dam_lat), dec2(dec_lat), int(target_speed), int(new_speed)
    log(*msg)
    if DEBUG_DELAY:
        add_DEBUG_DELAY_MESSAGE(msg)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try minimize client-latency, packet-backlog and batch.delay
    if fixed_quality>=0:
        new_quality = fixed_quality
        msg = "video encoder using fixed quality: %s", fixed_quality
    else:
        packets_backlog, _, _ = statistics.get_backlog(target_latency)
        #NOTE(review): this divides a packet count by a pixel limit - confirm intent
        packets_bl = 1.0 - logp(packets_backlog/low_limit)
        batch_q = 4.0 * batch.min_delay / batch.delay
        target = max(packets_bl, batch_q)
        latency_q = 0.0
        if len(global_statistics.client_latency)>0 and recent_client_latency>0:
            latency_q = 4.0 * target_latency / recent_client_latency
            target = min(target, latency_q)
        target_quality = 100.0*(min(1.0, max(0.0, target)))
        video_encoder_quality.append((time.time(), target_quality))
        new_quality, _ = calculate_time_weighted_average(video_encoder_quality)
        #msg is a (format, *args) tuple, expanded into log() below:
        msg = "video encoder quality factors: packets_bl=%s, batch_q=%s, latency_q=%s, target=%s, new_quality=%s", \
                 dec2(packets_bl), dec2(batch_q), dec2(latency_q), int(target_quality), int(new_quality)
    log(*msg)
    if DEBUG_DELAY:
        add_DEBUG_DELAY_MESSAGE(msg)
    try:
        video_encoder_lock.acquire()
        video_encoder.set_encoding_speed(new_speed)
        video_encoder.set_encoding_quality(new_quality)
    finally:
        video_encoder_lock.release()