Example #1
 def may_send_delayed(self):
     """ send the delayed region for processing if there is no client backlog """
     if not self._damage_delayed:
         log("window %s already removed from delayed list?", self.wid)
         return False
     damage_time = self._damage_delayed[0]
     packets_backlog = self.get_packets_backlog()
     now = time.time()
     actual_delay = 1000.0 * (now - damage_time)
     if packets_backlog > 0:
         if actual_delay < self.batch_config.max_delay:
             log(
                 "send_delayed for wid %s, delaying again because of backlog: %s packets, batch delay is %s, elapsed time is %s ms",
                 self.wid, packets_backlog, self.batch_config.delay,
                 dec1(actual_delay))
             #this method will get fired again when damage_packet_acked is called
             return False
         else:
             log.warn(
                 "send_delayed for wid %s, elapsed time %s is above limit of %s - sending now",
                 self.wid, dec1(actual_delay),
                 dec1(self.batch_config.max_delay))
     else:
         log(
             "send_delayed for wid %s, batch delay is %s, elapsed time is %s ms",
             self.wid, dec1(self.batch_config.delay), dec1(actual_delay))
     self.batch_config.last_actual_delays.append((now, actual_delay))
     self.do_send_delayed_region()
     return False
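The backlog check above boils down to a small predicate; the condensed helper below is not part of the source (the name is illustrative), it only restates the decision may_send_delayed makes:

def should_send_now(packets_backlog, actual_delay_ms, max_delay_ms):
    #keep delaying only while there is a client backlog
    #and the elapsed delay is still below the configured maximum:
    return packets_backlog <= 0 or actual_delay_ms >= max_delay_ms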
Example #2
 def damage_packet_acked(self, damage_packet_sequence, width, height,
                         decode_time):
     """
         The client is acknowledging a damage packet,
         we record the 'client decode time' (provided by the client itself)
         and the "client latency".
     """
     log("packet decoding for window %s %sx%s took %s µs", self.wid, width,
         height, decode_time)
     if decode_time:
         self.statistics.client_decode_time.append(
             (time.time(), width * height, decode_time))
     pending = self.statistics.damage_ack_pending.get(
         damage_packet_sequence)
     if pending is None:
         log("cannot find sent time for sequence %s",
             damage_packet_sequence)
         return
     del self.statistics.damage_ack_pending[damage_packet_sequence]
     if decode_time > 0:
         sent_at, pixels = pending
         now = time.time()
         diff = now - sent_at
         latency = max(0, diff - decode_time / 1000 / 1000)
         log(
             "client_ack_damage: took %s ms round trip, %s for decoding of %s pixels, %s for network",
             dec1(diff * 1000), dec1(decode_time / 1000), pixels,
             dec1(latency * 1000))
         self.global_statistics.record_latency(self.wid, pixels, latency)
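The decode time reported by the client is in microseconds while the round trip is measured in seconds, hence the division by 1000*1000 before subtracting. A small illustrative helper (not from the source) using the same arithmetic:

def network_latency(sent_at, acked_at, decode_time_us):
    #round trip in seconds, client decode time in microseconds,
    #clamped at zero in case the estimate overshoots:
    round_trip = acked_at - sent_at
    return max(0, round_trip - decode_time_us / 1000.0 / 1000.0)

#a 45ms round trip with 12000us of decoding leaves ~33ms for the network:
print(network_latency(0.0, 0.045, 12000))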
Example #3
 def record_latency(self, wid, decode_time, start_send_at, end_send_at,
                    pixels, bytecount):
     now = time.time()
     send_diff = now - start_send_at
     echo_diff = now - end_send_at
     send_latency = max(0, send_diff - decode_time / 1000.0 / 1000.0)
     echo_latency = max(0, echo_diff - decode_time / 1000.0 / 1000.0)
     log(
         "record_latency: took %s ms round trip (%s just for echo), %s for decoding of %s pixels, %s bytes sent over the network in %s ms (%s ms for echo)",
         dec1(send_diff * 1000), dec1(echo_diff * 1000),
         dec1(decode_time / 1000.0), pixels, bytecount,
         dec1(send_latency * 1000), dec1(echo_latency * 1000))
     if self.min_client_latency is None or self.min_client_latency > send_latency:
         self.min_client_latency = send_latency
     self.client_latency.append((wid, now, pixels, send_latency))
Example #4
def update_batch_delay(batch, factors):
    """
        Given a list of factors of the form:
        [(description, factor, weight)]
        we calculate a new batch delay.
        We use a time-weighted average of previous delays as a starting value,
        then combine it with the new factors.
    """
    last_updated = batch.last_updated
    current_delay = batch.delay
    now = time.time()
    avg = 0
    tv, tw = 0.0, 0.0
    decay = max(1, logp(current_delay / batch.min_delay) / 5.0)
    if len(batch.last_delays) > 0:
        #get the weighted average
        #older values matter less, we decay them according to how much we batch already
        #(older values matter more when we batch a lot)
        for when, delay in batch.last_delays:
            #newer matter more:
            w = 1.0 / (1.0 + ((now - when) / decay)**2)
            d = max(batch.min_delay, min(batch.max_delay, delay))
            tv += d * w
            tw += w
        avg = tv / tw
    hist_w = tw

    valid_factors = [x for x in factors if x is not None]
    all_factors_weight = sum([w for _, _, w in valid_factors])
    if all_factors_weight == 0:
        log("update_batch_delay: no weights yet!")
        return
    max_factor = max([f for _, f, _ in valid_factors])
    #mitigate low factors if we have some really high ones:
    factor_min_limit = min(0.5, max_factor / 10.0)
    for _, factor, weight in valid_factors:
        actual_factor = max(factor_min_limit, factor)
        target_delay = max(batch.min_delay,
                           min(batch.max_delay, current_delay * actual_factor))
        w = max(1, hist_w) * weight / all_factors_weight
        tw += w
        tv += target_delay * w
    batch.delay = max(batch.min_delay, min(batch.max_delay, tv / tw))
    batch.last_updated = now
    if DEBUG_DELAY:
        decimal_delays = [dec1(x) for _, x in batch.last_delays]
        if len(decimal_delays) == 0:
            decimal_delays.append(0)
        logfactors = [(msg, dec2(f), dec2(w)) for (msg, f, w) in valid_factors]
        rec = (
            "update_batch_delay: wid=%s, last updated %s ms ago, decay=%s, change factor=%s%%, delay min=%s, avg=%s, max=%s, cur=%s, w. average=%s, tot wgt=%s, hist_w=%s, new delay=%s\n %s",
            batch.wid, dec2(1000.0 * now - 1000.0 * last_updated), dec2(decay),
            dec1(100 * (batch.delay / current_delay - 1)), min(decimal_delays),
            dec1(sum(decimal_delays) / len(decimal_delays)),
            max(decimal_delays), dec1(current_delay), dec1(avg), dec1(tw),
            dec1(hist_w), dec1(batch.delay),
            "\n ".join([str(x) for x in logfactors]))
        add_DEBUG_DELAY_MESSAGE(rec)
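The history average above weights each past delay by how recent it is, and the decay term stretches that window when the current delay is already high (older values matter more when we batch a lot). A standalone sketch of just that step; the function name is mine, the formula is the one used above:

import time

def weighted_history_average(last_delays, min_delay, max_delay, decay):
    #each (when, delay) sample is clamped to [min_delay, max_delay]
    #and weighted by 1/(1+((now-when)/decay)**2), so older samples fade,
    #and fade more slowly when 'decay' is large:
    now = time.time()
    tv, tw = 0.0, 0.0
    for when, delay in last_delays:
        w = 1.0 / (1.0 + ((now - when) / decay) ** 2)
        tv += max(min_delay, min(max_delay, delay)) * w
        tw += w
    if tw == 0:
        return 0.0, 0.0
    return tv / tw, tw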
Example #5
def test_logp():
    start = time.time()
    for _ in xrange(100000):
        x = random.random()
        logp(x)
    end = time.time()
    print("logp:")
    print("elapsed time: %sms" % dec1(1000 * (end - start)))
Example #6
def test_logp():
	start = time.time()
	for _ in xrange(100000):
		x = random.random()
		logp(x)
	end = time.time()
	print("logp:")
	print("elapsed time: %sms" % dec1(1000*(end-start)))
Example #7
    def process_damage_region(self, damage_time, window, x, y, w, h, coding,
                              options):
        """
            Called by 'damage' or 'send_delayed_regions' to process a damage region,
            we extract the rgb data from the pixmap and place it on the damage queue.
            This runs in the UI thread.
        """
        if w == 0 or h == 0:
            return
        # It's important to acknowledge changes *before* we extract them,
        # to avoid a race condition.
        if not window.is_managed():
            log.warn("the window %s is not composited!?", window)
            return
        window.acknowledge_changes()

        self._sequence += 1
        sequence = self._sequence
        pixmap = window.get_property("client-contents")
        if self.is_cancelled(sequence):
            log("get_window_pixmap: dropping damage request with sequence=%s",
                sequence)
            return None
        if pixmap is None:
            log.error(
                "get_window_pixmap: wtf, pixmap is None for window %s, wid=%s",
                window, self.wid)
            return
        process_damage_time = time.time()
        data = get_rgb_rawdata(damage_time, process_damage_time, self.wid,
                               pixmap, x, y, w, h, coding, sequence, options)
        if not data:
            return
        log(
            "process_damage_regions: adding pixel data %s to queue, elapsed time: %s ms",
            data[:6], dec1(1000 * (time.time() - damage_time)))

        def make_data_packet(*args):
            #NOTE: this function is called from the damage data thread!
            packet = self.make_data_packet(*data)
            if packet:
                self.queue_damage_packet(pixmap, packet, damage_time,
                                         process_damage_time)
                if self.encoding in ("png", "rgb24"):
                    #primary encoding is lossless, no need for auto-refresh
                    return
                #auto-refresh:
                if self.auto_refresh_delay > 0 and not self.is_cancelled(
                        sequence):
                    client_options = packet[
                        10]  #info about this packet from the encoder
                    gobject.idle_add(self.schedule_auto_refresh, window, w, h,
                                     coding, options, client_options)

        self.queue_damage(make_data_packet)
Example #8
 def mmap_send(self, coding, data):
     start = time.time()
     mmap_data = self._mmap_send(data)
     end = time.time()
     log("%s MBytes/s - %s bytes written to mmap in %s ms",
         int(len(data) / (end - start) / 1024 / 1024), len(data),
         dec1(1000 * (end - start)))
     if mmap_data is not None:
         self.global_statistics.mmap_bytes_sent += len(data)
         coding = "mmap"
         data = mmap_data
     return coding, data
Example #9
 def mmap_send(self, coding, data):
     start = time.time()
     mmap_data = self._mmap_send(data)
     elapsed = time.time() - start + 0.000000001  #make sure never zero!
     log("%s MBytes/s - %s bytes written to mmap in %s ms",
         int(len(data) / elapsed / 1024 / 1024), len(data),
         dec1(1000 * elapsed))
     if mmap_data is not None:
         self.global_statistics.mmap_bytes_sent += len(data)
         coding = "mmap"
         data = mmap_data
     return coding, data
Example #10
 def send_delayed():
     """ move the delayed rectangles to the expired list """
     if self._damage_delayed:
         damage_time = self._damage_delayed[0]
         log(
             "send_delayed for wid %s, batch delay is %s, elapsed time is %s ms",
             self.wid, self.batch_config.delay,
             dec1(1000 * (time.time() - damage_time)))
         delayed = self._damage_delayed
         self._damage_delayed = None
         self.send_delayed_regions(*delayed)
     else:
         log("window %s already removed from delayed list?", self.wid)
     return False
Example #11
def update_batch_delay(batch, factors):
    """
        Given a list of factors of the form:
        [(description, factor, weight)]
        we calculate a new batch delay.
        We use a time-weighted average of previous delays as a starting value,
        then combine it with the new factors.
    """
    last_updated = batch.last_updated
    current_delay = batch.delay
    now = time.time()
    avg = 0
    tv, tw = 0.0, 0.0
    decay = max(1, logp(current_delay/batch.min_delay)/5.0)
    if len(batch.last_delays)>0:
        #get the weighted average
        #older values matter less, we decay them according to how much we batch already
        #(older values matter more when we batch a lot)
        for when, delay in batch.last_delays:
            #newer matter more:
            w = 1.0/(1.0+((now-when)/decay)**2)
            d = max(batch.min_delay, min(batch.max_delay, delay))
            tv += d*w
            tw += w
        avg = tv / tw
    hist_w = tw

    valid_factors = [x for x in factors if x is not None]
    all_factors_weight = sum([w for _,_,w in valid_factors])
    if all_factors_weight==0:
        log("update_batch_delay: no weights yet!")
        return
    max_factor = max([f for _,f,_ in valid_factors])
    #mitigate low factors if we have some really high ones:
    factor_min_limit = min(0.5, max_factor/10.0)
    for _, factor, weight in valid_factors:
        actual_factor = max(factor_min_limit, factor)
        target_delay = max(batch.min_delay, min(batch.max_delay, current_delay*actual_factor))
        w = max(1, hist_w)*weight/all_factors_weight
        tw += w
        tv += target_delay*w
    batch.delay = max(batch.min_delay, min(batch.max_delay, tv / tw))
    batch.last_updated = now
    if DEBUG_DELAY:
        decimal_delays = [dec1(x) for _,x in batch.last_delays]
        if len(decimal_delays)==0:
            decimal_delays.append(0)
        logfactors = [(msg, dec2(f), dec2(w)) for (msg, f, w) in valid_factors]
        rec = ("update_batch_delay: wid=%s, last updated %s ms ago, decay=%s, change factor=%s%%, delay min=%s, avg=%s, max=%s, cur=%s, w. average=%s, tot wgt=%s, hist_w=%s, new delay=%s\n %s",
                batch.wid, dec2(1000.0*now-1000.0*last_updated), dec2(decay), dec1(100*(batch.delay/current_delay-1)), min(decimal_delays), dec1(sum(decimal_delays)/len(decimal_delays)), max(decimal_delays),
                dec1(current_delay), dec1(avg), dec1(tw), dec1(hist_w), dec1(batch.delay), "\n ".join([str(x) for x in logfactors]))
        add_DEBUG_DELAY_MESSAGE(rec)
Example #12
def test_calculate_time_weighted_average():
	#event_time, value
	now = time.time()
	sample_size = 100000
	data = []
	t = now - sample_size
	for _ in xrange(sample_size):
		#v = random.randint(0, 10000)
		v = random.random()
		data.append((t, v))
		t += 1
	start = time.time()
	v = calculate_time_weighted_average(data)
	end = time.time()
	print("calculate_time_weighted_average(%s records)=%s" % (len(data), v))
	print("elapsed time: %sms" % dec1(1000*(end-start)))
Example #13
def test_calculate_time_weighted_average():
    #event_time, value
    now = time.time()
    sample_size = 100000
    data = []
    t = now - sample_size
    for _ in xrange(sample_size):
        #v = random.randint(0, 10000)
        v = random.random()
        data.append((t, v))
        t += 1
    start = time.time()
    v = calculate_time_weighted_average(data)
    end = time.time()
    print("calculate_time_weighted_average(%s records)=%s" % (len(data), v))
    print("elapsed time: %sms" % dec1(1000 * (end - start)))
Example #14
 def may_send_delayed(self):
     """ send the delayed region for processing if there is no client backlog """
     if not self._damage_delayed:
         log("window %s already removed from delayed list?", self.wid)
         return False
     damage_time = self._damage_delayed[0]
     packets_backlog = self.get_packets_backlog()
     now = time.time()
     actual_delay = 1000.0*(now-damage_time)
     if packets_backlog>0:
         if actual_delay<self.batch_config.max_delay:
             log("send_delayed for wid %s, delaying again because of backlog: %s packets, batch delay is %s, elapsed time is %s ms",
                     self.wid, packets_backlog, self.batch_config.delay, dec1(actual_delay))
             #this method will get fired again when damage_packet_acked is called
             return False
         else:
             log.warn("send_delayed for wid %s, elapsed time %s is above limit of %s - sending now", self.wid, dec1(actual_delay), dec1(self.batch_config.max_delay))
     else:
         log("send_delayed for wid %s, batch delay is %s, elapsed time is %s ms", self.wid, dec1(self.batch_config.delay), dec1(actual_delay))
     self.batch_config.last_actual_delays.append((now, actual_delay))
     self.do_send_delayed_region()
     return False
Example #15
    def process_damage_region(self, damage_time, pixmap, x, y, w, h, coding,
                              sequence, options):
        """
            Called by 'damage_now' or 'send_delayed_regions' to process a damage region,
            we extract the rgb data from the pixmap and place it on the damage queue.
        """
        if w == 0 or h == 0:
            return
        process_damage_time = time.time()
        data = get_rgb_rawdata(damage_time, process_damage_time, self.wid,
                               pixmap, x, y, w, h, coding, sequence, options)
        if data:
            log(
                "process_damage_regions: adding pixel data %s to queue, elapsed time: %s ms",
                data[:6], dec1(1000 * (time.time() - damage_time)))

            def make_data_packet(*args):
                #NOTE: this function is called from the damage data thread!
                packet = self.make_data_packet(*data)
                if packet:
                    self.queue_damage_packet(packet, damage_time,
                                             process_damage_time)

            self.queue_damage(make_data_packet)
Example #16
def calculate_batch_delay(window, wid, batch, global_statistics, statistics,
                          video_encoder=None, video_encoder_lock=None, video_encoder_speed=None, video_encoder_quality=None,
                          fixed_quality=-1, fixed_speed=-1):
    """
        Calculates a new batch delay.
        We first gather some statistics,
        then use them to calculate a number of factors,
        which are then used to adjust the batch delay in 'update_batch_delay'.
    """
    #the number of pixels which can be considered 'low' in terms of backlog.
    #Generally, just one full frame, (more with mmap because it is so fast)
    low_limit = 1024*1024
    if window:
        ww, wh = window.get_dimensions()
        low_limit = max(8*8, ww*wh)
        if global_statistics.mmap_size>0:
            #mmap can accumulate much more as it is much faster
            low_limit *= 4
    #client latency: (how long it takes for a packet to get to the client and get the echo back)
    avg_client_latency, recent_client_latency = 0.1, 0.1    #assume 100ms until we get some data
    if len(global_statistics.client_latency)>0:
        data = [(when, latency) for _, when, _, latency in list(global_statistics.client_latency)]
        avg_client_latency, recent_client_latency = calculate_time_weighted_average(data)
        global_statistics.avg_client_latency = avg_client_latency
    #client ping latency: from ping packets
    avg_client_ping_latency, recent_client_ping_latency = 0.1, 0.1    #assume 100ms until we get some data
    if len(global_statistics.client_ping_latency)>0:
        avg_client_ping_latency, recent_client_ping_latency = calculate_time_weighted_average(list(global_statistics.client_ping_latency))
    #server ping latency: from ping packets
    avg_server_ping_latency, recent_server_ping_latency = 0.1, 0.1    #assume 100ms until we get some data
    if len(global_statistics.server_ping_latency)>0:
        avg_server_ping_latency, recent_server_ping_latency = calculate_time_weighted_average(list(global_statistics.server_ping_latency))
    #damage "in" latency: (the time it takes for damage requests to be processed only)
    avg_damage_in_latency, recent_damage_in_latency = 0, 0
    if len(statistics.damage_in_latency)>0:
        data = [(when, latency) for when, _, _, latency in list(statistics.damage_in_latency)]
        avg_damage_in_latency, recent_damage_in_latency =  calculate_time_weighted_average(data)
    #damage "out" latency: (the time it takes for damage requests to be processed and sent out)
    avg_damage_out_latency, recent_damage_out_latency = 0, 0
    if len(statistics.damage_out_latency)>0:
        data = [(when, latency) for when, _, _, latency in list(statistics.damage_out_latency)]
        avg_damage_out_latency, recent_damage_out_latency = calculate_time_weighted_average(data)
    #client decode speed:
    avg_decode_speed, recent_decode_speed = None, None
    if len(statistics.client_decode_time)>0:
        #the elapsed time recorded is in microseconds, so multiply by 1000*1000 to get the real value:
        avg_decode_speed, recent_decode_speed = calculate_timesize_weighted_average(list(statistics.client_decode_time), sizeunit=1000*1000)
    #network send speed:
    avg_send_speed, recent_send_speed = None, None
    if len(statistics.damage_send_speed)>0:
        avg_send_speed, recent_send_speed = calculate_timesize_weighted_average(list(statistics.damage_send_speed))
    max_latency = max(avg_damage_in_latency, recent_damage_in_latency, avg_damage_out_latency, recent_damage_out_latency)

    #for each indicator: (description, factor, weight)
    factors = []

    #damage "in" latency factor:
    if len(statistics.damage_in_latency)>0:
        msg = "damage processing latency:"
        target_latency = 0.010 + (0.050*low_limit/1024.0/1024.0)
        factors.append(calculate_for_target(msg, target_latency, avg_damage_in_latency, recent_damage_in_latency, aim=0.8, slope=0.005, smoothing=sqrt))
    #damage "out" latency
    if len(statistics.damage_out_latency)>0:
        msg = "damage send latency:"
        target_latency = 0.025 + (0.060*low_limit/1024.0/1024.0)
        factors.append(calculate_for_target(msg, target_latency, avg_damage_out_latency, recent_damage_out_latency, aim=0.8, slope=0.010, smoothing=sqrt))
    #send speed:
    if avg_send_speed is not None and recent_send_speed is not None:
        #our calculate methods aim for lower values, so invert speed
        #this is how long it takes to send 1MB:
        avg1MB = 1.0*1024*1024/avg_send_speed
        recent1MB = 1.0*1024*1024/recent_send_speed
        #we only really care about this when the speed is quite low,
        #so adjust the weight accordingly:
        minspeed = float(128*1024)
        div = logp(max(recent_send_speed, minspeed)/minspeed)
        msg = "network send speed: avg=%s, recent=%s (KBytes/s), div=%s" % (int(avg_send_speed/1024), int(recent_send_speed/1024), div)
        factors.append(calculate_for_average(msg, avg1MB, recent1MB, weight_offset=1.0, weight_div=div))
    #client decode time:
    if avg_decode_speed is not None and recent_decode_speed is not None:
        msg = "client decode speed: avg=%s, recent=%s (MPixels/s)" % (dec1(avg_decode_speed/1000/1000), dec1(recent_decode_speed/1000/1000))
        #our calculate methods aim for lower values, so invert speed
        #this is how long it takes to decode 1M pixels:
        avg1MB = 1.0*1024*1024/avg_decode_speed
        recent1MB = 1.0*1024*1024/recent_decode_speed
        factors.append(calculate_for_average(msg, avg1MB, recent1MB, weight_offset=0.0))
    #elapsed time without damage:
    if batch.last_updated>0:
        #If nothing happens for a while then we can reduce the batch delay,
        #however we must ensure this is not caused by a high damage latency
        #so we ignore short elapsed times.
        ignore_time = max(max_latency+batch.recalculate_delay, batch.delay+batch.recalculate_delay)
        ignore_count = 2 + ignore_time / batch.recalculate_delay
        elapsed = time.time()-batch.last_updated
        n_skipped_calcs = elapsed / batch.recalculate_delay
        #the longer the elapsed time, the more we slash:
        weight = logp(max(0, n_skipped_calcs-ignore_count))
        msg = "delay not updated for %s ms (skipped %s times - highest latency is %s)" % (dec1(1000*elapsed), int(n_skipped_calcs), dec1(1000*max_latency))
        factors.append((msg, 0, weight))

    target_latency = statistics.get_target_client_latency(global_statistics.min_client_latency, avg_client_latency)
    if len(global_statistics.client_latency)>0 and avg_client_latency is not None and recent_client_latency is not None:
        #client latency: (we want to keep client latency as low as can be)
        msg = "client latency:"
        factors.append(calculate_for_target(msg, target_latency, avg_client_latency, recent_client_latency, aim=0.8, slope=0.005, smoothing=sqrt))
    if len(global_statistics.client_ping_latency)>0:
        msg = "client ping latency:"
        factors.append(calculate_for_target(msg, target_latency, avg_client_ping_latency, recent_client_ping_latency, aim=0.95, slope=0.005, smoothing=sqrt, weight_multiplier=0.25))
    if len(global_statistics.server_ping_latency)>0:
        msg = "server ping latency:"
        factors.append(calculate_for_target(msg, target_latency, avg_server_ping_latency, recent_server_ping_latency, aim=0.95, slope=0.005, smoothing=sqrt, weight_multiplier=0.25))
    #damage packet queue size: (includes packets from all windows)
    factors.append(queue_inspect("damage packet queue size:", global_statistics.damage_packet_qsizes, smoothing=sqrt))
    #damage pixels waiting in the packet queue: (extract data for our window id only)
    time_values = [(event_time, value) for event_time, dwid, value in list(global_statistics.damage_packet_qpixels) if dwid==wid]
    factors.append(queue_inspect("damage packet queue pixels:", time_values, div=low_limit, smoothing=sqrt))
    #damage data queue: (This is an important metric since each item will consume a fair amount of memory and each will later on go through the other queues.)
    factors.append(queue_inspect("damage data queue:", global_statistics.damage_data_qsizes))
    if global_statistics.mmap_size>0:
        #full: effective range is 0.0 to ~1.2
        full = 1.0-float(global_statistics.mmap_free_size)/global_statistics.mmap_size
        #aim for ~50%
        factors.append(("mmap area %s%% full" % int(100*full), logp(2*full), 2*full))
    #now use those factors to drive the delay change:
    update_batch_delay(batch, factors)
    #***************************************************************
    #special hook for video encoders
    if video_encoder is None:
        return

    #***********************************************************
    # encoding speed:
    #    0    for highest compression/slower
    #    100  for lowest compression/fast
    # here we try to minimize damage-latency and client decoding time
    if fixed_speed>=0:
        new_speed = fixed_speed
        msg = "video encoder using fixed speed: %s", fixed_speed
    else:
        min_damage_latency = 0.010 + (0.050*low_limit/1024.0/1024.0)
        target_damage_latency = min_damage_latency + batch.delay/1000.0
        dam_lat = (avg_damage_in_latency or 0)/target_damage_latency
        target_decode_speed = 1*1000*1000      #1 MPixels/s
        dec_lat = 0.0
        if avg_decode_speed:
            dec_lat = target_decode_speed/(avg_decode_speed or target_decode_speed)
        target = max(dam_lat, dec_lat, 0.0)
        target_speed = 100.0 * min(1.0, target)
        video_encoder_speed.append((time.time(), target_speed))
        _, new_speed = calculate_time_weighted_average(video_encoder_speed)
        msg = "video encoder speed factors: min_damage_latency=%s, target_damage_latency=%s, batch.delay=%s, dam_lat=%s, dec_lat=%s, target=%s, new_speed=%s", \
                 dec2(min_damage_latency), dec2(target_damage_latency), dec2(batch.delay), dec2(dam_lat), dec2(dec_lat), int(target_speed), int(new_speed)
    log(*msg)
    if DEBUG_DELAY:
        add_DEBUG_DELAY_MESSAGE(msg)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try to minimize client-latency, packet-backlog and batch.delay
    if fixed_quality>=0:
        new_quality = fixed_quality
        msg = "video encoder using fixed quality: %s", fixed_quality
    else:
        packets_backlog, _, _ = statistics.get_backlog(target_latency)
        packets_bl = 1.0 - logp(packets_backlog/low_limit)
        batch_q = 4.0 * batch.min_delay / batch.delay
        target = max(packets_bl, batch_q)
        latency_q = 0.0
        if len(global_statistics.client_latency)>0 and recent_client_latency>0:
            latency_q = 4.0 * target_latency / recent_client_latency
            target = min(target, latency_q)
        target_quality = 100.0*(min(1.0, max(0.0, target)))
        video_encoder_quality.append((time.time(), target_quality))
        new_quality, _ = calculate_time_weighted_average(video_encoder_quality)
        msg = "video encoder quality factors: packets_bl=%s, batch_q=%s, latency_q=%s, target=%s, new_quality=%s", \
                 dec2(packets_bl), dec2(batch_q), dec2(latency_q), int(target_quality), int(new_quality)
    log(*msg)
    if DEBUG_DELAY:
        add_DEBUG_DELAY_MESSAGE(msg)
    video_encoder_lock.acquire()
    try:
        video_encoder.set_encoding_speed(new_speed)
        video_encoder.set_encoding_quality(new_quality)
    finally:
        video_encoder_lock.release()
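The encoder speed heuristic above reduces to two ratios clamped into a 0-100 range; here is a short numeric walk-through with made-up measurements (the formulas are the ones used above):

low_limit = 1024 * 1024                                               #one full 1024x1024 frame
min_damage_latency = 0.010 + (0.050 * low_limit / 1024.0 / 1024.0)    #0.060s
batch_delay = 40.0                                                    #ms, hypothetical
target_damage_latency = min_damage_latency + batch_delay / 1000.0     #0.100s
avg_damage_in_latency = 0.080                                         #s, hypothetical measurement
dam_lat = avg_damage_in_latency / target_damage_latency               #~0.8
avg_decode_speed = 4 * 1000 * 1000                                    #4 MPixels/s, hypothetical
dec_lat = (1 * 1000 * 1000) / avg_decode_speed                        #0.25
target_speed = 100.0 * min(1.0, max(dam_lat, dec_lat, 0.0))           #~80
print(target_speed)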
Example #17
    def damage(self, window, x, y, w, h, options=None):
        """ decide what to do with the damage area:
            * send it now (if not congested)
            * add it to an existing delayed region
            * create a new delayed region if we find the client needs it
            Also takes care of updating the batch-delay in case of congestion.
            The options dict is currently used for carrying the
            "quality" and "override_options" values, and potentially others.
            When damage requests are delayed and bundled together,
            specify an option of "override_options"=True to
            force the current options to override the old ones,
            otherwise they are only merged.
        """
        self.may_calculate_batch_delay(window)
        if w==0 or h==0:
            #we may fire damage ourselves,
            #in which case the dimensions may be zero (if so configured by the client)
            return

        if self._damage_delayed:
            #use existing delayed region:
            region = self._damage_delayed[2]
            region.union_with_rect(gtk.gdk.Rectangle(x, y, w, h))
            #merge/override options
            if options is not None:
                override = options.get("override_options", False)
                existing_options = self._damage_delayed[4]
                for k,v in options.items():
                    if override or k not in existing_options:
                        existing_options[k] = v
            log("damage(%s, %s, %s, %s, %s) wid=%s, using existing delayed region: %s", x, y, w, h, options, self.wid, self._damage_delayed)
            return

        now = time.time()
        packets_backlog = self.get_packets_backlog()
        if packets_backlog==0 and not self.batch_config.always and self.batch_config.delay<=self.batch_config.min_delay:
            #send without batching:
            log("damage(%s, %s, %s, %s, %s) wid=%s, sending now with sequence %s", x, y, w, h, options, self.wid, self._sequence)
            ww, wh = window.get_dimensions()
            actual_encoding = self.get_best_encoding(window.is_OR(), w*h, ww, wh, self.encoding)
            if actual_encoding in ("x264", "vpx"):
                x, y = 0, 0
                w, h = ww, wh
            self.process_damage_region(now, window, x, y, w, h, actual_encoding, options)
            self.batch_config.last_delays.append((now, 0))
            self.batch_config.last_actual_delays.append((now, 0))
            return

        #create a new delayed region:
        region = gtk.gdk.Region()
        region.union_with_rect(gtk.gdk.Rectangle(x, y, w, h))
        self._damage_delayed_expired = False
        self._damage_delayed = now, window, region, self.encoding, options or {}
        log("damage(%s, %s, %s, %s, %s) wid=%s, scheduling batching expiry for sequence %s in %s ms", x, y, w, h, options, self.wid, self._sequence, dec1(self.batch_config.delay))
        self.batch_config.last_delays.append((now, self.batch_config.delay))
        self.expire_timer = gobject.timeout_add(int(self.batch_config.delay), self.expire_delayed_region)
Example #18
    def process_damage_region(self, damage_time, window, x, y, w, h, coding, options):
        """
            Called by 'damage' or 'send_delayed_regions' to process a damage region,
            we extract the rgb data from the pixmap and place it on the damage queue.
            This runs in the UI thread.
        """
        if w==0 or h==0:
            return
        # It's important to acknowledge changes *before* we extract them,
        # to avoid a race condition.
        if not window.is_managed():
            log.warn("the window %s is not composited!?", window)
            return
        window.acknowledge_changes()

        self._sequence += 1
        sequence = self._sequence
        pixmap = window.get_property("client-contents")
        if self.is_cancelled(sequence):
            log("get_window_pixmap: dropping damage request with sequence=%s", sequence)
            return  None
        if pixmap is None:
            log.error("get_window_pixmap: wtf, pixmap is None for window %s, wid=%s", window, self.wid)
            return
        process_damage_time = time.time()
        data = get_rgb_rawdata(damage_time, process_damage_time, self.wid, pixmap, x, y, w, h, coding, sequence, options)
        if not data:
            return
        log("process_damage_regions: adding pixel data %s to queue, elapsed time: %s ms", data[:6], dec1(1000*(time.time()-damage_time)))
        def make_data_packet(*args):
            #NOTE: this function is called from the damage data thread!
            packet = self.make_data_packet(*data)
            if packet:
                self.queue_damage_packet(pixmap, packet, damage_time, process_damage_time)
                if self.encoding in ("png", "rgb24"):
                    #primary encoding is lossless, no need for auto-refresh
                    return
                #auto-refresh:
                if self.auto_refresh_delay>0 and not self.is_cancelled(sequence):
                    client_options = packet[10]     #info about this packet from the encoder
                    gobject.idle_add(self.schedule_auto_refresh, window, w, h, coding, options, client_options)
        self.queue_damage(make_data_packet)
Example #19
    def damage(self, window, x, y, w, h, options=None):
        """ decide what to do with the damage area:
            * send it now (if not congested)
            * add it to an existing delayed region
            * create a new delayed region if we find the client needs it
            Also takes care of updating the batch-delay in case of congestion.
            The options dict is currently used for carrying the
            "quality" and "override_options" values, and potentially others.
            When damage requests are delayed and bundled together,
            specify an option of "override_options"=True to
            force the current options to override the old ones,
            otherwise they are only merged.
        """
        self.may_calculate_batch_delay(window)
        if w == 0 or h == 0:
            #we may fire damage ourselves,
            #in which case the dimensions may be zero (if so configured by the client)
            return

        if self._damage_delayed:
            #use existing delayed region:
            region = self._damage_delayed[2]
            region.union_with_rect(gtk.gdk.Rectangle(x, y, w, h))
            #merge/override options
            if options is not None:
                override = options.get("override_options", False)
                existing_options = self._damage_delayed[4]
                for k, v in options.items():
                    if override or k not in existing_options:
                        existing_options[k] = v
            log(
                "damage(%s, %s, %s, %s, %s) wid=%s, using existing delayed region: %s",
                x, y, w, h, options, self.wid, self._damage_delayed)
            return

        now = time.time()
        packets_backlog = self.get_packets_backlog()
        if packets_backlog == 0 and not self.batch_config.always and self.batch_config.delay <= self.batch_config.min_delay:
            #send without batching:
            log(
                "damage(%s, %s, %s, %s, %s) wid=%s, sending now with sequence %s",
                x, y, w, h, options, self.wid, self._sequence)
            ww, wh = window.get_dimensions()
            actual_encoding = self.get_best_encoding(window.is_OR(), w * h, ww,
                                                     wh, self.encoding)
            if actual_encoding in ("x264", "vpx"):
                x, y = 0, 0
                w, h = ww, wh
            self.process_damage_region(now, window, x, y, w, h,
                                       actual_encoding, options)
            self.batch_config.last_delays.append((now, 0))
            self.batch_config.last_actual_delays.append((now, 0))
            return

        #create a new delayed region:
        region = gtk.gdk.Region()
        region.union_with_rect(gtk.gdk.Rectangle(x, y, w, h))
        self._damage_delayed_expired = False
        self._damage_delayed = now, window, region, self.encoding, options or {}
        log(
            "damage(%s, %s, %s, %s, %s) wid=%s, scheduling batching expiry for sequence %s in %s ms",
            x, y, w, h, options, self.wid, self._sequence,
            dec1(self.batch_config.delay))
        self.batch_config.last_delays.append((now, self.batch_config.delay))
        self.expire_timer = gobject.timeout_add(int(self.batch_config.delay),
                                                self.expire_delayed_region)
Example #20
    def damage(self, window, x, y, w, h, options=None):
        """ decide what to do with the damage area:
            * send it now (if not congested or batch.enabled is off)
            * add it to an existing delayed region
            * create a new delayed region if we find the client needs it
            Also takes care of updating the batch-delay in case of congestion.
            The options dict is currently used for carrying the
            "jpegquality" value, it could also be used for other purposes.
            Be aware though that when multiple
            damage requests are delayed and bundled together,
            the options may get quashed! So, specify a "batching"=False
            option to ensure no batching will occur for this request.
        """
        if w == 0 or h == 0:
            return
        now = time.time()
        coding = self.encoding

        def damage_now(reason):
            self._sequence += 1
            logrec = "damage(%s, %s, %s, %s) %s, wid=%s, sending now with sequence %s", x, y, w, h, reason, self.wid, self._sequence
            log(*logrec)
            pixmap = self.get_window_pixmap(window, self._sequence)
            if pixmap:
                ww, wh = window.get_dimensions()
                actual_encoding = self.get_best_encoding(w * h, ww, wh, coding)
                if actual_encoding in ("x264", "vpx"):
                    #always fullscreen
                    self.process_damage_region(now, pixmap, 0, 0, ww, wh,
                                               actual_encoding, self._sequence,
                                               options)
                else:
                    self.process_damage_region(now, pixmap, x, y, w, h,
                                               actual_encoding, self._sequence,
                                               options)
                self.batch_config.last_delays.append((now, 0))
                self.batch_config.last_updated = time.time()

        #record this damage event in the damage_last_events queue
        #note: we may actually end up sending more pixels than this value (ie: full screen update)
        now = time.time()

        if not self.batch_config.enabled:
            return damage_now("batching disabled")
        if options and options.get("batching", True) is False:
            return damage_now("batching option is off")

        self.may_calculate_batch_delay(window)

        if self._damage_delayed:
            region = self._damage_delayed[2]
            region.union_with_rect(gtk.gdk.Rectangle(x, y, w, h))
            log(
                "damage(%s, %s, %s, %s) wid=%s, using existing delayed region: %s",
                x, y, w, h, self.wid, self._damage_delayed)
            return

        if not self.batch_config.always and self.batch_config.delay <= self.batch_config.min_delay:
            return damage_now(
                "delay (%s) is at the minimum threshold (%s)" %
                (self.batch_config.delay, self.batch_config.min_delay))

        #create a new delayed region:
        region = gtk.gdk.Region()
        region.union_with_rect(gtk.gdk.Rectangle(x, y, w, h))
        self._sequence += 1
        self._damage_delayed = (now, window, region, coding, self._sequence,
                                options)

        def send_delayed():
            """ move the delayed rectangles to the expired list """
            if self._damage_delayed:
                damage_time = self._damage_delayed[0]
                log(
                    "send_delayed for wid %s, batch delay is %s, elapsed time is %s ms",
                    self.wid, self.batch_config.delay,
                    dec1(1000 * (time.time() - damage_time)))
                delayed = self._damage_delayed
                self._damage_delayed = None
                self.send_delayed_regions(*delayed)
            else:
                log("window %s already removed from delayed list?", self.wid)
            return False

        log(
            "damage(%s, %s, %s, %s) wid=%s, scheduling batching expiry for sequence %s in %s ms",
            x, y, w, h, self.wid, self._sequence,
            dec1(self.batch_config.delay))
        self.batch_config.last_delays.append((now, self.batch_config.delay))
        gobject.timeout_add(int(self.batch_config.delay), send_delayed)
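The damage method above picks one of several paths in a fixed order; the following condensed, purely illustrative predicate (names are mine) mirrors that order:

def batching_decision(batch_enabled, batching_option, has_delayed_region,
                      delay, min_delay, always):
    #mirrors the order of checks in 'damage' above:
    if not batch_enabled:
        return "send now (batching disabled)"
    if batching_option is False:
        return "send now (batching option is off)"
    if has_delayed_region:
        return "merge into the existing delayed region"
    if not always and delay <= min_delay:
        return "send now (delay is at the minimum threshold)"
    return "schedule a new delayed region"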
Example #21
def calculate_batch_delay(window, wid, batch, global_statistics, statistics,
                          video_encoder=None, video_encoder_lock=None, video_encoder_speed=None, video_encoder_quality=None):
    """
        Calculates a new batch delay.
        We first gather some statistics,
        then use them to calculate a number of factors,
        which are then used to adjust the batch delay in 'update_batch_delay'.
    """
    #the number of pixels which can be considered 'low' in terms of backlog.
    #Generally, just one full frame, (more with mmap because it is so fast)
    low_limit = 1024*1024
    if window:
        ww, wh = window.get_dimensions()
        low_limit = max(8*8, ww*wh)
        if global_statistics.mmap_size>0:
            #mmap can accumulate much more as it is much faster
            low_limit *= 4
    #client latency: (how long it takes for a packet to get to the client and get the echo back)
    avg_client_latency, recent_client_latency = 0.1, 0.1    #assume 100ms until we get some data
    if len(global_statistics.client_latency)>0:
        data = [(when, latency) for _, when, _, latency in list(global_statistics.client_latency)]
        avg_client_latency, recent_client_latency = calculate_time_weighted_average(data)
    #damage "in" latency: (the time it takes for damage requests to be processed only)
    avg_damage_in_latency, recent_damage_in_latency = 0, 0
    if len(statistics.damage_in_latency)>0:
        data = [(when, latency) for when, _, _, latency in list(statistics.damage_in_latency)]
        avg_damage_in_latency, recent_damage_in_latency =  calculate_time_weighted_average(data)
    #damage "out" latency: (the time it takes for damage requests to be processed and sent out)
    avg_damage_out_latency, recent_damage_out_latency = 0, 0
    if len(statistics.damage_out_latency)>0:
        data = [(when, latency) for when, _, _, latency in list(statistics.damage_out_latency)]
        avg_damage_out_latency, recent_damage_out_latency = calculate_time_weighted_average(data)
    #client decode speed:
    avg_decode_speed, recent_decode_speed = None, None
    if len(statistics.client_decode_time)>0:
        #the elapsed time recorded is in microseconds, so multiply by 1000*1000 to get the real value:
        avg_decode_speed, recent_decode_speed = calculate_timesize_weighted_average(list(statistics.client_decode_time), sizeunit=1000*1000)
    #network send speed:
    avg_send_speed, recent_send_speed = None, None
    if len(statistics.damage_send_speed)>0:
        avg_send_speed, recent_send_speed = calculate_timesize_weighted_average(list(statistics.damage_send_speed))
    #client backlog: (packets and pixels that should have been processed by now - taking into account latency)
    packets_backlog, pixels_backlog = 0, 0
    if len(statistics.damage_ack_pending)>0:
        sent_before = time.time()-avg_client_latency
        for sent_at, pixels in statistics.damage_ack_pending.values():
            if sent_at>sent_before:
                continue
            packets_backlog += 1
            pixels_backlog += pixels
    max_latency = max(avg_damage_in_latency, recent_damage_in_latency, avg_damage_out_latency, recent_damage_out_latency)

    #for each indicator: (description, factor, weight)
    factors = []

    #damage "in" latency factor:
    if len(statistics.damage_in_latency)>0:
        msg = "damage processing latency:"
        target_latency = 0.010 + (0.050*low_limit/1024.0/1024.0)
        factors.append(calculate_for_target(msg, target_latency, avg_damage_in_latency, recent_damage_in_latency, aim=0.8, slope=0.005, smoothing=sqrt))
    #damage "out" latency
    if len(statistics.damage_out_latency)>0:
        msg = "damage send latency:"
        target_latency = 0.025 + (0.060*low_limit/1024.0/1024.0)
        factors.append(calculate_for_target(msg, target_latency, avg_damage_out_latency, recent_damage_out_latency, aim=0.8, slope=0.010, smoothing=sqrt))
    #send speed:
    if avg_send_speed is not None and recent_send_speed is not None:
        #our calculate methods aim for lower values, so invert speed
        #this is how long it takes to send 1MB:
        avg1MB = 1.0*1024*1024/avg_send_speed
        recent1MB = 1.0*1024*1024/recent_send_speed
        #we only really care about this when the speed is quite low,
        #so adjust the weight accordingly:
        minspeed = float(128*1024)
        div = logp(max(recent_send_speed, minspeed)/minspeed)
        msg = "network send speed: avg=%s, recent=%s (KBytes/s), div=%s" % (int(avg_send_speed/1024), int(recent_send_speed/1024), div)
        factors.append(calculate_for_average(msg, avg1MB, recent1MB, weight_offset=1.0, weight_div=div))
    #client decode time:
    if avg_decode_speed is not None and recent_decode_speed is not None:
        msg = "client decode speed: avg=%s, recent=%s (MPixels/s)" % (dec1(avg_decode_speed/1000/1000), dec1(recent_decode_speed/1000/1000))
        #our calculate methods aim for lower values, so invert speed
        #this is how long it takes to decode 1M pixels:
        avg1MB = 1.0*1024*1024/avg_decode_speed
        recent1MB = 1.0*1024*1024/recent_decode_speed
        factors.append(calculate_for_average(msg, avg1MB, recent1MB, weight_offset=0.0))
    #elapsed time without damage:
    if batch.last_updated>0:
        #If nothing happens for a while then we can reduce the batch delay,
        #however we must ensure this is not caused by a high damage latency
        #so we ignore short elapsed times.
        ignore_time = max(max_latency+batch.recalculate_delay, batch.delay+batch.recalculate_delay)
        ignore_count = 2 + ignore_time / batch.recalculate_delay
        elapsed = time.time()-batch.last_updated
        n_skipped_calcs = elapsed / batch.recalculate_delay
        #the longer the elapsed time, the more we slash:
        weight = logp(max(0, n_skipped_calcs-ignore_count))
        msg = "delay not updated for %s ms (skipped %s times - highest latency is %s)" % (dec1(1000*elapsed), int(n_skipped_calcs), dec1(1000*max_latency))
        factors.append((msg, 0, weight))
    #client latency: (we want to keep client latency as low as can be)
    if len(global_statistics.client_latency)>0 and avg_client_latency is not None and recent_client_latency is not None:
        target_latency = 0.005
        if global_statistics.min_client_latency:
            target_latency = max(target_latency, global_statistics.min_client_latency)
        msg = "client latency:"
        factors.append(calculate_for_target(msg, target_latency, avg_client_latency, recent_client_latency, aim=0.8, slope=0.005, smoothing=sqrt, weight_multiplier=4.0))
    #damage packet queue size: (includes packets from all windows)
    factors.append(queue_inspect("damage packet queue size:", global_statistics.damage_packet_qsizes, smoothing=sqrt))
    #damage pixels waiting in the packet queue: (extract data for our window id only)
    time_values = [(event_time, value) for event_time, dwid, value in list(global_statistics.damage_packet_qpixels) if dwid==wid]
    factors.append(queue_inspect("damage packet queue pixels:", time_values, div=low_limit, smoothing=sqrt))
    #damage data queue: (This is an important metric since each item will consume a fair amount of memory and each will later on go through the other queues.)
    factors.append(queue_inspect("damage data queue:", global_statistics.damage_data_qsizes))
    last_packets_backlog, last_pixels_backlog = 0, 0
    if statistics.last_packet_send_stats is not None:
        #packet and pixels backlog:
        last_packets_backlog, last_pixels_backlog = statistics.last_client_delta
        factors.append(calculate_for_target("client packets backlog:", 0, last_packets_backlog, packets_backlog, slope=1.0, smoothing=sqrt))
        factors.append(calculate_for_target("client pixels backlog:", 0, last_pixels_backlog, pixels_backlog, div=low_limit, slope=1.0, smoothing=sqrt))
    if global_statistics.mmap_size>0:
        #full: effective range is 0.0 to ~1.2
        full = 1.0-float(global_statistics.mmap_free_size)/global_statistics.mmap_size
        #aim for ~50%
        factors.append(("mmap area %s%% full" % int(100*full), logp(2*full), 2*full))
    #now use those factors to drive the delay change:
    update_batch_delay(batch, factors)
    #***************************************************************
    #special hook for video encoders
    if (not AUTO_QUALITY and not AUTO_SPEED) or video_encoder is None:
        return

    #***********************************************************
    # encoding speed:
    #    0    for highest compression/slower
    #    100  for lowest compression/fast
    # here we try to minimize damage-latency and client decoding time
    min_damage_latency = 0.010 + (0.050*low_limit/1024.0/1024.0)
    target_damage_latency = min_damage_latency + batch.delay/1000.0
    dam_lat = (avg_damage_in_latency or 0)/target_damage_latency
    target_decode_speed = 1*1000*1000      #1 MPixels/s
    dec_lat = target_decode_speed/(avg_decode_speed or target_decode_speed)
    target = max(dam_lat, dec_lat, 0.0)
    target_speed = 100.0 * min(1.0, target)
    video_encoder_speed.append((time.time(), target_speed))
    _, new_speed = calculate_time_weighted_average(video_encoder_speed)
    msg = "video encoder speed factors: min_damage_latency=%s, target_damage_latency=%s, batch.delay=%s, dam_lat=%s, dec_lat=%s, target=%s, new_speed=%s", \
             dec2(min_damage_latency), dec2(target_damage_latency), dec2(batch.delay), dec2(dam_lat), dec2(dec_lat), int(target_speed), int(new_speed)
    log(*msg)
    if DEBUG_DELAY:
        add_DEBUG_DELAY_MESSAGE(msg)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try to minimize client-latency, packet-backlog and batch.delay
    packets_bl = 1.0 - logp(last_packets_backlog/low_limit)
    batch_q = 4.0 * batch.min_delay / batch.delay
    target = max(packets_bl, batch_q)
    latency_q = 0.0
    if len(global_statistics.client_latency)>0 and avg_client_latency is not None and recent_client_latency is not None:
        target_latency = 0.005
        if global_statistics.min_client_latency:
            target_latency = max(target_latency, global_statistics.min_client_latency)
        latency_q = 4.0 * target_latency / recent_client_latency
        target = min(target, latency_q)
    target_quality = 100.0*(min(1.0, max(0.0, target)))
    video_encoder_quality.append((time.time(), target_quality))
    new_quality, _ = calculate_time_weighted_average(video_encoder_quality)
    msg = "video encoder quality factors: packets_bl=%s, batch_q=%s, latency_q=%s, target=%s, new_quality=%s", \
             dec2(packets_bl), dec2(batch_q), dec2(latency_q), int(target_quality), int(new_quality)
    log(*msg)
    if DEBUG_DELAY:
        add_DEBUG_DELAY_MESSAGE(msg)
    video_encoder_lock.acquire()
    try:
        if AUTO_SPEED:
            video_encoder.set_encoding_speed(new_speed)
        if AUTO_QUALITY:
            video_encoder.set_encoding_quality(new_quality)
    finally:
        video_encoder_lock.release()
Example #22
 def mmap_send(self, coding, data):
     start = time.time()
     mmap_data = self._mmap_send(data)
     elapsed = time.time()-start+0.000000001 #make sure never zero!
     log("%s MBytes/s - %s bytes written to mmap in %s ms", int(len(data)/elapsed/1024/1024), len(data), dec1(1000*elapsed))
     if mmap_data is not None:
         self.global_statistics.mmap_bytes_sent += len(data)
         coding = "mmap"
         data = mmap_data
     return coding, data
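The tiny epsilon added to the elapsed time is what keeps the MBytes/s figure finite when the mmap write completes within the timer's resolution; below is a self-contained illustration of the same arithmetic (not from the source):

def mmap_throughput(nbytes, start, end):
    #same guard as above: never divide by zero, even if start == end
    elapsed = end - start + 0.000000001
    return int(nbytes / elapsed / 1024 / 1024), 1000 * elapsed

#4MB written "instantly" (identical timestamps) still yields a finite figure:
print(mmap_throughput(4 * 1024 * 1024, 1.0, 1.0))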