Example #1
    def update_averages(self):
        def latency_averages(values):
            avg, recent = calculate_time_weighted_average(values)
            return max(0.001, avg), max(0.001, recent)

        client_latency = tuple(self.client_latency)
        if client_latency:
            data = tuple(
                (when, latency) for _, when, _, latency in client_latency)
            self.min_client_latency = min(x for _, x in data)
            self.avg_client_latency, self.recent_client_latency = latency_averages(
                data)
        #client ping latency: from ping packets
        client_ping_latency = tuple(self.client_ping_latency)
        if client_ping_latency:
            self.min_client_ping_latency = min(x
                                               for _, x in client_ping_latency)
            self.avg_client_ping_latency, self.recent_client_ping_latency = latency_averages(
                client_ping_latency)
        #server ping latency: from ping packets
        server_ping_latency = tuple(self.server_ping_latency)
        if server_ping_latency:
            self.min_server_ping_latency = min(x
                                               for _, x in server_ping_latency)
            self.avg_server_ping_latency, self.recent_server_ping_latency = latency_averages(
                server_ping_latency)
        #set to 0 if we have less than 2 events in the last 60 seconds:
        now = monotonic_time()
        min_time = now - 60
        css = tuple(x for x in tuple(self.congestion_send_speed)
                    if x[0] > min_time)
        acss = 0
        if len(css) >= 2:
            #weighted average of the send speed over the last minute:
            acss = int(calculate_size_weighted_average(css)[0])
            latest_ctime = self.congestion_send_speed[-1][0]
            elapsed = now - latest_ctime
            #require at least one recent event:
            if elapsed < 30:
                #as the last event recedes in the past, increase limit:
                acss *= 1 + elapsed
        self.avg_congestion_send_speed = int(acss)
        #how often we get congestion events:
        #first chunk it into second intervals
        min_time = now - 10
        cst = tuple(x[0] for x in css)
        cps = []
        for t in range(10):
            etime = now - t
            matches = tuple(
                1 for x in cst if x > etime - 1 and x <= etime) or (0, )
            cps.append((etime, sum(matches)))
        #log("cps(%s)=%s (now=%s)", cst, cps, now)
        self.congestion_value = time_weighted_average(cps)
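
Both update_averages variants rely on calculate_time_weighted_average and time_weighted_average, which the excerpts do not show. Below is a minimal sketch of what such helpers could look like, assuming (event_time, value) pairs and a simple age-based decay; the names and the (average, recent) return shape match the call sites above, but the weighting curve and the rpow parameter are assumptions for illustration:

from time import monotonic as monotonic_time

def time_weighted_average(data, rpow=1.1):
    #data: iterable of (event_time, value) pairs;
    #newer events receive a larger weight (assumed decay curve):
    now = monotonic_time()
    vw_sum = w_sum = 0.0
    for event_time, value in data:
        age = max(0.0, now - event_time)
        weight = 1.0 / (1.0 + age) ** rpow
        vw_sum += value * weight
        w_sum += weight
    return vw_sum / w_sum if w_sum > 0 else 0.0

def calculate_time_weighted_average(data):
    #returns (average, recent_average) as the callers above unpack;
    #"recent" simply uses a steeper decay in this sketch:
    return time_weighted_average(data), time_weighted_average(data, rpow=3.0)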
Example #2
    def update_averages(self):
        if len(self.client_latency) > 0:
            data = [(when, latency)
                    for _, when, _, latency in tuple(self.client_latency)]
            self.min_client_latency = min(x for _, x in data)
            self.avg_client_latency, self.recent_client_latency = calculate_time_weighted_average(
                data)
        #client ping latency: from ping packets
        if len(self.client_ping_latency) > 0:
            data = tuple(self.client_ping_latency)
            self.min_client_ping_latency = min(x for _, x in data)
            self.avg_client_ping_latency, self.recent_client_ping_latency = calculate_time_weighted_average(
                data)
        #server ping latency: from ping packets
        if len(self.server_ping_latency) > 0:
            data = tuple(self.server_ping_latency)
            self.min_server_ping_latency = min(x for _, x in data)
            self.avg_server_ping_latency, self.recent_server_ping_latency = calculate_time_weighted_average(
                data)
        #set to 0 if we have less than 2 events in the last 60 seconds:
        min_time = monotonic_time() - 60
        css = tuple(x for x in self.congestion_send_speed if x[0] > min_time)
        if len(css) < 2:
            self.avg_congestion_send_speed = 0
        else:
            #weighted average of the send speed over the last minute:
            self.avg_congestion_send_speed = int(
                calculate_size_weighted_average(css)[0])
        #how often we get congestion events:
        #first chunk it into second intervals
        now = monotonic_time()
        min_time = now - 10
        cst = tuple(x[0] for x in css)
        cps = []
        for t in range(10):
            etime = now - t
            cps.append(
                (etime, sum(1 for x in cst if x > etime - 1 and x <= etime)))
        #log("cps(%s)=%s (now=%s)", cst, cps, now)
        self.congestion_value = time_weighted_average(cps)
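
The send-speed branch also depends on calculate_size_weighted_average. A plausible sketch follows, assuming records of the form (event_time, size, value) weighted by transfer size; the record layout and the tuple return (the callers only read index [0]) are assumptions:

def calculate_size_weighted_average(data):
    #data: assumed (event_time, size, value) records;
    #bigger transfers carry more weight than small ones:
    vw_sum = w_sum = 0.0
    for _event_time, size, value in data:
        weight = max(1, size)
        vw_sum += value * weight
        w_sum += weight
    avg = vw_sum / w_sum if w_sum > 0 else 0.0
    #the callers only use index [0]; the "recent" component
    #is folded into the plain average in this sketch:
    return avg, avg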
Example #3
def get_target_speed(wid, window_dimensions, batch, global_statistics, statistics, min_speed, speed_data):
    low_limit = get_low_limit(global_statistics.mmap_size>0, window_dimensions)
    #***********************************************************
    # encoding speed:
    #    0    for highest compression/slowest
    #    100  for lowest compression/fastest
    # here we try to minimize damage latency while keeping the client decoding cost low

    #megapixels (low_limit is a pixel count, not a rate):
    mpixels = low_limit/1024.0/1024.0
    #for larger window sizes, we should be downscaling,
    #and don't want to wait too long for those anyway:
    ref_damage_latency = 0.010 + 0.025 * (1+mathlog(max(1, mpixels)))

    #abs: try to never go higher than 5 times reference latency:
    dam_lat_abs = max(0, ((statistics.avg_damage_in_latency or 0)-ref_damage_latency) / (ref_damage_latency * 4.0))

    #calculate a target latency and try to get close to it
    avg_delay = batch.delay
    delays = list(batch.last_actual_delays)
    if len(delays)>0:
        #average recent actual delay:
        avg_delay = time_weighted_average(delays)
    #and average that with the current delay (which is lower or equal):
    frame_delay = (avg_delay + batch.delay) / 2.0
    #ensure we always spend at least as much time encoding as we spend batching:
    #(one frame encoding whilst one frame is batching is our ideal result)
    target_damage_latency = max(ref_damage_latency, frame_delay/1000.0)
    #current speed:
    speed = min_speed
    if len(speed_data)>0:
        speed = max(min_speed, time_weighted_average(speed_data))
    #rel: do we need to increase or decrease speed to reach the target:
    dam_lat_rel = speed/100.0 * statistics.avg_damage_in_latency / target_damage_latency

    #ensure we decode at a reasonable speed (for slow / low-power clients)
    #maybe this should be configurable?
    target_decode_speed = 8*1000*1000.0      #8 MPixels/s
    dec_lat = 0.0
    if statistics.avg_decode_speed>0:
        dec_lat = target_decode_speed/(statistics.avg_decode_speed or target_decode_speed)

    #if we have more pixels to encode, we may need to go faster
    #(this is important because the damage latency used by the other factors
    # may aggregate multiple damage requests into one packet - which may skip frames)
    #TODO: reconcile this with video regions
    #only count the last second's worth:
    now = time.time()
    lim = now-1.0
    lde = [w*h for t,_,_,w,h in list(statistics.last_damage_events) if t>=lim]
    pixels = sum(lde)
    mpixels_per_s = pixels/1024.0/1024.0
    pps = 0.0
    if len(lde)>5:
        #above 50 MPixels/s, we should reach 100% speed
        #(even x264 peaks at tens of MPixels/s)
        pps = mpixels_per_s/50.0

    #combine factors: use the highest one:
    target = min(1.0, max(dam_lat_abs, dam_lat_rel, dec_lat, pps, 0.0))

    #scale target between min_speed and 100:
    ms = min(100.0, max(min_speed, 0.0))
    target_speed = int(ms + (100.0-ms) * target)

    #expose data we used:
    info = {
            "low_limit"                 : int(low_limit),
            "min_speed"                 : int(min_speed),
            "frame_delay"               : int(frame_delay),
            "mpixels"                   : int(mpixels_per_s),
            "damage_latency"            : {
                                           "ref"        : int(1000.0*ref_damage_latency),
                                           "avg"        : int(1000.0*statistics.avg_damage_in_latency),
                                           "target"     : int(1000.0*target_damage_latency),
                                           "abs_factor" : int(100.0*dam_lat_abs),
                                           "rel_factor" : int(100.0*dam_lat_rel),
                                           },
            "decoding_latency"          : {
                                           "target"   : int(target_decode_speed),
                                           "factor"   : int(100.0*dec_lat),
                                           },
            }
    return info, target_speed
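
The final scaling step maps the strongest pressure factor onto the [min_speed, 100] range by linear interpolation. A small worked check of that arithmetic, using made-up values:

#assume min_speed=30 and the strongest factor (e.g. dam_lat_rel) is 0.4:
min_speed = 30.0
target = min(1.0, max(0.4, 0.0))
ms = min(100.0, max(min_speed, 0.0))
target_speed = int(ms + (100.0 - ms) * target)
assert target_speed == 58    #30 + 70 * 0.4 = 58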
Example #4
def get_target_speed(wid, window_dimensions, batch, global_statistics,
                     statistics, min_speed, speed_data):
    low_limit = get_low_limit(global_statistics.mmap_size > 0,
                              window_dimensions)
    #***********************************************************
    # encoding speed:
    #    0    for highest compression/slowest
    #    100  for lowest compression/fastest
    # here we try to minimize damage latency while keeping the client decoding cost low

    #megapixels (low_limit is a pixel count, not a rate):
    mpixels = low_limit / 1024.0 / 1024.0
    #for larger window sizes, we should be downscaling,
    #and don't want to wait too long for those anyway:
    ref_damage_latency = 0.010 + 0.025 * (1 + mathlog(max(1, mpixels)))

    #abs: try to never go higher than 5 times reference latency:
    dam_lat_abs = max(
        0, ((statistics.avg_damage_in_latency or 0) - ref_damage_latency) /
        (ref_damage_latency * 4.0))

    #calculate a target latency and try to get close to it
    avg_delay = batch.delay
    delays = list(batch.last_actual_delays)
    if len(delays) > 0:
        #average recent actual delay:
        avg_delay = time_weighted_average(delays)
    #and average that with the current delay (which is lower or equal):
    frame_delay = (avg_delay + batch.delay) / 2.0
    #ensure we always spend at least as much time encoding as we spend batching:
    #(one frame encoding whilst one frame is batching is our ideal result)
    target_damage_latency = max(ref_damage_latency, frame_delay / 1000.0)
    #current speed:
    speed = min_speed
    if len(speed_data) > 0:
        speed = max(min_speed, time_weighted_average(speed_data))
    #rel: do we need to increase or decrease speed to reach the target:
    dam_lat_rel = speed / 100.0 * statistics.avg_damage_in_latency / target_damage_latency

    #ensure we decode at a reasonable speed (for slow / low-power clients)
    #maybe this should be configurable?
    target_decode_speed = 8 * 1000 * 1000.0  #8 MPixels/s
    dec_lat = 0.0
    if statistics.avg_decode_speed > 0:
        dec_lat = target_decode_speed / (statistics.avg_decode_speed
                                         or target_decode_speed)

    #if we have more pixels to encode, we may need to go faster
    #(this is important because the damage latency used by the other factors
    # may aggregate multiple damage requests into one packet - which may skip frames)
    #TODO: reconcile this with video regions
    #only count the last second's worth:
    now = time.time()
    lim = now - 1.0
    lde = [
        w * h for t, _, _, w, h in list(statistics.last_damage_events)
        if t >= lim
    ]
    pixels = sum(lde)
    mpixels_per_s = pixels / 1024.0 / 1024.0
    pps = 0.0
    if len(lde) > 5:
        #above 50 MPixels/s, we should reach 100% speed
        #(even x264 peaks at tens of MPixels/s)
        pps = mpixels_per_s / 50.0

    #combine factors: use the highest one:
    target = min(1.0, max(dam_lat_abs, dam_lat_rel, dec_lat, pps, 0.0))

    #scale target between min_speed and 100:
    ms = min(100.0, max(min_speed, 0.0))
    target_speed = int(ms + (100.0 - ms) * target)

    #expose data we used:
    info = {
        "low_limit": int(low_limit),
        "min_speed": int(min_speed),
        "frame_delay": int(frame_delay),
        "mpixels": int(mpixels_per_s),
        "damage_latency": {
            "ref": int(1000.0 * ref_damage_latency),
            "avg": int(1000.0 * statistics.avg_damage_in_latency),
            "target": int(1000.0 * target_damage_latency),
            "abs_factor": int(100.0 * dam_lat_abs),
            "rel_factor": int(100.0 * dam_lat_rel),
        },
        "decoding_latency": {
            "target": int(target_decode_speed),
            "factor": int(100.0 * dec_lat),
        },
    }
    return info, target_speed
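
Example #4 is the same routine as Example #3 after mechanical reformatting; the logic is unchanged. Both, like Example #5 below, call get_low_limit without showing it. A hedged sketch of what it might compute, where the 8x8 floor and the mmap doubling are assumptions for illustration:

def get_low_limit(mmap_enabled, window_dimensions):
    #pixel-count threshold used to normalize per-window costs;
    #the exact policy here is an assumption, not a verified one:
    w, h = window_dimensions
    low_limit = max(8 * 8, w * h)
    if mmap_enabled:
        #mmap transfers are much cheaper, so raise the bar:
        low_limit *= 2
    return low_limit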
Example #5
def get_target_speed(window_dimensions, batch, global_statistics, statistics,
                     bandwidth_limit, min_speed, speed_data):
    low_limit = get_low_limit(global_statistics.mmap_size > 0,
                              window_dimensions)
    #***********************************************************
    # encoding speed:
    #    0    for highest compression/slowest
    #    100  for lowest compression/fastest
    # here we try to minimize damage latency while keeping the client decoding cost low

    #backlog factor:
    _, pixels_backlog, _ = statistics.get_client_backlog()
    pb_ratio = pixels_backlog / low_limit
    pixels_bl_s = 100 - int(
        100 * logp(pb_ratio / 4))  #4 frames behind or more -> compress more

    #megapixels (low_limit is a pixel count, not a rate):
    mpixels = low_limit / 1024.0 / 1024.0
    #for larger window sizes, we should be downscaling,
    #and don't want to wait too long for those anyway:
    ref_damage_latency = (10 + 25 * (1 + mathlog(max(1, mpixels)))) / 1000.0

    adil = statistics.avg_damage_in_latency or 0
    #abs: try to never go higher than N times the reference latency:
    dam_lat_abs = max(0,
                      (adil - ref_damage_latency)) / (ref_damage_latency * 3)

    if batch.locked:
        target_damage_latency = ref_damage_latency
        dam_lat_rel = 0
        frame_delay = 0
        dam_lat_s = 100
    else:
        #calculate a target latency and try to get close to it
        avg_delay = batch.delay
        delays = tuple(batch.last_actual_delays)
        if delays:
            #average recent actual delay:
            avg_delay = time_weighted_average(delays)
        #and average that with the current delay (which is lower or equal):
        frame_delay = max(10, int((avg_delay + batch.delay) // 2))
        #ensure we always spend at least as much time encoding as we spend batching:
        #(one frame encoding whilst one frame is batching is our ideal result)
        target_damage_latency = max(ref_damage_latency, frame_delay / 1000.0)
        dam_target_speed = min_speed
        if speed_data:
            dam_target_speed = max(min_speed,
                                   time_weighted_average(speed_data))
        #rel: do we need to increase speed to reach the target:
        dam_lat_rel = dam_target_speed / 100.0 * adil / target_damage_latency
        #cap the speed if we're delaying frames longer than we should:
        #(so we spend more of that time compressing them better instead):
        dam_lat_s = int(100 * 2 * ref_damage_latency * 1000 // frame_delay)

    #if we have more pixels to encode, we may need to go faster
    #(this is important because the damage latency used by the other factors
    # may aggregate multiple damage requests into one packet - which may skip frames)
    #TODO: reconcile this with video regions
    #only count the last second's worth:
    now = monotonic()
    lim = now - 1.0
    lde = tuple(w * h for t, _, _, w, h in tuple(statistics.last_damage_events)
                if t >= lim)
    pixels = sum(lde)
    mpixels_per_s = pixels / (1024 * 1024)
    pps = 0.0
    pixel_rate_s = 100
    if len(lde) > 5 and mpixels_per_s >= 1:
        #above 50 MPixels/s, we should reach 100% speed
        #(even x264 peaks at tens of MPixels/s)
        pps = sqrt(mpixels_per_s / 50.0)
        #if there aren't many pixels,
        #we can spend more time compressing them better:
        #(since it isn't going to cost too much to compress)
        #ie: 2MPixels/s -> max_speed=60%
        pixel_rate_s = 20 + int(mpixels_per_s * 20)

    bandwidth_s = 100
    if bandwidth_limit > 0:
        #below N Mbps, lower the speed ceiling,
        #so we will compress better:
        N = 10
        bandwidth_s = int(100 * sqrt(bandwidth_limit / (N * 1000 * 1000)))

    gcv = global_statistics.congestion_value
    congestion_s = 100
    if gcv > 0:
        #apply strict limit for congestion events:
        congestion_s = max(0, int(100 - gcv * 1000))

    #ensure we decode at a reasonable speed (for slow / low-power clients)
    #maybe this should be configurable?
    min_decode_speed = 1 * 1000 * 1000  #1 MPixel/s, in pixels per second
    ads = statistics.avg_decode_speed or 0
    dec_lat = 0
    if ads > 0:
        dec_lat = min_decode_speed / ads

    ms = min(100, max(min_speed, 0))
    max_speed = max(
        ms, min(pixels_bl_s, dam_lat_s, pixel_rate_s, bandwidth_s,
                congestion_s))
    #combine factors: use the highest one:
    target = min(1, max(dam_lat_abs, dam_lat_rel, dec_lat, pps, 0))
    #scale target between min_speed and 100:
    speed = int(ms + (100 - ms) * target)
    speed = max(ms, min(max_speed, speed))

    #expose data we used:
    info = {
        "low-limit": int(low_limit),
        "max-speed": int(max_speed),
        "min-speed": int(min_speed),
        "factors": {
            "damage-latency-abs": int(dam_lat_abs * 100),
            "damage-latency-rel": int(dam_lat_rel * 100),
            "decoding-latency": int(dec_lat * 100),
            "pixel-rate": int(pps * 100),
        },
        "limits": {
            "backlog": pixels_bl_s,
            "damage-latency": dam_lat_s,
            "pixel-rate": pixel_rate_s,
            "bandwidth-limit": bandwidth_s,
            "congestion": congestion_s,
        },
    }
    return info, int(speed), max_speed
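
Example #5 additionally calls logp for the backlog limit. The surrounding arithmetic only needs logp(0) == 0 and logp(1) == 1, so that four frames of backlog (pb_ratio / 4 == 1) drive pixels_bl_s down to 0; a base-2 definition that satisfies this, offered as an assumption rather than the actual implementation:

from math import log

def logp(x):
    #slow-growing penalty curve: logp(0) == 0, logp(1) == 1,
    #and values above 1 grow sub-linearly (assumed shape):
    return log(1.0 + x, 2.0)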