Example 1
def get_target_quality(wid, window_dimensions, batch, global_statistics, statistics, min_quality, min_speed):
    low_limit = get_low_limit(global_statistics.mmap_size>0, window_dimensions)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try to minimize client latency, packet backlog and batch.delay
    # the compression ratio tells us if we can increase the quality
    packets_backlog, pixels_backlog, _ = statistics.get_client_backlog()
    pixels_bl = 1.0 - logp(pixels_backlog/low_limit)
    target = pixels_bl
    batch_q = -1
    if batch is not None:
        recs = len(batch.last_actual_delays)
        if recs>0:
            #weighted average between start delay and min_delay
            #so when we start and we don't have any records, we don't lower quality
            #just because the start delay is higher than min_delay
            ref_delay = (batch.START_DELAY*10.0/recs + batch.min_delay*recs) / (recs+10.0/recs)
            #anything less than N times the reference delay is good enough:
            N = 4
            batch_q = N * ref_delay / max(1, batch.min_delay, batch.delay)
            target = min(1.0, target, batch_q)
    cratio_factor = None
    #from here on, the compression ratio integer value is in per-1000:
    es = [(t, pixels, 1000*compressed_size*bpp//pixels//32) for (t, _, pixels, bpp, compressed_size, _) in list(statistics.encoding_stats) if pixels>=4096]
    if len(es)>=2:
        #use the recent vs average compression ratio
        #(add a smoothing value so that very low compression ratios don't skew things)
        ascore, rscore = calculate_timesize_weighted_average_score(es)
        bump = 0
        if ascore>rscore:
            #raise the quality
            #only if there is no backlog:
            if packets_backlog==0:
                smooth = 150
                bump = logp((float(smooth+ascore)/(smooth+rscore)))-1.0
        else:
            #lower the quality
            #more so if the compression is not doing very well:
            mult = (1000 + rscore)/2000.0           #mult should be in the range 0.5 to ~1.0
            smooth = 50
            bump = -logp((float(smooth+rscore)/(smooth+ascore))-1.0) * mult
        target += bump
        cratio_factor = ascore, rscore, int(100*bump)
    latency_q = -1
    if len(global_statistics.client_latency)>0 and global_statistics.recent_client_latency>0:
        #if the latency is too high, lower quality target:
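        #(the factor only drops below 1.0 once recent latency exceeds 3x the target latency)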
        latency_q = 3.0 * statistics.target_latency / global_statistics.recent_client_latency
        target = min(target, latency_q)
    target = min(1.0, max(0.0, target))
    if min_speed>0:
        #discount the quality more aggressively if we have speed requirements to satisfy:
        #ie: for min_speed=50:
        #target=1.0   -> target=1.0
        #target=0.8   -> target=0.51
        #target=0.5   -> target=0.125
        #target=0     -> target=0
        target = target ** ((100.0 + 4*min_speed)/100.0)
    #raise the quality when there are not many recent damage events:
    ww, wh = window_dimensions
    if ww>0 and wh>0:
        now = time.time()
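        #pixels damaged in each of the last 10 one-second intervals, keyed by age in seconds: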
        damage_pixel_count = dict((lim, sum([w*h for t,_,_,w,h in list(statistics.last_damage_events) if t>=now-lim and t<now-lim+1])) for lim in range(1,11))
        pixl5 = sum(v for lim,v in damage_pixel_count.items() if lim<=5)
        pixn5 = sum(v for lim,v in damage_pixel_count.items() if lim>5)
        pctpixdamaged = float(pixl5)/(ww*wh)
        log("get_target_quality: target=%s (window %ix%i) pctpixdamaged=%i%%, dpc=%s", target, ww, wh, pctpixdamaged*100, damage_pixel_count)
        if pctpixdamaged<=0.5:
            target = min(1.0, target + (1.0-pctpixdamaged*2))
        if pixl5<pixn5:
            target = sqrt(target)
    #apply min-quality:
    mq = min(100.0, max(min_quality, 0.0))
    target_quality = mq + (100.0-mq) * target
    info = {
            "min_quality"   : min_quality,
            "min_speed"     : min_speed,
            "backlog_factor": (packets_backlog, pixels_backlog, int(100.0*pixels_bl)),
            }
    if cratio_factor:
        info["compression-ratio"] = cratio_factor
    if batch_q>=0:
        info["batch-delay-ratio"] = int(100.0*batch_q)
    if latency_q>=0:
        info["latency"] = int(100.0*latency_q)
    return info, target_quality
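A note on the helpers used throughout these examples: get_low_limit, logp, calculate_timesize_weighted_average_score and log come from xpra's statistics modules and are not shown here. The min_speed discount near the end, however, is plain arithmetic, and the table in the comments can be checked directly. A minimal standalone sketch (the discounted helper name is made up for illustration):

def discounted(target, min_speed=50):
    #same formula as above: min_speed=50 gives an exponent of 3.0
    return target ** ((100.0 + 4 * min_speed) / 100.0)

for t in (1.0, 0.8, 0.5, 0.0):
    print("target=%.2f -> %.3f" % (t, discounted(t)))
#prints 1.000, 0.512, 0.125 and 0.000, matching the 1.0 / 0.51 / 0.125 / 0 table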
Example 2
def get_target_quality(wid, window_dimensions, batch, global_statistics,
                       statistics, min_quality, min_speed):
    low_limit = get_low_limit(global_statistics.mmap_size > 0,
                              window_dimensions)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try to minimize client latency, packet backlog and batch.delay
    # the compression ratio tells us if we can increase the quality
    packets_backlog, pixels_backlog, _ = statistics.get_client_backlog()
    pixels_bl = 1.0 - logp(pixels_backlog / low_limit)
    target = pixels_bl
    batch_q = -1
    if batch is not None:
        recs = len(batch.last_actual_delays)
        if recs > 0:
            #weighted average between start delay and min_delay
            #so when we start and we don't have any records, we don't lower quality
            #just because the start delay is higher than min_delay
            ref_delay = (batch.START_DELAY * 10.0 / recs +
                         batch.min_delay * recs) / (recs + 10.0 / recs)
            #anything less than N times the reference delay is good enough:
            N = 4
            batch_q = N * ref_delay / max(1, batch.min_delay, batch.delay)
            target = min(1.0, target, batch_q)
    cratio_factor = None
    #from here on, the compression ratio integer value is in per-1000:
    es = [(t, pixels, 1000 * compressed_size * bpp // pixels // 32)
          for (t, _, pixels, bpp, compressed_size, _) in list(statistics.encoding_stats)
          if pixels >= 4096]
    if len(es) >= 2:
        #use the recent vs average compression ratio
        #(add a smoothing value so that very low compression ratios don't skew things)
        ascore, rscore = calculate_timesize_weighted_average_score(es)
        bump = 0
        if ascore > rscore:
            #raise the quality
            #only if there is no backlog:
            if packets_backlog == 0:
                smooth = 150
                bump = logp((float(smooth + ascore) / (smooth + rscore))) - 1.0
        else:
            #lower the quality
            #more so if the compression is not doing very well:
            mult = (1000 + rscore) / 2000.0  #mult should be in the range 0.5 to ~1.0
            smooth = 50
            bump = -logp((float(smooth + rscore) / (smooth + ascore)) - 1.0) * mult
        target += bump
        cratio_factor = ascore, rscore, int(100 * bump)
    latency_q = -1
    if len(global_statistics.client_latency) > 0 and global_statistics.recent_client_latency > 0:
        #if the latency is too high, lower quality target:
        latency_q = 3.0 * statistics.target_latency / global_statistics.recent_client_latency
        target = min(target, latency_q)
    target = min(1.0, max(0.0, target))
    if min_speed > 0:
        #discount the quality more aggressively if we have speed requirements to satisfy:
        #ie: for min_speed=50:
        #target=1.0   -> target=1.0
        #target=0.8   -> target=0.51
        #target=0.5   -> target=0.125
        #target=0     -> target=0
        target = target**((100.0 + 4 * min_speed) / 100.0)
    mq = min(100.0, max(min_quality, 0.0))
    target_quality = mq + (100.0 - mq) * target
    info = {
        "min_quality": min_quality,
        "min_speed": min_speed,
        "backlog_factor": (packets_backlog, pixels_backlog, int(100.0 * pixels_bl)),
    }
    if cratio_factor:
        info["compression-ratio"] = cratio_factor
    if batch_q >= 0:
        info["batch-delay-ratio"] = int(100.0 * batch_q)
    if latency_q >= 0:
        info["latency"] = int(100.0 * latency_q)
    return info, target_quality
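The ref_delay expression above is easy to misread: with few delay records the START_DELAY term dominates, and as records accumulate the weight shifts towards min_delay, which is what the comment about not lowering quality at startup refers to. A standalone sketch with invented delay values (START_DELAY and min_delay here are hypothetical, in milliseconds):

START_DELAY = 500.0  #hypothetical initial batch delay
min_delay = 10.0     #hypothetical lowest recorded delay

for recs in (1, 2, 5, 10, 50, 100):
    ref_delay = (START_DELAY * 10.0 / recs + min_delay * recs) / (recs + 10.0 / recs)
    print("recs=%3i -> ref_delay=%6.1f" % (recs, ref_delay))
#with 1 record, ref_delay is ~455 (close to START_DELAY);
#with 100 records it has converged to ~10.5 (just above min_delay)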
Example 3
def get_target_quality(wid, window_dimensions, batch, global_statistics, statistics, min_quality, min_speed):
    low_limit = get_low_limit(global_statistics.mmap_size>0, window_dimensions)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try to minimize client latency, packet backlog and batch.delay
    # the compression ratio tells us if we can increase the quality
    packets_backlog, pixels_backlog, _ = statistics.get_client_backlog()
    pixels_bl = 1.0 - logp(pixels_backlog/low_limit)
    target = pixels_bl
    batch_q = -1
    if batch is not None:
        recs = len(batch.last_actual_delays)
        if recs>0:
            #weighted average between start delay and min_delay
            #so when we start and we don't have any records, we don't lower quality
            #just because the start delay is higher than min_delay
            ref_delay = (batch.START_DELAY*10.0/recs + batch.min_delay*recs) / (recs+10.0/recs)
            #anything less than twice the minimum is good enough:
            batch_q = ref_delay / max(1, batch.min_delay, batch.delay/2.0)
            target = min(1.0, target, batch_q)
    cratio_factor = None
    #from here on, the compression ratio integer value is in per-1000:
    es = [(t, pixels, 1000*compressed_size*bpp//pixels//32) for (t, _, pixels, bpp, compressed_size, _) in list(statistics.encoding_stats) if pixels>=4096]
    if len(es)>=2:
        #use the recent vs average compression ratio
        #(add 10 to smooth things out a bit, so very low compression ratios don't skew things)
        ascore, rscore = calculate_timesize_weighted_average_score(es)
        bump = 0
        if ascore>rscore:
            #raise the quality
            #only if there is no backlog:
            if packets_backlog==0:
                bump = logp((float(10+ascore)/(10+rscore))-1.0)
        else:
            #lower the quality
            #more so if the compression is not doing very well:
            mult = (1000 + rscore)/2000.0           #mult should be in the range 0.5 to ~1.0
            bump = -logp((float(10+rscore)/(10+ascore))-1.0) * mult
        target += bump
        cratio_factor = ascore, rscore, int(100*bump)
    latency_q = -1
    if len(global_statistics.client_latency)>0 and global_statistics.recent_client_latency>0:
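        #if the latency is too high, lower the quality target: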
        latency_q = 3.0 * statistics.target_latency / global_statistics.recent_client_latency
        target = min(target, latency_q)
    target = min(1.0, max(0.0, target))
    if min_speed>0:
        #discount the quality more aggressively if we have speed requirements to satisfy:
        #ie: for min_speed=50:
        #target=1.0   -> target=1.0
        #target=0.8   -> target=0.51
        #target=0.5   -> target=0.125
        #target=0     -> target=0
        target = target ** ((100.0 + 4*min_speed)/100.0)
    mq = min(100.0, max(min_quality, 0.0))
    target_quality = mq + (100.0-mq) * target
    info = {
            "min_quality"   : min_quality,
            "min_speed"     : min_speed,
            "backlog_factor": (packets_backlog, pixels_backlog, int(100.0*pixels_bl)),
            }
    if cratio_factor:
        info["compression-ratio"] = cratio_factor
    if batch_q>=0:
        info["batch-delay-ratio"] = int(100.0*batch_q)
    if latency_q>=0:
        info["latency"] = int(100.0*latency_q)
    return info, target_quality
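This variant smooths the compression scores with a constant of 10 (the other examples use 50 or 150), so the bump reacts more sharply to score differences. Here is a rough standalone sketch of the bump arithmetic, ignoring the packet-backlog guard; the logp stand-in below is an assumption (a slow-growing log with logp(0) == 0), not xpra's verified definition:

from math import log2

def logp(x):
    #assumed stand-in for xpra's logp helper
    return log2(1 + x)

def bump(ascore, rscore):
    #scores are per-1000 compressed-size ratios: lower means better compression
    if ascore > rscore:
        #recent compression beats the average: raise the quality
        return logp((float(10 + ascore) / (10 + rscore)) - 1.0)
    #recent compression is worse: lower the quality, more so if compressing badly
    mult = (1000 + rscore) / 2000.0
    return -logp((float(10 + rscore) / (10 + ascore)) - 1.0) * mult

print(bump(300, 200))   #positive bump: recent ratio is lower (better) than average
print(bump(200, 300))   #negative bump, scaled by mult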
Example 4
def get_target_quality(wid, window_dimensions, batch, global_statistics, statistics, min_quality, min_speed):
    info = {
        "min_quality"   : min_quality,
        "min_speed"     : min_speed,
        }
    low_limit = get_low_limit(global_statistics.mmap_size>0, window_dimensions)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try to minimize client latency, packet backlog and batch.delay
    # the compression ratio tells us if we can increase the quality
    packets_backlog, pixels_backlog, _ = statistics.get_client_backlog()
    pb_ratio = pixels_backlog/low_limit
    pixels_bl = 1.0 - logp(pb_ratio//4)     #4 frames behind -> min quality
    info["backlog_factor"] = packets_backlog, pixels_backlog, low_limit, pb_ratio, int(100.0*pixels_bl)
    target = pixels_bl
    if batch is not None:
        recs = len(batch.last_actual_delays)
        if recs>0 and not batch.locked:
            #weighted average between start delay and min_delay
            #so when we start and we don't have any records, we don't lower quality
            #just because the start delay is higher than min_delay
            ref_delay = (batch.START_DELAY*10.0/recs + batch.min_delay*recs) / (recs+10.0/recs)
            #anything less than N times the reference delay is good enough:
            N = 4
            batch_q = N * ref_delay / max(1, batch.min_delay, batch.delay)
            info["batch-delay-ratio"] = int(100.0*batch_q)
            target = min(1.0, target, batch_q)
    #from here on, the compression ratio integer value is in per-1000:
    es = [(t, pixels, 1000*compressed_size*bpp//pixels//32) for (t, _, pixels, bpp, compressed_size, _) in list(statistics.encoding_stats) if pixels>=4096]
    if len(es)>=2:
        #use the recent vs average compression ratio
        #(add a smoothing value so that very low compression ratios don't skew things)
        ascore, rscore = calculate_timesize_weighted_average_score(es)
        bump = 0
        if ascore>rscore:
            #raise the quality
            #only if there is no backlog:
            if packets_backlog==0:
                smooth = 150
                bump = logp((float(smooth+ascore)/(smooth+rscore)))-1.0
        else:
            #lower the quality
            #more so if the compression is not doing very well:
            mult = (1000 + rscore)/2000.0           #mult should be in the range 0.5 to ~1.0
            smooth = 50
            bump = -logp((float(smooth+rscore)/(smooth+ascore))-1.0) * mult
        target += bump
        info["compression-ratio"] = ascore, rscore, int(100*bump)
    if len(global_statistics.client_latency)>0 and global_statistics.recent_client_latency>0:
        #if the latency is too high, lower quality target:
        latency_q = 3.0 * statistics.target_latency / global_statistics.recent_client_latency
        target = min(target, latency_q)
        info["latency"] = int(100.0*latency_q)
    target = min(1.0, max(0.0, target))
    if min_speed>0:
        #discount the quality more aggressively if we have speed requirements to satisfy:
        #ie: for min_speed=50:
        #target=1.0   -> target=1.0
        #target=0.8   -> target=0.51
        #target=0.5   -> target=0.125
        #target=0     -> target=0
        target = target ** ((100.0 + 4*min_speed)/100.0)
    #raise the quality when there are not many recent damage events:
    ww, wh = window_dimensions
    if ww>0 and wh>0:
        now = time.time()
        damage_pixel_count = dict((lim, sum([w*h for t,_,_,w,h in list(statistics.last_damage_events) if t>=now-lim and t<now-lim+1])) for lim in range(1,11))
        pixl5 = sum(v for lim,v in damage_pixel_count.items() if lim<=5)
        pixn5 = sum(v for lim,v in damage_pixel_count.items() if lim>5)
        pctpixdamaged = float(pixl5)/(ww*wh)
        log("get_target_quality: target=%s (window %ix%i) pctpixdamaged=%i%%, dpc=%s", target, ww, wh, pctpixdamaged*100, damage_pixel_count)
        if pctpixdamaged<=0.5:
            target = min(1.0, target + (1.0-pctpixdamaged*2))
        if pixl5<pixn5:
            target = sqrt(target)
    #apply min-quality:
    mq = min(100.0, max(min_quality, 0.0))
    target_quality = mq + (100.0-mq) * target
    return info, target_quality
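The damage-rate boost shared by examples 1 and 4 buckets damaged pixels into one-second intervals over the last ten seconds, then compares the most recent five seconds against the five before. A standalone sketch with synthetic events (the tuples only mimic the (time, _, _, width, height) layout used above):

import time

now = time.time()
#synthetic damage events: a small update 2s ago, a large one 8s ago
last_damage_events = [
    (now - 2, 0, 0, 100, 100),   #10,000 pixels, recent
    (now - 8, 0, 0, 300, 300),   #90,000 pixels, older
]
damage_pixel_count = dict(
    (lim, sum(w * h for t, _, _, w, h in last_damage_events if now - lim <= t < now - lim + 1))
    for lim in range(1, 11))
pixl5 = sum(v for lim, v in damage_pixel_count.items() if lim <= 5)  #last 5 seconds
pixn5 = sum(v for lim, v in damage_pixel_count.items() if lim > 5)   #5 to 10 seconds ago
print(pixl5, pixn5)   #10000 90000 -> pixl5 < pixn5, so target = sqrt(target)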
Example 5
def get_target_quality(window_dimensions, batch, global_statistics, statistics,
                       bandwidth_limit, min_quality, min_speed):
    low_limit = get_low_limit(global_statistics.mmap_size > 0,
                              window_dimensions)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try to minimize client latency, packet backlog and batch.delay
    # the compression ratio tells us if we can increase the quality

    #backlog factor:
    packets_backlog, pixels_backlog, _ = statistics.get_client_backlog()
    pb_ratio = pixels_backlog / low_limit
    pixels_bl_q = 1 - sqrt(pb_ratio / 8)  #8 frames behind or more -> min quality

    #bandwidth limit factor:
    bandwidth_q = 1
    if bandwidth_limit > 0:
        #below 10Mbps, lower the quality
        #(keep this factor in the same 0..1 scale as the other limits):
        bandwidth_q = sqrt(bandwidth_limit / (10.0 * 1000 * 1000))

    #congestion factor:
    gcv = global_statistics.congestion_value
    congestion_q = 1 - gcv * 10

    #batch delay factor:
    batch_q = 1
    if batch is not None:
        recs = len(batch.last_actual_delays)
        if recs > 0 and not batch.locked:
            #weighted average between start delay and min_delay
            #so when we start and we don't have any records, we don't lower quality
            #just because the start delay is higher than min_delay
            #anything less than N times the reference delay is good enough:
            N = 3.0 - min_speed / 50.0
            #if the min-speed is high, reduce tolerance:
            tolerance = 10 - int(min_speed // 10)
            ref_delay = max(0, tolerance + N * (batch.start_delay * 10 + batch.min_delay * recs) // (recs + 10))
            batch_q = (N * ref_delay) / max(1, batch.min_delay, batch.delay)

    #latency limit factor:
    latency_q = 1
    if global_statistics.client_latency and global_statistics.recent_client_latency > 0:
        #if the recent latency is too high, keep quality lower:
        latency_q = 3.0 * statistics.target_latency / global_statistics.recent_client_latency

    #target is the lowest value of all those limits:
    target = max(
        0, min(1, pixels_bl_q, bandwidth_q, congestion_q, batch_q, latency_q))

    info = {}
    #boost based on recent compression ratio
    comp_boost = 0
    #from here on, the compression ratio integer value is in per-1000:
    es = tuple((t, pixels, 1000 * compressed_size * bpp // pixels // 32)
               for (t, _, pixels, bpp, compressed_size, _) in tuple(statistics.encoding_stats)
               if pixels >= 4096)
    if len(es) >= 2:
        #use the recent vs average compression ratio
        #(add a smoothing value so that very low compression ratios don't skew things)
        comp_boost = 0
        ascore, rscore = calculate_timesize_weighted_average_score(es)
        if ascore > rscore:
            #raise the quality
            #but only if there is no backlog:
            if packets_backlog == 0:
                smooth = 150
                comp_boost = logp((smooth + ascore) / (smooth + rscore)) - 1.0
        else:
            #lower the quality
            #more so if the compression is not doing very well:
            mult = (1000 + rscore) / 2000.0  #mult should be in the range 0.5 to ~1.0
            smooth = 50
            comp_boost = -logp(((smooth + rscore) / (smooth + ascore)) - 1.0) * mult
        info["compression-ratio"] = ascore, rscore
        target = max(0, target + comp_boost)

    #discount the quality more aggressively if we have speed requirements to satisfy:
    if min_speed > 0:
        #ie: for min_speed=50:
        #target=1.0   -> target=1.0
        #target=0.8   -> target=0.51
        #target=0.5   -> target=0.125
        #target=0     -> target=0
        target = target**((100.0 + 4 * min_speed) / 100.0)

    #raise the quality when there are not many recent damage events:
    ww, wh = window_dimensions
    if ww > 0 and wh > 0:
        lde = tuple(statistics.last_damage_events)
        if lde:
            now = monotonic()
            damage_pixel_count = tuple(
                (lim, sum(w * h for t, _, _, w, h in lde if now - lim <= t < now - lim + 1))
                for lim in range(1, 11))
            pixl5 = sum(v for lim, v in damage_pixel_count if lim <= 5)
            pixn5 = sum(v for lim, v in damage_pixel_count if lim > 5)
            pctpixdamaged = pixl5 / (ww * wh)
            log(
                "get_target_quality: target=%3i%% (window %4ix%-4i) pctpixdamaged=%3i%%, dpc=%s",
                100 * target, ww, wh, pctpixdamaged * 100, damage_pixel_count)
            if pctpixdamaged < 0.5:
                target *= (1.5 - pctpixdamaged)
            if pixl5 < pixn5:
                target = sqrt(target)

    #apply min-quality:
    mq = min(100, max(min_quality, 0))
    quality = int(mq + (100 - mq) * target)
    quality = max(0, mq, min(100, quality))

    info.update({
        "min-quality": min_quality,
        "min-speed": min_speed,
        "backlog": (packets_backlog, pixels_backlog, low_limit, int(100 * pb_ratio)),
        "limits": {
            "backlog": int(pixels_bl_q * 100),
            "bandwidth": int(bandwidth_q * 100),
            "congestion": int(congestion_q * 100),
            "batch": int(batch_q * 100),
            "latency": int(latency_q * 100),
            "boost": int(comp_boost * 100),
        },
    })
    return info, int(quality)
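To see how the limit factors of this last variant interact, here is a hypothetical worked example: every input value below is invented, and the batch factor is omitted for brevity. It only reproduces the factor arithmetic, not the real statistics objects:

from math import sqrt

pixels_backlog, low_limit = 30000, 256 * 256    #invented backlog vs low-limit pixels
bandwidth_limit = 4 * 1000 * 1000               #invented 4Mbps cap
congestion_value = 0.02                         #invented congestion estimate
recent_latency, target_latency = 120.0, 50.0    #invented latencies (ms)

pixels_bl_q = 1 - sqrt((pixels_backlog / low_limit) / 8)
bandwidth_q = sqrt(bandwidth_limit / (10.0 * 1000 * 1000))
congestion_q = 1 - congestion_value * 10
latency_q = 3.0 * target_latency / recent_latency

target = max(0, min(1, pixels_bl_q, bandwidth_q, congestion_q, latency_q))
print("factors: %.2f %.2f %.2f %.2f -> target=%.2f"
      % (pixels_bl_q, bandwidth_q, congestion_q, latency_q, target))
#here the ~0.63 bandwidth factor is the binding limit, so target ends up at 0.63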