Example 1
 def get_factors(self, target_latency, pixel_count):
     factors = []
     if len(self.client_latency)>0:
         #client latency: (we want to keep client latency as low as can be)
         metric = "client-latency"
         l = 0.005 + self.min_client_latency
         wm = logp(l / 0.020)
         factors.append(calculate_for_target(metric, l, self.avg_client_latency, self.recent_client_latency, aim=0.8, slope=0.005, smoothing=sqrt, weight_multiplier=wm))
     if len(self.client_ping_latency)>0:
         metric = "client-ping-latency"
         l = 0.005 + self.min_client_ping_latency
         wm = logp(l / 0.050)
         factors.append(calculate_for_target(metric, l, self.avg_client_ping_latency, self.recent_client_ping_latency, aim=0.95, slope=0.005, smoothing=sqrt, weight_multiplier=wm))
     if len(self.server_ping_latency)>0:
         metric = "server-ping-latency"
         l = 0.005 + self.min_server_ping_latency
         wm = logp(l / 0.050)
         factors.append(calculate_for_target(metric, l, self.avg_server_ping_latency, self.recent_server_ping_latency, aim=0.95, slope=0.005, smoothing=sqrt, weight_multiplier=wm))
     #packet queue size: (includes packets from all windows)
     factors.append(queue_inspect("packet-queue-size", self.packet_qsizes, smoothing=sqrt))
     #packet queue pixels (global):
     qpix_time_values = [(event_time, value) for event_time, _, value in list(self.damage_packet_qpixels)]
     factors.append(queue_inspect("packet-queue-pixels", qpix_time_values, div=pixel_count, smoothing=sqrt))
     #compression data queue: (This is an important metric since each item will consume a fair amount of memory and each will later on go through the other queues.)
     factors.append(queue_inspect("compression-work-queue", self.compression_work_qsizes))
     if self.mmap_size>0:
         #full: effective range is 0.0 to ~1.2
         full = 1.0-float(self.mmap_free_size)/self.mmap_size
         #aim for ~33%
         factors.append(("mmap-area", "%s%% full" % int(100*full), logp(3*full), (3*full)**2))
     return factors
Example 2
	def test_logp(self):
		for _ in range(1000):
			x = random.random()
			v = cystats.logp(x)
			assert v>=0 and v<=1
		for x in (0, 1):
			v = cystats.logp(x)
			assert v>=0 and v<=1
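
The tests above only pin down logp's contract, not its formula. Below is a minimal pure-Python stand-in consistent with that contract; logp_sketch is a hypothetical name, and the real cystats.logp is a Cython implementation that may use a different curve.

from math import log

def logp_sketch(x):
    #hypothetical stand-in for cystats.logp, chosen only to satisfy the tested contract:
    #monotonic, logp_sketch(0) == 0, and bounded by 1 for inputs in [0, 1]
    return log(1.0 + x, 2) / 2.0

assert logp_sketch(0) == 0.0
assert logp_sketch(1) == 0.5
assert all(0 < logp_sketch(x) < 1 for x in (0.1, 0.5, 0.9))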
Example 3
 def get_factors(self, bandwidth_limit=0):
     factors = []
     def mayaddfac(metric, info, factor, weight):
         if weight>0.01:
             factors.append((metric, info, factor, weight))
     #ratio of "in" and "out" latency indicates network bottleneck:
     #(the difference between the two is the time it takes to send)
     if self.damage_in_latency and self.damage_out_latency:
         #prevent jitter from skewing the values too much
         ad = max(0.010, 0.040+self.avg_damage_out_latency-self.avg_damage_in_latency)
         rd = max(0.010, 0.040+self.recent_damage_out_latency-self.recent_damage_in_latency)
         metric = "damage-network-delay"
         #info: avg delay=%.3f recent delay=%.3f" % (ad, rd)
         mayaddfac(*calculate_for_average(metric, ad, rd))
     #client decode time:
     ads = self.avg_decode_speed
     rds = self.recent_decode_speed
     if ads>0 and rds>0:
         metric = "client-decode-speed"
         #info: avg=%.1f, recent=%.1f (MPixels/s)" % (ads/1000/1000, self.recent_decode_speed/1000/1000)
         #our calculate methods aim for lower values, so invert speed
         #this is how long it takes to send 1MB:
         avg1MB = 1.0*1024*1024/ads
         recent1MB = 1.0*1024*1024/rds
         weight_div = max(0.25, rds/(4*1000*1000))
         mayaddfac(*calculate_for_average(metric, avg1MB, recent1MB, weight_offset=0.0, weight_div=weight_div))
     ldet = self.last_damage_event_time
     if ldet:
         #If nothing happens for a while then we can reduce the batch delay,
         #however we must ensure this is not caused by a high system latency
         #so we ignore short elapsed times.
         elapsed = monotonic()-ldet
         mtime = max(0, elapsed-self.max_latency*2)
         #the longer the time, the more we slash:
         weight = sqrt(mtime)
         target = max(0, 1.0-mtime)
         metric = "damage-rate"
         info = {"elapsed"   : int(1000.0*elapsed),
                 "max_latency"   : int(1000.0*self.max_latency)}
         mayaddfac(metric, info, target, weight)
     if bandwidth_limit>0:
         #calculate how much bandwidth we have used in the last second (in bps):
         #encoding_stats.append((end, coding, w*h, bpp, len(data), end-start))
         cutoff = monotonic()-1
         used = sum(v[4] for v in tuple(self.encoding_stats) if v[0]>cutoff) * 8
         info = {
             "budget"  : bandwidth_limit,
             "used"    : used,
             }
         #aim for 10% below the limit:
         target = used*110.0/100.0/bandwidth_limit
         #if we are getting close to or above the limit,
         #the certainty of this factor goes up:
         weight = max(0, target-1)*(5+logp(target))
         mayaddfac("bandwidth-limit", info, target, weight)
     return factors
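
A quick numeric check of the bandwidth factor above, with made-up numbers: while usage stays at or below roughly 91% of the budget, the computed target stays at or below 1, the max(0, target-1) term zeroes the weight, and mayaddfac() drops the factor.

bandwidth_limit = 10 * 1000 * 1000                 # hypothetical 10Mbps budget
for used in (5 * 1000 * 1000, 9 * 1000 * 1000, 12 * 1000 * 1000):
    target = used * 110.0 / 100.0 / bandwidth_limit
    #weight = max(0, target - 1) * (5 + logp(target)) is zero whenever target<=1,
    #so the factor only kicks in once usage exceeds ~91% of the budget:
    print("used=%i bps -> target=%.2f (%s)" % (
        used, target, "ignored" if target <= 1 else "weighted in"))
#5Mbps -> 0.55 (ignored), 9Mbps -> 0.99 (ignored), 12Mbps -> 1.32 (weighted in)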
Example 4
 def get_factors(self, pixel_count):
     factors = []
     def mayaddfac(metric, info, factor, weight):
         if weight>0.01:
             factors.append((metric, info, factor, weight))
     if self.client_latency:
         #client latency: (we want to keep client latency as low as can be)
         metric = "client-latency"
         l = 0.005 + self.min_client_latency
         wm = logp(l / 0.020)
         mayaddfac(*calculate_for_target(metric, l, self.avg_client_latency, self.recent_client_latency,
                                         aim=0.8, slope=0.005, smoothing=sqrt, weight_multiplier=wm))
     if self.client_ping_latency:
         metric = "client-ping-latency"
         l = 0.005 + self.min_client_ping_latency
         wm = logp(l / 0.050)
         mayaddfac(*calculate_for_target(metric, l, self.avg_client_ping_latency, self.recent_client_ping_latency,
                                         aim=0.95, slope=0.005, smoothing=sqrt, weight_multiplier=wm))
     if self.server_ping_latency:
         metric = "server-ping-latency"
         l = 0.005 + self.min_server_ping_latency
         wm = logp(l / 0.050)
         mayaddfac(*calculate_for_target(metric, l, self.avg_server_ping_latency, self.recent_server_ping_latency,
                                         aim=0.95, slope=0.005, smoothing=sqrt, weight_multiplier=wm))
     #packet queue size: (includes packets from all windows)
     mayaddfac(*queue_inspect("packet-queue-size", self.packet_qsizes, smoothing=sqrt))
     #packet queue pixels (global):
     qpix_time_values = tuple((event_time, value) for event_time, _, value in tuple(self.damage_packet_qpixels))
     mayaddfac(*queue_inspect("packet-queue-pixels", qpix_time_values, div=pixel_count, smoothing=sqrt))
     #compression data queue: (This is an important metric
     #since each item will consume a fair amount of memory
     #and each will later on go through the other queues.)
     mayaddfac(*queue_inspect("compression-work-queue", self.compression_work_qsizes))
     if self.mmap_size>0:
         #full: effective range is 0.0 to ~1.2
         full = 1.0-self.mmap_free_size/self.mmap_size
         #aim for ~33%
         mayaddfac("mmap-area", "%s%% full" % int(100*full), logp(3*full), (3*full)**2)
     if self.congestion_value>0:
         mayaddfac("congestion", {}, 1+self.congestion_value, self.congestion_value*10)
     return factors
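
For the mmap-area factor above, the squared weight term means the factor barely registers while the shared memory area is mostly free and counts heavily as it fills up; a quick check with made-up fill levels:

for full in (0.10, 1.0 / 3, 0.80, 1.00):           # hypothetical mmap fill ratios
    weight = (3 * full) ** 2
    print("%3i%% full -> weight %.2f" % (100 * full, weight))
#10% -> 0.09, 33% -> 1.00, 80% -> 5.76, 100% -> 9.00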
Example 5
 def get_factors(self, pixel_count, delay):
     factors = []
     #ratio of "in" and "out" latency indicates network bottleneck:
     #(the difference between the two is the time it takes to send)
     if len(self.damage_in_latency)>0 and len(self.damage_out_latency)>0:
         #prevent jitter from skewing the values too much
         ad = max(0.010, 0.040+self.avg_damage_out_latency-self.avg_damage_in_latency)
         rd = max(0.010, 0.040+self.recent_damage_out_latency-self.recent_damage_in_latency)
         metric = "damage-network-delay"
         #info: avg delay=%.3f recent delay=%.3f" % (ad, rd)
         factors.append(calculate_for_average(metric, ad, rd))
     #send speed:
     ass = self.avg_send_speed
     rss = self.recent_send_speed
     if ass>0 and rss>0:
         #our calculate methods aim for lower values, so invert speed
         #this is how long it takes to send 1MB:
         avg1MB = 1.0*1024*1024/ass
         recent1MB = 1.0*1024*1024/rss
         #we only really care about this when the speed is quite low,
         #so adjust the weight accordingly:
         minspeed = float(128*1024)
         div = logp(max(rss, minspeed)/minspeed)
         metric = "network-send-speed"
         #info: avg=%s, recent=%s (KBytes/s), div=%s" % (int(self.avg_send_speed/1024), int(self.recent_send_speed/1024), div)
         factors.append(calculate_for_average(metric, avg1MB, recent1MB, weight_offset=1.0, weight_div=div))
     #client decode time:
     ads = self.avg_decode_speed
     rds = self.recent_decode_speed
     if ads>0 and rds>0:
         metric = "client-decode-speed"
         #info: avg=%.1f, recent=%.1f (MPixels/s)" % (ads/1000/1000, self.recent_decode_speed/1000/1000)
         #our calculate methods aim for lower values, so invert speed
         #this is how long it takes to send 1MB:
         avg1MB = 1.0*1024*1024/ads
         recent1MB = 1.0*1024*1024/rds
         weight_div = max(0.25, rds/(4*1000*1000))
         factors.append(calculate_for_average(metric, avg1MB, recent1MB, weight_offset=0.0, weight_div=weight_div))
     ldet = self.last_damage_event_time
     if ldet:
         #If nothing happens for a while then we can reduce the batch delay,
         #however we must ensure this is not caused by a high system latency
         #so we ignore short elapsed times.
         elapsed = time.time()-ldet
         mtime = max(0, elapsed-self.max_latency*2)
         #the longer the time, the more we slash:
         weight = sqrt(mtime)
         target = max(0, 1.0-mtime)
         metric = "damage-rate"
         info = {"elapsed"   : int(1000.0*elapsed),
                 "max_latency"   : int(1000.0*self.max_latency)}
         factors.append((metric, info, target, weight))
     return factors
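
The "invert speed" trick used above, in numbers: dividing 1MB by the measured speed turns speeds into durations, so slower links and slower clients map to larger values, which is the direction the calculate methods aim for.

for speed in (128 * 1024, 512 * 1024, 4 * 1024 * 1024):    # bytes/s, hypothetical
    print("%6i KB/s -> %.2f seconds per MB" % (speed // 1024, 1.0 * 1024 * 1024 / speed))
#128 KB/s -> 8.00, 512 KB/s -> 2.00, 4096 KB/s -> 0.25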
Example 6
def update_batch_delay(batch, factors, min_delay=0):
    """
        Given a list of factors of the form:
        [(metric, info, factor, weight)]
        we calculate a new batch delay.
        We use a time-weighted average of previous delays as a starting value,
        then combine it with the new factors.
    """
    current_delay = batch.delay
    now = monotonic()
    tv, tw = 0.0, 0.0
    decay = max(1, logp(current_delay / batch.min_delay) / 5.0)
    max_delay = batch.max_delay
    for delays, d_weight in ((batch.last_delays, 0.25),
                             (batch.last_actual_delays, 0.75)):
        delays = tuple(delays or ())
        #get the weighted average
        #older values matter less, we decay them according to how much we batch already
        #(older values matter more when we batch a lot)
        for when, delay in delays:
            #newer matter more:
            w = d_weight / (1.0 + ((now - when) / decay)**2)
            d = max(0, min(max_delay, delay))
            tv += d * w
            tw += w
    hist_w = tw
    for x in factors:
        if len(x) != 4:
            log.warn("invalid factor line: %s" % str(x))
        else:
            log("update_batch_delay: %-28s : %.2f,%.2f  %s", x[0], x[2], x[3],
                x[1])
    valid_factors = tuple(x for x in factors if x is not None and len(x) == 4)
    all_factors_weight = sum(vf[-1] for vf in valid_factors)
    if all_factors_weight == 0:
        log("update_batch_delay: no weights yet!")
        return
    for _, _, factor, weight in valid_factors:
        target_delay = max(0, min(max_delay, current_delay * factor))
        w = max(1, hist_w) * weight / all_factors_weight
        tw += w
        tv += target_delay * w
    batch.delay = int(max(min_delay, min(max_delay, tv // tw)))
    try:
        last_actual_delay = batch.last_actual_delays[-1][-1]
    except IndexError:
        last_actual_delay = -1
    log("update_batch_delay: delay=%i (last actual delay: %s)", batch.delay,
        last_actual_delay)
    batch.last_updated = now
    batch.factors = valid_factors
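
A stripped-down sketch (not xpra's code) of how the (metric, info, factor, weight) tuples are read: each factor proposes current_delay * factor, clamped to the allowed range, and the weights arbitrate between the proposals. The real function above additionally blends in a time-decayed history of previous delays, which this sketch omits.

current_delay, max_delay = 20.0, 500.0             # ms, hypothetical
factors = (
    ("client-latency", {}, 1.4, 2.0),              # latency rising: push the delay up
    ("damage-rate",    {}, 0.2, 0.5),              # little recent damage: pull it down
)
total_weight = sum(w for _, _, _, w in factors)
new_delay = sum(min(max_delay, max(0, current_delay * f)) * w
                for _, _, f, w in factors) / total_weight
print(round(new_delay, 1))                         # 23.2: the heavier factor wins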
Example 7
def update_batch_delay(batch, factors):
    """
        Given a list of factors of the form:
        [(metric, info, factor, weight)]
        we calculate a new batch delay.
        We use a time-weighted average of previous delays as a starting value,
        then combine it with the new factors.
    """
    current_delay = batch.delay
    now = time.time()
    tv, tw = 0.0, 0.0
    decay = max(1, logp(current_delay / batch.min_delay) / 5.0)
    max_delay = batch.max_delay
    for delays, d_weight in ((batch.last_delays, 0.25),
                             (batch.last_actual_delays, 0.75)):
        if delays is not None and len(delays) > 0:
            #get the weighted average
            #older values matter less, we decay them according to how much we batch already
            #(older values matter more when we batch a lot)
            for when, delay in list(delays):
                #newer matter more:
                w = d_weight / (1.0 + ((now - when) / decay)**2)
                d = max(0, min(max_delay, delay))
                tv += d * w
                tw += w
    hist_w = tw

    for x in factors:
        if len(x) != 4:
            log.warn("invalid factor line: %s" % str(x))
        else:
            log("update_batch_delay: %-28s : %.2f,%.2f  %s", x[0], x[2], x[3],
                x[1])
    valid_factors = [x for x in factors if x is not None and len(x) == 4]
    all_factors_weight = sum([w for _, _, _, w in valid_factors])
    if all_factors_weight == 0:
        log("update_batch_delay: no weights yet!")
        return
    for _, _, factor, weight in valid_factors:
        target_delay = max(0, min(max_delay, current_delay * factor))
        w = max(1, hist_w) * weight / all_factors_weight
        tw += w
        tv += target_delay * w
    mv = 0
    if batch.always:
        mv = batch.min_delay
    batch.delay = max(mv, min(max_delay, tv // tw))
    log("update_batch_delay: delay=%i", batch.delay)
    batch.last_updated = now
    batch.factors = valid_factors
Example 8
def update_batch_delay(batch, factors):
    """
        Given a list of factors of the form:
        [(metric, info, factor, weight)]
        we calculate a new batch delay.
        We use a time-weighted average of previous delays as a starting value,
        then combine it with the new factors.
    """
    current_delay = batch.delay
    now = time.time()
    tv, tw = 0.0, 0.0
    decay = max(1, logp(current_delay/batch.min_delay)/5.0)
    max_delay = batch.max_delay
    for delays, d_weight in ((batch.last_delays, 0.25), (batch.last_actual_delays, 0.75)):
        if delays is not None and len(delays)>0:
            #get the weighted average
            #older values matter less, we decay them according to how much we batch already
            #(older values matter more when we batch a lot)
            for when, delay in list(delays):
                #newer matter more:
                w = d_weight/(1.0+((now-when)/decay)**2)
                d = max(0, min(max_delay, delay))
                tv += d*w
                tw += w
    hist_w = tw

    for x in factors:
        if len(x)!=4:
            log.warn("invalid factor line: %s" % str(x))
        else:
            log("update_batch_delay: %-28s : %.2f,%.2f  %s", x[0], x[2], x[3], x[1])
    valid_factors = [x for x in factors if x is not None and len(x)==4]
    all_factors_weight = sum([w for _,_,_,w in valid_factors])
    if all_factors_weight==0:
        log("update_batch_delay: no weights yet!")
        return
    for _, _, factor, weight in valid_factors:
        target_delay = max(0, min(max_delay, current_delay*factor))
        w = max(1, hist_w)*weight/all_factors_weight
        tw += w
        tv += target_delay*w
    mv = 0
    if batch.always:
        mv = batch.min_delay
    batch.delay = max(mv, min(max_delay, tv // tw))
    log("update_batch_delay: delay=%i", batch.delay)
    batch.last_updated = now
    batch.factors = valid_factors
Example 9
def get_target_quality(wid, window_dimensions, batch, global_statistics,
                       statistics, min_quality):
    low_limit = get_low_limit(global_statistics.mmap_size > 0,
                              window_dimensions)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try to minimize client-latency, packet-backlog and batch.delay
    packets_backlog, _, _ = statistics.get_client_backlog()
    packets_bl = 1.0 - logp(packets_backlog / low_limit)
    target = packets_bl
    batch_q = -1
    if batch is not None:
        recs = len(batch.last_actual_delays)
        if recs > 0:
            #weighted average between start delay and min_delay
            #so when we start and we don't have any records, we don't lower quality
            #just because the start delay is higher than min_delay
            ref_delay = (batch.START_DELAY * 10.0 / recs +
                         batch.min_delay * recs) / (recs + 10.0 / recs)
            #anything less than twice the minimum is good enough:
            batch_q = ref_delay / max(1, batch.min_delay, batch.delay / 2.0)
            target = min(1.0, target, batch_q)
    latency_q = -1
    if len(global_statistics.client_latency) > 0 and global_statistics.recent_client_latency > 0:
        latency_q = 3.0 * statistics.target_latency / global_statistics.recent_client_latency
        target = min(target, latency_q)
    target = min(1.0, max(0.0, target))
    mq = min(100.0, max(min_quality, 0.0))
    target_quality = mq + (100.0 - mq) * target
    info = {
        "min_quality": min_quality,
        "backlog_factor": int(100.0 * packets_bl),
    }
    if batch_q >= 0:
        info["batch_factor"] = int(100.0 * batch_q)
    if latency_q >= 0:
        info["latency_factor"] = int(100.0 * latency_q)
    return info, target_quality
Example 10
def get_target_quality(wid, window_dimensions, batch, global_statistics, statistics, min_quality):
    low_limit = get_low_limit(global_statistics.mmap_size>0, window_dimensions)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try to minimize client-latency, packet-backlog and batch.delay
    packets_backlog, _, _ = statistics.get_client_backlog()
    packets_bl = 1.0 - logp(packets_backlog/low_limit)
    target = packets_bl
    batch_q = -1
    if batch is not None:
        recs = len(batch.last_actual_delays)
        if recs>0:
            #weighted average between start delay and min_delay
            #so when we start and we don't have any records, we don't lower quality
            #just because the start delay is higher than min_delay
            ref_delay = (batch.START_DELAY*10.0/recs + batch.min_delay*recs) / (recs+10.0/recs)
            #anything less than twice the minimum is good enough:
            batch_q = ref_delay / max(1, batch.min_delay, batch.delay/2.0)
            target = min(1.0, target, batch_q)
    latency_q = -1
    if len(global_statistics.client_latency)>0 and global_statistics.recent_client_latency>0:
        latency_q = 3.0 * statistics.target_latency / global_statistics.recent_client_latency
        target = min(target, latency_q)
    target = min(1.0, max(0.0, target))
    mq = min(100.0, max(min_quality, 0.0))
    target_quality = mq + (100.0-mq) * target
    info = {
            "min_quality"   : min_quality,
            "backlog_factor": int(100.0*packets_bl),
            }
    if batch_q>=0:
        info["batch_factor"] = int(100.0*batch_q)
    if latency_q>=0:
        info["latency_factor"] = int(100.0*latency_q)
    return info, target_quality
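
The ref_delay blend above shifts from START_DELAY towards min_delay as actual-delay records accumulate; with hypothetical values START_DELAY=200ms and min_delay=5ms:

START_DELAY, min_delay = 200.0, 5.0                # ms, hypothetical
for recs in (1, 2, 10, 50):
    ref_delay = (START_DELAY * 10.0 / recs + min_delay * recs) / (recs + 10.0 / recs)
    print("recs=%2i -> ref_delay=%.1fms" % (recs, ref_delay))
#1 -> 182.3, 2 -> 144.3, 10 -> 22.7, 50 -> 5.8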
Example 11
def get_target_quality(wid, window_dimensions, batch, global_statistics, statistics, min_quality, min_speed):
    info = {
        "min_quality"   : min_quality,
        "min_speed"     : min_speed,
        }
    low_limit = get_low_limit(global_statistics.mmap_size>0, window_dimensions)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try to minimize client-latency, packet-backlog and batch.delay
    # the compression ratio tells us if we can increase the quality
    packets_backlog, pixels_backlog, _ = statistics.get_client_backlog()
    pb_ratio = pixels_backlog/low_limit
    pixels_bl = 1.0 - logp(pb_ratio//4)     #4 frames behind -> min quality
    info["backlog_factor"] = packets_backlog, pixels_backlog, low_limit, pb_ratio, int(100.0*pixels_bl)
    target = pixels_bl
    if batch is not None:
        recs = len(batch.last_actual_delays)
        if recs>0 and not batch.locked:
            #weighted average between start delay and min_delay
            #so when we start and we don't have any records, we don't lower quality
            #just because the start delay is higher than min_delay
            ref_delay = (batch.START_DELAY*10.0/recs + batch.min_delay*recs) / (recs+10.0/recs)
            #anything less than N times the reference delay is good enough:
            N = 4
            batch_q = N * ref_delay / max(1, batch.min_delay, batch.delay)
            info["batch-delay-ratio"] = int(100.0*batch_q)
            target = min(1.0, target, batch_q)
    #from here on, the compression ratio integer value is in per-1000:
    es = [(t, pixels, 1000*compressed_size*bpp//pixels//32) for (t, _, pixels, bpp, compressed_size, _) in list(statistics.encoding_stats) if pixels>=4096]
    if len(es)>=2:
        #use the recent vs average compression ratio
        #(add value to smooth things out a bit, so very low compression ratios don't skew things)
        ascore, rscore = calculate_timesize_weighted_average_score(es)
        bump = 0
        if ascore>rscore:
            #raise the quality
            #only if there is no backlog:
            if packets_backlog==0:
                smooth = 150
                bump = logp((float(smooth+ascore)/(smooth+rscore)))-1.0
        else:
            #lower the quality
            #more so if the compression is not doing very well:
            mult = (1000 + rscore)/2000.0           #mult should be in the range 0.5 to ~1.0
            smooth = 50
            bump = -logp((float(smooth+rscore)/(smooth+ascore))-1.0) * mult
        target += bump
        info["compression-ratio"] = ascore, rscore, int(100*bump)
    if len(global_statistics.client_latency)>0 and global_statistics.recent_client_latency>0:
        #if the latency is too high, lower quality target:
        latency_q = 3.0 * statistics.target_latency / global_statistics.recent_client_latency
        target = min(target, latency_q)
        info["latency"] = int(100.0*latency_q)
    target = min(1.0, max(0.0, target))
    if min_speed>0:
        #discount the quality more aggressively if we have speed requirements to satisfy:
        #ie: for min_speed=50:
        #target=1.0   -> target=1.0
        #target=0.8   -> target=0.51
        #target=0.5   -> target=0.125
        #target=0     -> target=0
        target = target ** ((100.0 + 4*min_speed)/100.0)
    #raise the quality when there are not many recent damage events:
    ww, wh = window_dimensions
    if ww>0 and wh>0:
        now = time.time()
        damage_pixel_count = dict((lim, sum([w*h for t,_,_,w,h in list(statistics.last_damage_events) if t>=now-lim and t<now-lim+1])) for lim in range(1,11))
        pixl5 = sum(v for lim,v in damage_pixel_count.items() if lim<=5)
        pixn5 = sum(v for lim,v in damage_pixel_count.items() if lim>5)
        pctpixdamaged = float(pixl5)/(ww*wh)
        log("get_target_quality: target=%s (window %ix%i) pctpixdamaged=%i%%, dpc=%s", target, ww, wh, pctpixdamaged*100, damage_pixel_count)
        if pctpixdamaged<=0.5:
            target = min(1.0, target + (1.0-pctpixdamaged*2))
        if pixl5<pixn5:
            target = sqrt(target)
    #apply min-quality:
    mq = min(100.0, max(min_quality, 0.0))
    target_quality = mq + (100.0-mq) * target
    return info, target_quality
Example 12
 def test_logp(self):
     for _ in range(10000):
         x = random.random()
         v = cystats.logp(x)
         assert v > 0 and v < 1
Example 13
def get_target_quality(wid, window_dimensions, batch, global_statistics, statistics, min_quality, min_speed):
    low_limit = get_low_limit(global_statistics.mmap_size>0, window_dimensions)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try to minimize client-latency, packet-backlog and batch.delay
    # the compression ratio tells us if we can increase the quality
    packets_backlog, pixels_backlog, _ = statistics.get_client_backlog()
    pixels_bl = 1.0 - logp(pixels_backlog/low_limit)
    target = pixels_bl
    batch_q = -1
    if batch is not None:
        recs = len(batch.last_actual_delays)
        if recs>0:
            #weighted average between start delay and min_delay
            #so when we start and we don't have any records, we don't lower quality
            #just because the start delay is higher than min_delay
            ref_delay = (batch.START_DELAY*10.0/recs + batch.min_delay*recs) / (recs+10.0/recs)
            #anything less than N times the reference delay is good enough:
            N = 4
            batch_q = N * ref_delay / max(1, batch.min_delay, batch.delay)
            target = min(1.0, target, batch_q)
    cratio_factor = None
    #from here on, the compression ratio integer value is in per-1000:
    es = [(t, pixels, 1000*compressed_size*bpp//pixels//32) for (t, _, pixels, bpp, compressed_size, _) in list(statistics.encoding_stats) if pixels>=4096]
    if len(es)>=2:
        #use the recent vs average compression ratio
        #(add value to smooth things out a bit, so very low compression ratios don't skew things)
        ascore, rscore = calculate_timesize_weighted_average_score(es)
        bump = 0
        if ascore>rscore:
            #raise the quality
            #only if there is no backlog:
            if packets_backlog==0:
                smooth = 150
                bump = logp((float(smooth+ascore)/(smooth+rscore)))-1.0
        else:
            #lower the quality
            #more so if the compression is not doing very well:
            mult = (1000 + rscore)/2000.0           #mult should be in the range 0.5 to ~1.0
            smooth = 50
            bump = -logp((float(smooth+rscore)/(smooth+ascore))-1.0) * mult
        target += bump
        cratio_factor = ascore, rscore, int(100*bump)
    latency_q = -1
    if len(global_statistics.client_latency)>0 and global_statistics.recent_client_latency>0:
        #if the latency is too high, lower quality target:
        latency_q = 3.0 * statistics.target_latency / global_statistics.recent_client_latency
        target = min(target, latency_q)
    target = min(1.0, max(0.0, target))
    if min_speed>0:
        #discount the quality more aggressively if we have speed requirements to satisfy:
        #ie: for min_speed=50:
        #target=1.0   -> target=1.0
        #target=0.8   -> target=0.51
        #target=0.5   -> target=0.125
        #target=0     -> target=0
        target = target ** ((100.0 + 4*min_speed)/100.0)
    #raise the quality when there are not many recent damage events:
    ww, wh = window_dimensions
    if ww>0 and wh>0:
        now = time.time()
        damage_pixel_count = dict((lim, sum([w*h for t,_,_,w,h in list(statistics.last_damage_events) if t>=now-lim and t<now-lim+1])) for lim in range(1,11))
        pixl5 = sum(v for lim,v in damage_pixel_count.items() if lim<=5)
        pixn5 = sum(v for lim,v in damage_pixel_count.items() if lim>5)
        pctpixdamaged = float(pixl5)/(ww*wh)
        log("get_target_quality: target=%s (window %ix%i) pctpixdamaged=%i%%, dpc=%s", target, ww, wh, pctpixdamaged*100, damage_pixel_count)
        if pctpixdamaged<=0.5:
            target = min(1.0, target + (1.0-pctpixdamaged*2))
        if pixl5<pixn5:
            target = sqrt(target)
    #apply min-quality:
    mq = min(100.0, max(min_quality, 0.0))
    target_quality = mq + (100.0-mq) * target
    info = {
            "min_quality"   : min_quality,
            "min_speed"     : min_speed,
            "backlog_factor": (packets_backlog, pixels_backlog, int(100.0*pixels_bl)),
            }
    if cratio_factor:
        info["compression-ratio"] = cratio_factor
    if batch_q>=0:
        info["batch-delay-ratio"] = int(100.0*batch_q)
    if latency_q>=0:
        info["latency"] = int(100.0*latency_q)
    return info, target_quality
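
The min_speed exponent used above is easy to sanity-check against the values quoted in its comment; for min_speed=50 the exponent is 3:

min_speed = 50
exponent = (100.0 + 4 * min_speed) / 100.0         # 3.0 for min_speed=50
for target in (1.0, 0.8, 0.5, 0.0):
    print("target=%.1f -> %.3f" % (target, target ** exponent))
#1.0 -> 1.000, 0.8 -> 0.512, 0.5 -> 0.125, 0.0 -> 0.000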
Example 14
def get_target_speed(window_dimensions, batch, global_statistics, statistics,
                     bandwidth_limit, min_speed, speed_data):
    low_limit = get_low_limit(global_statistics.mmap_size > 0,
                              window_dimensions)
    #***********************************************************
    # encoding speed:
    #    0    for highest compression/slower
    #    100  for lowest compression/fast
    # here we try to minimize damage-latency and client decoding speed

    #backlog factor:
    _, pixels_backlog, _ = statistics.get_client_backlog()
    pb_ratio = pixels_backlog / low_limit
    pixels_bl_s = 100 - int(100 * logp(pb_ratio / 4))  #4 frames behind or more -> compress more

    #megapixels per second:
    mpixels = low_limit / 1024.0 / 1024.0
    #for larger window sizes, we should be downscaling,
    #and don't want to wait too long for those anyway:
    ref_damage_latency = (10 + 25 * (1 + mathlog(max(1, mpixels)))) / 1000.0

    adil = statistics.avg_damage_in_latency or 0
    #abs: try to never go higher than N times the reference latency:
    dam_lat_abs = max(0,
                      (adil - ref_damage_latency)) / (ref_damage_latency * 3)

    if batch.locked:
        target_damage_latency = ref_damage_latency
        dam_lat_rel = 0
        frame_delay = 0
        dam_lat_s = 100
    else:
        #calculate a target latency and try to get close to it
        avg_delay = batch.delay
        delays = tuple(batch.last_actual_delays)
        if delays:
            #average recent actual delay:
            avg_delay = time_weighted_average(delays)
        #and average that with the current delay (which is lower or equal):
        frame_delay = max(10, int((avg_delay + batch.delay) // 2))
        #ensure we always spend at least as much time encoding as we spend batching:
        #(one frame encoding whilst one frame is batching is our ideal result)
        target_damage_latency = max(ref_damage_latency, frame_delay / 1000.0)
        dam_target_speed = min_speed
        if speed_data:
            dam_target_speed = max(min_speed,
                                   time_weighted_average(speed_data))
        #rel: do we need to increase speed to reach the target:
        dam_lat_rel = dam_target_speed / 100.0 * adil / target_damage_latency
        #cap the speed if we're delaying frames longer than we should:
        #(so we spend more of that time compressing them better instead):
        dam_lat_s = int(100 * 2 * ref_damage_latency * 1000 // frame_delay)

    #if we have more pixels to encode, we may need to go faster
    #(this is important because the damage latency used by the other factors
    # may aggregate multiple damage requests into one packet - which may skip frames)
    #TODO: reconcile this with video regions
    #only count the last second's worth:
    now = monotonic()
    lim = now - 1.0
    lde = tuple(w * h for t, _, _, w, h in tuple(statistics.last_damage_events)
                if t >= lim)
    pixels = sum(lde)
    mpixels_per_s = pixels / (1024 * 1024)
    pps = 0.0
    pixel_rate_s = 100
    if len(lde) > 5 and mpixels_per_s >= 1:
        #above 50 MPixels/s, we should reach 100% speed
        #(even x264 peaks at tens of MPixels/s)
        pps = sqrt(mpixels_per_s / 50.0)
        #if there aren't many pixels,
        #we can spend more time compressing them better:
        #(since it isn't going to cost too much to compress)
        #ie: 2MPixels/s -> max_speed=60%
        pixel_rate_s = 20 + int(mpixels_per_s * 20)

    bandwidth_s = 100
    if bandwidth_limit > 0:
        #below N Mbps, lower the speed ceiling,
        #so we will compress better:
        N = 10
        bandwidth_s = int(100 * sqrt(bandwidth_limit / (N * 1000 * 1000)))

    gcv = global_statistics.congestion_value
    congestion_s = 100
    if gcv > 0:
        #apply strict limit for congestion events:
        congestion_s = max(0, int(100 - gcv * 1000))

    #ensure we decode at a reasonable speed (for slow / low-power clients)
    #maybe this should be configurable?
    min_decode_speed = 1 * 1000 * 1000  #MPixels/s
    ads = statistics.avg_decode_speed or 0
    dec_lat = 0
    if ads > 0:
        dec_lat = min_decode_speed / ads

    ms = min(100, max(min_speed, 0))
    max_speed = max(
        ms, min(pixels_bl_s, dam_lat_s, pixel_rate_s, bandwidth_s,
                congestion_s))
    #combine factors: use the highest one:
    target = min(1, max(dam_lat_abs, dam_lat_rel, dec_lat, pps, 0))
    #scale target between min_speed and 100:
    speed = int(ms + (100 - ms) * target)
    speed = max(ms, min(max_speed, speed))

    #expose data we used:
    info = {
        "low-limit": int(low_limit),
        "max-speed": int(max_speed),
        "min-speed": int(min_speed),
        "factors": {
            "damage-latency-abs": int(dam_lat_abs * 100),
            "damage-latency-rel": int(dam_lat_rel * 100),
            "decoding-latency": int(dec_lat * 100),
            "pixel-rate": int(pps * 100),
        },
        "limits": {
            "backlog": pixels_bl_s,
            "damage-latency": dam_lat_s,
            "pixel-rate": pixel_rate_s,
            "bandwidth-limit": bandwidth_s,
            "congestion": congestion_s,
        },
    }
    return info, int(speed), max_speed
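
Assuming mathlog above is the natural logarithm from the math module, the reference damage latency grows only logarithmically with window size; with a few hypothetical low_limit values:

from math import log as mathlog                    # assumption: natural log, as used above

for low_limit in (256 * 1024, 1024 * 1024, 8 * 1024 * 1024):   # pixels, hypothetical
    mpixels = low_limit / 1024.0 / 1024.0
    ref_damage_latency = (10 + 25 * (1 + mathlog(max(1, mpixels)))) / 1000.0
    print("%5.2f MPixels -> reference latency %.3fs" % (mpixels, ref_damage_latency))
#0.25 and 1.00 MPixels both give 0.035s (the max(1, ...) floor), 8.00 MPixels gives 0.087s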
Example 15
	def test_logp(self):
		for _ in range(10000):
			x = random.random()
			v = cystats.logp(x)
			assert v>0 and v<1
Example 16
def get_target_quality(window_dimensions, batch, global_statistics, statistics,
                       bandwidth_limit, min_quality, min_speed):
    low_limit = get_low_limit(global_statistics.mmap_size > 0,
                              window_dimensions)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try to minimize client-latency, packet-backlog and batch.delay
    # the compression ratio tells us if we can increase the quality

    #backlog factor:
    packets_backlog, pixels_backlog, _ = statistics.get_client_backlog()
    pb_ratio = pixels_backlog / low_limit
    pixels_bl_q = 1 - sqrt(pb_ratio / 8)  #8 frames behind or more -> min quality

    #bandwidth limit factor:
    bandwidth_q = 1
    if bandwidth_limit > 0:
        #below 10Mbps, lower the quality
        bandwidth_q = int(100 * sqrt(bandwidth_limit / (10.0 * 1000 * 1000)))

    #congestion factor:
    gcv = global_statistics.congestion_value
    congestion_q = 1 - gcv * 10

    #batch delay factor:
    batch_q = 1
    if batch is not None:
        recs = len(batch.last_actual_delays)
        if recs > 0 and not batch.locked:
            #weighted average between start delay and min_delay
            #so when we start and we don't have any records, we don't lower quality
            #just because the start delay is higher than min_delay
            #anything less than N times the reference delay is good enough:
            N = 3.0 - min_speed / 50.0
            #if the min-speed is high, reduce tolerance:
            tolerance = 10 - int(min_speed // 10)
            ref_delay = max(
                0, tolerance + N *
                (batch.start_delay * 10 + batch.min_delay * recs) //
                (recs + 10))
            batch_q = (N * ref_delay) / max(1, batch.min_delay, batch.delay)

    #latency limit factor:
    latency_q = 1
    if global_statistics.client_latency and global_statistics.recent_client_latency > 0:
        #if the recent latency is too high, keep quality lower:
        latency_q = 3.0 * statistics.target_latency / global_statistics.recent_client_latency

    #target is the lowest value of all those limits:
    target = max(
        0, min(1, pixels_bl_q, bandwidth_q, congestion_q, batch_q, latency_q))

    info = {}
    #boost based on recent compression ratio
    comp_boost = 0
    #from here on, the compression ratio integer value is in per-1000:
    es = tuple((t, pixels, 1000 * compressed_size * bpp // pixels // 32)
               for (t, _, pixels, bpp, compressed_size,
                    _) in tuple(statistics.encoding_stats) if pixels >= 4096)
    if len(es) >= 2:
        #use the recent vs average compression ratio
        #(add value to smooth things out a bit, so very low compression ratios don't skew things)
        comp_boost = 0
        ascore, rscore = calculate_timesize_weighted_average_score(es)
        if ascore > rscore:
            #raise the quality
            #but only if there is no backlog:
            if packets_backlog == 0:
                smooth = 150
                comp_boost = logp(
                    ((smooth + ascore) / (smooth + rscore))) - 1.0
        else:
            #lower the quality
            #more so if the compression is not doing very well:
            mult = (1000 +
                    rscore) / 2000.0  #mult should be in the range 0.5 to ~1.0
            smooth = 50
            comp_boost = -logp(((smooth + rscore) /
                                (smooth + ascore)) - 1.0) * mult
        info["compression-ratio"] = ascore, rscore
        target = max(0, target + comp_boost)

    #discount the quality more aggressively if we have speed requirements to satisfy:
    if min_speed > 0:
        #ie: for min_speed=50:
        #target=1.0   -> target=1.0
        #target=0.8   -> target=0.51
        #target=0.5   -> target=0.125
        #target=0     -> target=0
        target = target**((100.0 + 4 * min_speed) / 100.0)

    #raise the quality when there are not many recent damage events:
    ww, wh = window_dimensions
    if ww > 0 and wh > 0:
        lde = tuple(statistics.last_damage_events)
        if lde:
            now = monotonic()
            damage_pixel_count = tuple((lim,
                                        sum(w * h for t, _, _, w, h in lde
                                            if now - lim <= t < now - lim + 1))
                                       for lim in range(1, 11))
            pixl5 = sum(v for lim, v in damage_pixel_count if lim <= 5)
            pixn5 = sum(v for lim, v in damage_pixel_count if lim > 5)
            pctpixdamaged = pixl5 / (ww * wh)
            log(
                "get_target_quality: target=%3i%% (window %4ix%-4i) pctpixdamaged=%3i%%, dpc=%s",
                100 * target, ww, wh, pctpixdamaged * 100, damage_pixel_count)
            if pctpixdamaged < 0.5:
                target *= (1.5 - pctpixdamaged)
            if pixl5 < pixn5:
                target = sqrt(target)

    #apply min-quality:
    mq = min(100, max(min_quality, 0))
    quality = int(mq + (100 - mq) * target)
    quality = max(0, mq, min(100, quality))

    info.update({
        "min-quality": min_quality,
        "min-speed": min_speed,
        "backlog": (packets_backlog, pixels_backlog, low_limit, int(100 * pb_ratio)),
        "limits": {
            "backlog": int(pixels_bl_q * 100),
            "bandwidth": int(bandwidth_q * 100),
            "congestion": int(congestion_q * 100),
            "batch": int(batch_q * 100),
            "latency": int(latency_q * 100),
            "boost": int(comp_boost * 100),
        },
    })
    return info, int(quality)
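
The per-second damage bucketing near the end of the function above can be illustrated with two made-up damage events, one half a second old and one seven seconds old:

from time import monotonic

now = monotonic()
lde = ((now - 0.5, 0, 0, 100, 100),                # hypothetical: 100x100 area damaged 0.5s ago
       (now - 7.0, 0, 0, 200, 200))                # hypothetical: 200x200 area damaged 7s ago
damage_pixel_count = tuple((lim, sum(w * h for t, _, _, w, h in lde
                                     if now - lim <= t < now - lim + 1))
                           for lim in range(1, 11))
pixl5 = sum(v for lim, v in damage_pixel_count if lim <= 5)     # last 5 seconds: 10000
pixn5 = sum(v for lim, v in damage_pixel_count if lim > 5)      # 5 to 10 seconds ago: 40000
print(pixl5, pixn5)     # pixl5 < pixn5: activity is dying down, so target gets the sqrt() boost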
Example 17
def get_target_quality(wid, window_dimensions, batch, global_statistics, statistics, min_quality, min_speed):
    low_limit = get_low_limit(global_statistics.mmap_size>0, window_dimensions)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try to minimize client-latency, packet-backlog and batch.delay
    # the compression ratio tells us if we can increase the quality
    packets_backlog, pixels_backlog, _ = statistics.get_client_backlog()
    pixels_bl = 1.0 - logp(pixels_backlog/low_limit)
    target = pixels_bl
    batch_q = -1
    if batch is not None:
        recs = len(batch.last_actual_delays)
        if recs>0:
            #weighted average between start delay and min_delay
            #so when we start and we don't have any records, we don't lower quality
            #just because the start delay is higher than min_delay
            ref_delay = (batch.START_DELAY*10.0/recs + batch.min_delay*recs) / (recs+10.0/recs)
            #anything less than twice the minimum is good enough:
            batch_q = ref_delay / max(1, batch.min_delay, batch.delay/2.0)
            target = min(1.0, target, batch_q)
    cratio_factor = None
    #from here on, the compression ratio integer value is in per-1000:
    es = [(t, pixels, 1000*compressed_size*bpp//pixels//32) for (t, _, pixels, bpp, compressed_size, _) in list(statistics.encoding_stats) if pixels>=4096]
    if len(es)>=2:
        #use the recent vs average compression ratio
        #(add 10 to smooth things out a bit, so very low compression ratios don't skew things)
        ascore, rscore = calculate_timesize_weighted_average_score(es)
        bump = 0
        if ascore>rscore:
            #raise the quality
            #only if there is no backlog:
            if packets_backlog==0:
                bump = logp((float(10+ascore)/(10+rscore))-1.0)
        else:
            #lower the quality
            #more so if the compression is not doing very well:
            mult = (1000 + rscore)/2000.0           #mult should be in the range 0.5 to ~1.0
            bump = -logp((float(10+rscore)/(10+ascore))-1.0) * mult
        target += bump
        cratio_factor = ascore, rscore, int(100*bump)
    latency_q = -1
    if len(global_statistics.client_latency)>0 and global_statistics.recent_client_latency>0:
        latency_q = 3.0 * statistics.target_latency / global_statistics.recent_client_latency
        target = min(target, latency_q)
    target = min(1.0, max(0.0, target))
    if min_speed>0:
        #discount the quality more aggressively if we have speed requirements to satisfy:
        #ie: for min_speed=50:
        #target=1.0   -> target=1.0
        #target=0.8   -> target=0.51
        #target=0.5   -> target=0.125
        #target=0     -> target=0
        target = target ** ((100.0 + 4*min_speed)/100.0)
    mq = min(100.0, max(min_quality, 0.0))
    target_quality = mq + (100.0-mq) * target
    info = {
            "min_quality"   : min_quality,
            "min_speed"     : min_speed,
            "backlog_factor": (packets_backlog, pixels_backlog, int(100.0*pixels_bl)),
            }
    if cratio_factor:
        info["compression-ratio"] = cratio_factor
    if batch_q>=0:
        info["batch-delay-ratio"] = int(100.0*batch_q)
    if latency_q>=0:
        info["latency"] = int(100.0*latency_q)
    return info, target_quality
Example 18
 def get_factors(self, pixel_count, delay):
     factors = []
     #ratio of "in" and "out" latency indicates network bottleneck:
     #(the difference between the two is the time it takes to send)
     if len(self.damage_in_latency) > 0 and len(self.damage_out_latency) > 0:
         ad = max(0.001, self.avg_damage_out_latency - self.avg_damage_in_latency)
         rd = max(0.001, self.recent_damage_out_latency - self.recent_damage_in_latency)
         div = 0.040 / max(ad, rd)  #reduce weight for low latencies (matter less)
         metric = "damage-network-delay"
         #info: avg delay=%.3f recent delay=%.3f" % (ad, rd)
         factors.append(
             calculate_for_average(metric, ad, rd, weight_div=div))
     #send speed:
     if self.avg_send_speed is not None and self.recent_send_speed is not None:
         #our calculate methods aim for lower values, so invert speed
         #this is how long it takes to send 1MB:
         avg1MB = 1.0 * 1024 * 1024 / self.avg_send_speed
         recent1MB = 1.0 * 1024 * 1024 / self.recent_send_speed
         #we only really care about this when the speed is quite low,
         #so adjust the weight accordingly:
         minspeed = float(128 * 1024)
         div = logp(max(self.recent_send_speed, minspeed) / minspeed)
         metric = "network-send-speed"
         #info: avg=%s, recent=%s (KBytes/s), div=%s" % (int(self.avg_send_speed/1024), int(self.recent_send_speed/1024), div)
         factors.append(
             calculate_for_average(metric,
                                   avg1MB,
                                   recent1MB,
                                   weight_offset=1.0,
                                   weight_div=div))
     #client decode time:
     if self.avg_decode_speed is not None and self.recent_decode_speed is not None:
         metric = "client-decode-speed"
         #info: avg=%.1f, recent=%.1f (MPixels/s)" % (self.avg_decode_speed/1000/1000, self.recent_decode_speed/1000/1000)
         #our calculate methods aim for lower values, so invert speed
         #this is how long it takes to send 1MB:
         avg1MB = 1.0 * 1024 * 1024 / self.avg_decode_speed
         recent1MB = 1.0 * 1024 * 1024 / self.recent_decode_speed
         weight_div = max(0.25,
                          self.recent_decode_speed / (4 * 1000 * 1000))
         factors.append(
             calculate_for_average(metric,
                                   avg1MB,
                                   recent1MB,
                                   weight_offset=0.0,
                                   weight_div=weight_div))
     if self.last_damage_event_time:
         #If nothing happens for a while then we can reduce the batch delay,
         #however we must ensure this is not caused by a high system latency
         #so we ignore short elapsed times.
         elapsed = time.time() - self.last_damage_event_time
         mtime = max(0, elapsed - self.max_latency * 2)
         #the longer the time, the more we slash:
         weight = sqrt(mtime)
         target = max(0, 1.0 - mtime)
         metric = "damage-rate"
         info = {
             "elapsed": int(1000.0 * elapsed),
             "max_latency": int(1000.0 * self.max_latency)
         }
         factors.append((metric, info, target, weight))
     return factors
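
The div term in the damage-network-delay factor above is scaled against a 40ms reference; assuming weight_div divides the resulting weight (as the "reduce weight for low latencies" comment suggests), tiny network delays are largely ignored while large ones count in full:

for ad, rd in ((0.002, 0.003), (0.040, 0.050), (0.150, 0.200)):   # seconds, hypothetical
    div = 0.040 / max(ad, rd)
    print("avg=%.3fs recent=%.3fs -> weight_div=%.2f" % (ad, rd, div))
#3ms -> 13.33 (weight heavily reduced), 50ms -> 0.80, 200ms -> 0.20 (weight amplified)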
Example 19
def get_target_quality(wid, window_dimensions, batch, global_statistics,
                       statistics, min_quality, min_speed):
    low_limit = get_low_limit(global_statistics.mmap_size > 0,
                              window_dimensions)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try to minimize client-latency, packet-backlog and batch.delay
    # the compression ratio tells us if we can increase the quality
    packets_backlog, pixels_backlog, _ = statistics.get_client_backlog()
    pixels_bl = 1.0 - logp(pixels_backlog / low_limit)
    target = pixels_bl
    batch_q = -1
    if batch is not None:
        recs = len(batch.last_actual_delays)
        if recs > 0:
            #weighted average between start delay and min_delay
            #so when we start and we don't have any records, we don't lower quality
            #just because the start delay is higher than min_delay
            ref_delay = (batch.START_DELAY * 10.0 / recs +
                         batch.min_delay * recs) / (recs + 10.0 / recs)
            #anything less than N times the reference delay is good enough:
            N = 4
            batch_q = N * ref_delay / max(1, batch.min_delay, batch.delay)
            target = min(1.0, target, batch_q)
    cratio_factor = None
    #from here on, the compression ratio integer value is in per-1000:
    es = [(t, pixels, 1000 * compressed_size * bpp // pixels // 32)
          for (t, _, pixels, bpp, compressed_size,
               _) in list(statistics.encoding_stats) if pixels >= 4096]
    if len(es) >= 2:
        #use the recent vs average compression ratio
        #(add value to smooth things out a bit, so very low compression ratios don't skew things)
        ascore, rscore = calculate_timesize_weighted_average_score(es)
        bump = 0
        if ascore > rscore:
            #raise the quality
            #only if there is no backlog:
            if packets_backlog == 0:
                smooth = 150
                bump = logp((float(smooth + ascore) / (smooth + rscore))) - 1.0
        else:
            #lower the quality
            #more so if the compression is not doing very well:
            mult = (1000 +
                    rscore) / 2000.0  #mult should be in the range 0.5 to ~1.0
            smooth = 50
            bump = -logp((float(smooth + rscore) /
                          (smooth + ascore)) - 1.0) * mult
        target += bump
        cratio_factor = ascore, rscore, int(100 * bump)
    latency_q = -1
    if len(global_statistics.client_latency) > 0 and global_statistics.recent_client_latency > 0:
        #if the latency is too high, lower quality target:
        latency_q = 3.0 * statistics.target_latency / global_statistics.recent_client_latency
        target = min(target, latency_q)
    target = min(1.0, max(0.0, target))
    if min_speed > 0:
        #discount the quality more aggressively if we have speed requirements to satisfy:
        #ie: for min_speed=50:
        #target=1.0   -> target=1.0
        #target=0.8   -> target=0.51
        #target=0.5   -> target=0.125
        #target=0     -> target=0
        target = target**((100.0 + 4 * min_speed) / 100.0)
    mq = min(100.0, max(min_quality, 0.0))
    target_quality = mq + (100.0 - mq) * target
    info = {
        "min_quality": min_quality,
        "min_speed": min_speed,
        "backlog_factor": (packets_backlog, pixels_backlog, int(100.0 * pixels_bl)),
    }
    if cratio_factor:
        info["compression-ratio"] = cratio_factor
    if batch_q >= 0:
        info["batch-delay-ratio"] = int(100.0 * batch_q)
    if latency_q >= 0:
        info["latency"] = int(100.0 * latency_q)
    return info, target_quality