def get_factors(self, bandwidth_limit=0):
    """Collect (metric, info, target, weight) tuples used to tune the batch delay.

    Candidate factors are only recorded when their weight is significant
    (above 0.01).  When `bandwidth_limit` (in bps) is positive, an extra
    factor steers bandwidth usage towards 10% below that budget.
    """
    factors = []

    def add(metric, info, target, weight):
        # drop factors whose weight is too small to matter
        if weight > 0.01:
            factors.append((metric, info, target, weight))

    # the gap between "out" and "in" damage latency approximates the time
    # it takes to send, so their ratio hints at a network bottleneck:
    if self.damage_in_latency and self.damage_out_latency:
        # clamp low values so jitter cannot skew the ratio too much
        avg_delay = max(0.010, 0.040 + self.avg_damage_out_latency - self.avg_damage_in_latency)
        recent_delay = max(0.010, 0.040 + self.recent_damage_out_latency - self.recent_damage_in_latency)
        add(*calculate_for_average("damage-network-delay", avg_delay, recent_delay))
    # client decode speed: the calculate methods aim for lower values,
    # so feed them the time needed to decode 1MB instead of the raw speed:
    avg_speed = self.avg_decode_speed
    recent_speed = self.recent_decode_speed
    if avg_speed > 0 and recent_speed > 0:
        avg_1mb_time = 1.0 * 1024 * 1024 / avg_speed
        recent_1mb_time = 1.0 * 1024 * 1024 / recent_speed
        div = max(0.25, recent_speed / (4 * 1000 * 1000))
        add(*calculate_for_average("client-decode-speed", avg_1mb_time, recent_1mb_time,
                                   weight_offset=0.0, weight_div=div))
    last_event = self.last_damage_event_time
    if last_event:
        # a quiet period lets us reduce the batch delay, but short gaps
        # are ignored so that high system latency is not mistaken for
        # idleness:
        elapsed = monotonic() - last_event
        quiet_time = max(0, elapsed - self.max_latency * 2)
        # the longer the quiet time, the harder we slash:
        add("damage-rate",
            {"elapsed": int(1000.0 * elapsed),
             "max_latency": int(1000.0 * self.max_latency)},
            max(0, 1.0 - quiet_time),
            sqrt(quiet_time))
    if bandwidth_limit > 0:
        # bandwidth used over the last second, in bits per second
        # (encoding_stats entries: (end, coding, w*h, bpp, len(data), end-start))
        cutoff = monotonic() - 1
        used = sum(v[4] for v in tuple(self.encoding_stats) if v[0] > cutoff) * 8
        # aim for 10% below the limit:
        target = used * 110.0 / 100.0 / bandwidth_limit
        # as usage approaches or exceeds the limit,
        # the certainty of this factor goes up:
        add("bandwidth-limit",
            {"budget": bandwidth_limit, "used": used},
            target,
            max(0, target - 1) * (5 + logp(target)))
    return factors
def get_factors(self, pixel_count, delay):
    """Return a list of (metric, info, target, weight) tuples used to
    adjust the batch delay.

    `pixel_count` and `delay` are part of the caller's interface but are
    not used by this implementation.
    """
    factors = []
    #ratio of "in" and "out" latency indicates network bottleneck:
    #(the difference between the two is the time it takes to send)
    if self.damage_in_latency and self.damage_out_latency:
        #prevent jitter from skewing the values too much
        ad = max(0.010, 0.040 + self.avg_damage_out_latency - self.avg_damage_in_latency)
        rd = max(0.010, 0.040 + self.recent_damage_out_latency - self.recent_damage_in_latency)
        factors.append(calculate_for_average("damage-network-delay", ad, rd))
    #send speed:
    ass = self.avg_send_speed
    rss = self.recent_send_speed
    if ass > 0 and rss > 0:
        #our calculate methods aim for lower values, so invert speed:
        #this is how long it takes to send 1MB:
        avg1MB = 1.0 * 1024 * 1024 / ass
        recent1MB = 1.0 * 1024 * 1024 / rss
        #we only really care about this when the speed is quite low,
        #so adjust the weight accordingly:
        minspeed = float(128 * 1024)
        div = logp(max(rss, minspeed) / minspeed)
        factors.append(calculate_for_average("network-send-speed", avg1MB, recent1MB,
                                             weight_offset=1.0, weight_div=div))
    #client decode time, same inversion: time taken to decode 1MB:
    ads = self.avg_decode_speed
    rds = self.recent_decode_speed
    if ads > 0 and rds > 0:
        avg1MB = 1.0 * 1024 * 1024 / ads
        recent1MB = 1.0 * 1024 * 1024 / rds
        weight_div = max(0.25, rds / (4 * 1000 * 1000))
        factors.append(calculate_for_average("client-decode-speed", avg1MB, recent1MB,
                                             weight_offset=0.0, weight_div=weight_div))
    ldet = self.last_damage_event_time
    if ldet:
        #If nothing happens for a while then we can reduce the batch delay,
        #however we must ensure this is not caused by a high system latency
        #so we ignore short elapsed times.
        #NOTE(review): this uses the wall clock (time.time()) whereas a
        #sibling version uses monotonic(); confirm it matches the clock
        #that records last_damage_event_time before switching.
        elapsed = time.time() - ldet
        mtime = max(0, elapsed - self.max_latency * 2)
        #the longer the time, the more we slash:
        weight = sqrt(mtime)
        target = max(0, 1.0 - mtime)
        info = {"elapsed": int(1000.0 * elapsed),
                "max_latency": int(1000.0 * self.max_latency)}
        factors.append(("damage-rate", info, target, weight))
    return factors
def get_factors(self, pixel_count, delay):
    """Return a list of (metric, info, target, weight) tuples used to
    adjust the batch delay.

    `pixel_count` and `delay` are part of the caller's interface but are
    not used by this implementation.
    """
    factors = []
    #ratio of "in" and "out" latency indicates network bottleneck:
    #(the difference between the two is the time it takes to send)
    if self.damage_in_latency and self.damage_out_latency:
        ad = max(0.001, self.avg_damage_out_latency - self.avg_damage_in_latency)
        rd = max(0.001, self.recent_damage_out_latency - self.recent_damage_in_latency)
        #reduce weight for low latencies (matter less):
        div = 0.040 / max(ad, rd)
        factors.append(calculate_for_average("damage-network-delay", ad, rd, weight_div=div))
    #send speed:
    #guard against 0 as well as None: a zero speed would otherwise
    #raise ZeroDivisionError in the inversion below
    if self.avg_send_speed and self.recent_send_speed:
        #our calculate methods aim for lower values, so invert speed:
        #this is how long it takes to send 1MB:
        avg1MB = 1.0 * 1024 * 1024 / self.avg_send_speed
        recent1MB = 1.0 * 1024 * 1024 / self.recent_send_speed
        #we only really care about this when the speed is quite low,
        #so adjust the weight accordingly:
        minspeed = float(128 * 1024)
        div = logp(max(self.recent_send_speed, minspeed) / minspeed)
        factors.append(calculate_for_average("network-send-speed", avg1MB, recent1MB,
                                             weight_offset=1.0, weight_div=div))
    #client decode time, same inversion and the same zero/None guard:
    if self.avg_decode_speed and self.recent_decode_speed:
        avg1MB = 1.0 * 1024 * 1024 / self.avg_decode_speed
        recent1MB = 1.0 * 1024 * 1024 / self.recent_decode_speed
        weight_div = max(0.25, self.recent_decode_speed / (4 * 1000 * 1000))
        factors.append(calculate_for_average("client-decode-speed", avg1MB, recent1MB,
                                             weight_offset=0.0, weight_div=weight_div))
    if self.last_damage_event_time:
        #If nothing happens for a while then we can reduce the batch delay,
        #however we must ensure this is not caused by a high system latency
        #so we ignore short elapsed times.
        elapsed = time.time() - self.last_damage_event_time
        mtime = max(0, elapsed - self.max_latency * 2)
        #the longer the time, the more we slash:
        weight = sqrt(mtime)
        target = max(0, 1.0 - mtime)
        info = {"elapsed": int(1000.0 * elapsed),
                "max_latency": int(1000.0 * self.max_latency)}
        factors.append(("damage-rate", info, target, weight))
    return factors