Example #1
 def get_factors(self, target_latency, pixel_count):
     factors = []
     if len(self.client_latency)>0:
         #client latency: (we want to keep client latency as low as possible)
         metric = "client-latency"
         l = 0.005 + self.min_client_latency
         wm = logp(l / 0.020)
         factors.append(calculate_for_target(metric, l, self.avg_client_latency, self.recent_client_latency, aim=0.8, slope=0.005, smoothing=sqrt, weight_multiplier=wm))
     if len(self.client_ping_latency)>0:
         metric = "client-ping-latency"
         l = 0.005 + self.min_client_ping_latency
         wm = logp(l / 0.050)
         factors.append(calculate_for_target(metric, l, self.avg_client_ping_latency, self.recent_client_ping_latency, aim=0.95, slope=0.005, smoothing=sqrt, weight_multiplier=wm))
     if len(self.server_ping_latency)>0:
         metric = "server-ping-latency"
         l = 0.005 + self.min_server_ping_latency
         wm = logp(l / 0.050)
         factors.append(calculate_for_target(metric, l, self.avg_server_ping_latency, self.recent_server_ping_latency, aim=0.95, slope=0.005, smoothing=sqrt, weight_multiplier=wm))
     #damage packet queue size: (includes packets from all windows)
     factors.append(queue_inspect("damage-packet-queue-size", self.damage_packet_qsizes, smoothing=sqrt))
     #damage packet queue pixels (global):
     qpix_time_values = [(event_time, value) for event_time, _, value in list(self.damage_packet_qpixels)]
     factors.append(queue_inspect("damage-packet-queue-pixels", qpix_time_values, div=pixel_count, smoothing=sqrt))
     #damage data queue: (This is an important metric since each item will consume a fair amount of memory and each will later on go through the other queues.)
     factors.append(queue_inspect("damage-data-queue", self.damage_data_qsizes))
     if self.mmap_size>0:
         #full: effective range is 0.0 to ~1.2
         full = 1.0-float(self.mmap_free_size)/self.mmap_size
         #aim for ~33%
         factors.append(("mmap-area", "%s%% full" % int(100*full), logp(3*full), (3*full)**2))
     return factors
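
Each entry appended above is a (metric, info, factor, weight) tuple, as the inline mmap-area entry shows. The real calculate_for_target and queue_inspect helpers are defined elsewhere in the project; the stand-ins below are only a sketch that mirrors their call signatures and return shape so that get_factors can be exercised on its own (the scoring logic here is invented, not the project's):

from math import sqrt

def calculate_for_target(metric, target, avg, recent, aim=0.8, slope=0.005,
                         smoothing=sqrt, weight_multiplier=1.0):
    #stand-in: compare the recent value against the (scaled) target;
    #values above the target push the factor above the neutral 1.0
    factor = smoothing(max(0.0, recent) / max(slope, target*aim + slope))
    #weight grows as the factor moves away from 1.0:
    weight = weight_multiplier * (1.0 + abs(factor - 1.0))
    return metric, {"avg": avg, "recent": recent}, factor, weight

def queue_inspect(metric, time_values, div=1, smoothing=None):
    #stand-in: average the recorded (event_time, size) samples, normalized by div
    values = [v/div for _, v in time_values] or [0.0]
    factor = sum(values) / len(values)
    if smoothing:
        factor = smoothing(factor)
    return metric, {"last": values[-1]}, factor, factor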
Example #2
def test_logp():
	start = time.time()
	for _ in xrange(100000):
		x = random.random()
		logp(x)
	end = time.time()
	print("logp:")
	print("elapsed time: %.1fms" % (1000*(end-start)))
Example #3
def test_logp():
    start = time.time()
    for _ in xrange(100000):
        x = random.random()
        logp(x)
    end = time.time()
    print("logp:")
    print("elapsed time: %.1fms" % (1000 * (end - start)))
Example #4
def get_target_quality(wid, window_dimensions, batch, global_statistics, statistics, min_quality):
    low_limit = get_low_limit(global_statistics.mmap_size>0, window_dimensions)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try to minimize client-latency, packet-backlog and batch.delay
    packets_backlog, _, _ = statistics.get_client_backlog()
    packets_bl = 1.0 - logp(packets_backlog/low_limit)
    target = packets_bl
    batch_q = -1
    if batch is not None:
        recs = len(batch.last_actual_delays)
        if recs>0:
            #weighted average between the start delay and min_delay,
            #so that early on, when we only have a few records, we don't lower
            #the quality just because the start delay is higher than min_delay
            ref_delay = (batch.START_DELAY*10.0/recs + batch.min_delay*recs) / (recs+10.0/recs)
            batch_q = ref_delay / max(batch.min_delay, batch.delay)
            target = min(1.0, target, batch_q)
    latency_q = -1
    if len(global_statistics.client_latency)>0 and global_statistics.recent_client_latency>0:
        latency_q = 3.0 * statistics.target_latency / global_statistics.recent_client_latency
        target = min(target, latency_q)
    target = min(1.0, max(0.0, target))
    mq = min(100.0, max(min_quality, 0.0))
    target_quality = mq + (100.0-mq) * target
    info = {
            "min_quality"   : min_quality,
            "backlog_factor": int(100.0*packets_bl),
            "batch_factor"  : int(100.0*batch_q),
            "latency_factor": int(100.0*latency_q),
            }
    return info, target_quality
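
As a quick worked example of the final blend (numbers chosen purely for illustration): with min_quality=30 and a combined target factor of 0.5, the target quality lands halfway between the floor and 100:

#illustrative inputs: min_quality=30, combined target factor=0.5
min_quality = 30
target = 0.5
mq = min(100.0, max(min_quality, 0.0))        # 30.0
target_quality = mq + (100.0 - mq)*target     # 30 + 70*0.5 = 65.0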
Example #5
def get_target_quality(wid, window_dimensions, batch, global_statistics, statistics, min_quality):
    low_limit = get_low_limit(global_statistics.mmap_size>0, window_dimensions)
    #***********************************************************
    # quality:
    #    0    for lowest quality (low bandwidth usage)
    #    100  for best quality (high bandwidth usage)
    # here we try to minimize client-latency, packet-backlog and batch.delay
    packets_backlog, _, _ = statistics.get_client_backlog()
    packets_bl = 1.0 - logp(packets_backlog/low_limit)
    target = packets_bl
    batch_q = -1
    recs = len(batch.last_actual_delays)
    if recs>0:
        #weighted average between the start delay and min_delay,
        #so that early on, when we only have a few records, we don't lower
        #the quality just because the start delay is higher than min_delay
        ref_delay = (batch.START_DELAY*10.0/recs + batch.min_delay*recs) / (recs+10.0/recs)
        batch_q = ref_delay / max(batch.min_delay, batch.delay)
        target = min(1.0, target, batch_q)
    latency_q = -1
    if len(global_statistics.client_latency)>0 and global_statistics.recent_client_latency>0:
        latency_q = 3.0 * statistics.target_latency / global_statistics.recent_client_latency
        target = min(target, latency_q)
    target = min(1.0, max(0.0, target))
    mq = min(100.0, max(min_quality, 0.0))
    target_quality = mq + (100.0-mq) * target
    info = {
            "min_quality"   : min_quality,
            "backlog_factor": int(100.0*packets_bl),
            "batch_factor"  : int(100.0*batch_q),
            "latency_factor": int(100.0*latency_q),
            }
    return info, target_quality
Example #6
 def get_factors(self, pixel_count, delay):
     factors = []
     #ratio of "in" and "out" latency indicates network bottleneck:
     #(the difference between the two is the time it takes to send)
     if len(self.damage_in_latency)>0 and len(self.damage_out_latency)>0:
         ad = max(0.001, self.avg_damage_out_latency-self.avg_damage_in_latency)
         rd = max(0.001, self.recent_damage_out_latency-self.recent_damage_in_latency)
         div = 0.040 / max(ad, rd)       #reduce weight for low latencies (matter less)
         metric = "damage-network-delay"
         #info: "avg delay=%.3f recent delay=%.3f" % (ad, rd)
         factors.append(calculate_for_average(metric, ad, rd, weight_div=div))
     #send speed:
     if self.avg_send_speed is not None and self.recent_send_speed is not None:
         #our calculate methods aim for lower values, so invert the speed
         #this is how long it takes to send 1MB:
         avg1MB = 1.0*1024*1024/self.avg_send_speed
         recent1MB = 1.0*1024*1024/self.recent_send_speed
         #we only really care about this when the speed is quite low,
         #so adjust the weight accordingly:
         minspeed = float(128*1024)
         div = logp(max(self.recent_send_speed, minspeed)/minspeed)
         metric = "network-send-speed"
         #info: "avg=%s, recent=%s (KBytes/s), div=%s" % (int(self.avg_send_speed/1024), int(self.recent_send_speed/1024), div)
         factors.append(calculate_for_average(metric, avg1MB, recent1MB, weight_offset=1.0, weight_div=div))
     #client decode time:
     if self.avg_decode_speed is not None and self.recent_decode_speed is not None:
         metric = "client-decode-speed"
         #info: "avg=%.1f, recent=%.1f (MPixels/s)" % (self.avg_decode_speed/1000/1000, self.recent_decode_speed/1000/1000)
         #our calculate methods aim for lower values, so invert the speed
         #this is how long it takes to decode 1 MPixel:
         avg1MB = 1.0*1024*1024/self.avg_decode_speed
         recent1MB = 1.0*1024*1024/self.recent_decode_speed
         weight_div = max(0.25, self.recent_decode_speed/(4*1000*1000))
         factors.append(calculate_for_average(metric, avg1MB, recent1MB, weight_offset=0.0, weight_div=weight_div))
     if self.last_damage_event_time:
         #If nothing happens for a while then we can reduce the batch delay,
         #however we must ensure this is not caused by a high system latency
         #so we ignore short elapsed times.
         elapsed = time.time()-self.last_damage_event_time
         mtime = max(0, elapsed-self.max_latency*2)
         #the longer the time, the more we slash:
         weight = sqrt(mtime)
         target = max(0, 1.0-mtime)
         metric = "damage-rate"
         info = {"elapsed"   : int(1000.0*elapsed),
                 "max_latency"   : int(1000.0*self.max_latency)}
         factors.append((metric, info, target, weight))
     return factors
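
To make the send-speed inversion above concrete (the input speeds are invented, and logp is assumed to be in scope, e.g. the stand-in shown earlier): at an average send speed of 2MB/s and a recent send speed of 256KB/s, the factor inputs become:

#illustrative speeds, in bytes per second:
avg_send_speed = 2*1024*1024
recent_send_speed = 256*1024
avg1MB = 1.0*1024*1024/avg_send_speed           # 0.5s to send 1MB on average
recent1MB = 1.0*1024*1024/recent_send_speed     # 4.0s to send 1MB recently
minspeed = float(128*1024)
#the divisor grows with the recent speed, shrinking the weight on fast links:
div = logp(max(recent_send_speed, minspeed)/minspeed)    # logp(2.0)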
Example #7
 def get_factors(self, pixel_count, delay):
     factors = []
     #ratio of "in" and "out" latency indicates network bottleneck:
     #(the difference between the two is the time it takes to send)
     if len(self.damage_in_latency)>0 and len(self.damage_out_latency)>0:
         ad = max(0.001, self.avg_damage_out_latency-self.avg_damage_in_latency)
         rd = max(0.001, self.recent_damage_out_latency-self.recent_damage_in_latency)
         div = 0.040 / max(ad, rd)       #reduce weight for low latencies (matter less)
         metric = "damage-network-delay"
         #info: "avg delay=%.3f recent delay=%.3f" % (ad, rd)
         factors.append(calculate_for_average(metric, ad, rd, weight_div=div))
     #send speed:
     if self.avg_send_speed is not None and self.recent_send_speed is not None:
         #our calculate methods aim for lower values, so invert the speed
         #this is how long it takes to send 1MB:
         avg1MB = 1.0*1024*1024/self.avg_send_speed
         recent1MB = 1.0*1024*1024/self.recent_send_speed
         #we only really care about this when the speed is quite low,
         #so adjust the weight accordingly:
         minspeed = float(128*1024)
         div = logp(max(self.recent_send_speed, minspeed)/minspeed)
         metric = "network-send-speed"
         #info: "avg=%s, recent=%s (KBytes/s), div=%s" % (int(self.avg_send_speed/1024), int(self.recent_send_speed/1024), div)
         factors.append(calculate_for_average(metric, avg1MB, recent1MB, weight_offset=1.0, weight_div=div))
     #client decode time:
     if self.avg_decode_speed is not None and self.recent_decode_speed is not None:
         metric = "client-decode-speed"
         #info: "avg=%.1f, recent=%.1f (MPixels/s)" % (self.avg_decode_speed/1000/1000, self.recent_decode_speed/1000/1000)
         #our calculate methods aim for lower values, so invert the speed
         #this is how long it takes to decode 1 MPixel:
         avg1MB = 1.0*1024*1024/self.avg_decode_speed
         recent1MB = 1.0*1024*1024/self.recent_decode_speed
         weight_div = max(0.25, self.recent_decode_speed/(4*1000*1000))
         factors.append(calculate_for_average(metric, avg1MB, recent1MB, weight_offset=0.0, weight_div=weight_div))
     if self.last_damage_event_time:
         #If nothing happens for a while then we can reduce the batch delay,
         #however we must ensure this is not caused by a high system latency
         #so we ignore short elapsed times.
         elapsed = time.time()-self.last_damage_event_time
         mtime = max(0, elapsed-self.max_latency*2)
         #the longer the time, the more we slash:
         weight = sqrt(mtime)
         target = max(0, 1.0-mtime)
         metric = "damage-rate"
         info = {"elapsed"   : int(1000.0*elapsed),
                 "max_latency"   : int(1000.0*self.max_latency)}
         factors.append((metric, info, target, weight))
     return factors
def update_batch_delay(batch, factors):
    """
        Given a list of factors of the form:
        [(metric, info, factor, weight)]
        we calculate a new batch delay.
        We use a time-weighted average of previous delays as a starting value,
        then combine it with the new factors.
    """
    current_delay = batch.delay
    now = time.time()
    tv, tw = 0.0, 0.0
    decay = max(1, logp(current_delay/batch.min_delay)/5.0)
    max_delay = batch.max_delay
    for delays, d_weight in ((batch.last_delays, 0.25), (batch.last_actual_delays, 0.75)):
        if delays is not None and len(delays)>0:
            #get the weighted average
            #older values matter less, we decay them according to how much we batch already
            #(older values matter more when we batch a lot)
            for when, delay in list(delays):
                #newer matter more:
                w = d_weight/(1.0+((now-when)/decay)**2)
                d = max(0, min(max_delay, delay))
                tv += d*w
                tw += w
    hist_w = tw

    for x in factors:
        if x is None or len(x)!=4:
            log.warn("invalid factor line: %s" % str(x))
    valid_factors = [x for x in factors if x is not None and len(x)==4]
    all_factors_weight = sum([w for _,_,_,w in valid_factors])
    if all_factors_weight==0:
        log("update_batch_delay: no weights yet!")
        return
    for _, _, factor, weight in valid_factors:
        target_delay = max(0, min(max_delay, current_delay*factor))
        w = max(1, hist_w)*weight/all_factors_weight
        tw += w
        tv += target_delay*w
    mv = 0
    if batch.always:
        mv = batch.min_delay
    batch.delay = max(mv, min(max_delay, tv // tw))
    batch.last_updated = now
    batch.factors = valid_factors
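
A minimal driver for update_batch_delay, assuming the function above and a logp helper are in scope (the stand-in shown earlier works); the batch object, the delay values (treated here as milliseconds) and the factor entries are all invented for illustration:

import time
from types import SimpleNamespace

class _Log:
    #tiny stand-in logger so that both log(...) and log.warn(...) work
    def __call__(self, msg, *args):
        print(msg % args if args else msg)
    warn = __call__
log = _Log()

now = time.time()
batch = SimpleNamespace(
    delay=20, min_delay=5, max_delay=500, always=False,
    last_delays=[(now-2.0, 30), (now-0.5, 20)],
    last_actual_delays=[(now-2.0, 35), (now-0.5, 25)],
    last_updated=0, factors=[])
factors = [
    #(metric, info, factor, weight) tuples, e.g. as produced by get_factors()
    ("client-latency", {"avg": 0.120}, 1.3, 0.8),
    ("damage-packet-queue-size", {}, 0.9, 0.4),
]
update_batch_delay(batch, factors)
print("new batch delay: %s" % batch.delay)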
Example #9
 def get_factors(self, target_latency, pixel_count):
     factors = []
     if len(self.client_latency) > 0:
         #client latency: (we want to keep client latency as low as possible)
         metric = "client-latency"
         l = 0.005 + self.min_client_latency
         wm = logp(l / 0.020)
         factors.append(
             calculate_for_target(metric,
                                  l,
                                  self.avg_client_latency,
                                  self.recent_client_latency,
                                  aim=0.8,
                                  slope=0.005,
                                  smoothing=sqrt,
                                  weight_multiplier=wm))
     if len(self.client_ping_latency) > 0:
         metric = "client-ping-latency"
         l = 0.005 + self.min_client_ping_latency
         wm = logp(l / 0.050)
         factors.append(
             calculate_for_target(metric,
                                  l,
                                  self.avg_client_ping_latency,
                                  self.recent_client_ping_latency,
                                  aim=0.95,
                                  slope=0.005,
                                  smoothing=sqrt,
                                  weight_multiplier=wm))
     if len(self.server_ping_latency) > 0:
         metric = "server-ping-latency"
         l = 0.005 + self.min_server_ping_latency
         wm = logp(l / 0.050)
         factors.append(
             calculate_for_target(metric,
                                  l,
                                  self.avg_server_ping_latency,
                                  self.recent_server_ping_latency,
                                  aim=0.95,
                                  slope=0.005,
                                  smoothing=sqrt,
                                  weight_multiplier=wm))
     #packet queue size: (includes packets from all windows)
     factors.append(
         queue_inspect("packet-queue-size",
                       self.packet_qsizes,
                       smoothing=sqrt))
     #packet queue pixels (global):
     qpix_time_values = [
         (event_time, value)
         for event_time, _, value in list(self.damage_packet_qpixels)
     ]
     factors.append(
         queue_inspect("packet-queue-pixels",
                       qpix_time_values,
                       div=pixel_count,
                       smoothing=sqrt))
     #compression data queue: (This is an important metric since each item will consume a fair amount of memory and each will later on go through the other queues.)
     factors.append(
         queue_inspect("compression-work-queue",
                       self.compression_work_qsizes))
     if self.mmap_size > 0:
         #full: effective range is 0.0 to ~1.2
         full = 1.0 - float(self.mmap_free_size) / self.mmap_size
         #aim for ~33%
         factors.append(("mmap-area", "%s%% full" % int(100 * full),
                         logp(3 * full), (3 * full)**2))
     return factors
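
Finally, a worked example for the mmap-area entry (sizes invented for illustration): a 128MB mmap area with 96MB still free is 25% full, below the ~33% aim, so the factor sits below the neutral 1.0 (with the stand-in logp shown earlier, logp(0.75) is roughly 0.8) and the weight stays small:

#illustrative sizes, in bytes:
mmap_size = 128*1024*1024
mmap_free_size = 96*1024*1024
full = 1.0 - float(mmap_free_size)/mmap_size    # 0.25, i.e. 25% full
factor = logp(3*full)      # logp(0.75): ~0.8 with the stand-in logp, nudging the delay down
weight = (3*full)**2       # 0.5625: low weight while the area is mostly free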