def populate(self, *args):
    """Sample network, pixel and latency statistics once per call.

    Returns False once the window is closed (stopping the timer),
    True otherwise.
    """
    if self.is_closed:
        return False
    self.client.send_ping()
    self.last_populate_time = time.time()
    # byte counters are recorded once per call (every second):
    self.net_in_bytecount.append(self.connection.input_bytecount)
    self.net_out_bytecount.append(self.connection.output_bytecount)
    # pre-compute the graph series for both directions:
    for prefix, counter in (
        ("net_in", self.net_in_bytecount),
        ("net_out", self.net_out_bytecount),
    ):
        scale, data = values_to_diff_scaled_values(
            list(counter)[1:N_SAMPLES + 3],
            scale_unit=1000, min_scaled_value=50)
        setattr(self, prefix + "_scale", scale)
        setattr(self, prefix + "_data", data)
    # pixels decoded during the last second:
    cutoff = time.time() - 1
    pixel_total = sum(pixels for _, t, pixels in self.client.pixel_counter if t > cutoff)
    self.pixel_in_data.append(pixel_total)

    def averaged_latency(source, size=25):
        # Several latency records can land in the same second:
        # average them so the graph does not "jump".
        pending = list(source)
        now = int(time.time())
        buckets = {}
        while pending and len(buckets) < size:
            when, value = pending.pop()
            if when >= now - 1:
                continue  # ignore the (still incomplete) last second
            second = int(when)
            ms = 1000.0 * value
            prev = buckets.get(second)
            if prev:
                # averaging more than 2 values this way is not quite fair,
                # but that shouldn't happen anyway:
                ms = (ms + prev) / 2.0
            buckets[second] = ms
        # pad with empty records so we always cover the last `size` seconds:
        for offset in range(size):
            buckets.setdefault(now - 2 - offset, None)
        return [buckets[second] for second in sorted(buckets)]

    self.server_latency = averaged_latency(self.client.server_ping_latency)
    self.client_latency = averaged_latency(self.client.client_ping_latency)
    return not self.is_closed
def populate(self, *args):
    """Collect one second's worth of statistics for the graphs.

    Returns False (stop the timer) once the window is closed,
    True otherwise.
    """
    if self.is_closed:
        return False
    self.client.send_ping()
    self.last_populate_time = time.time()
    # record bytecount every second:
    self.net_in_bytecount.append(self.connection.input_bytecount)
    self.net_out_bytecount.append(self.connection.output_bytecount)
    # pre-compute the scaled diff series used by the bandwidth graph:
    self.net_in_scale, self.net_in_data = values_to_diff_scaled_values(
        list(self.net_in_bytecount)[1:N_SAMPLES + 3],
        scale_unit=1000, min_scaled_value=50)
    self.net_out_scale, self.net_out_data = values_to_diff_scaled_values(
        list(self.net_out_bytecount)[1:N_SAMPLES + 3],
        scale_unit=1000, min_scaled_value=50)
    # pixels decoded during the last second:
    one_second_ago = time.time() - 1
    recent_pixels = [px for _, end_time, px in self.client.pixel_counter if end_time > one_second_ago]
    self.pixel_in_data.append(sum(recent_pixels))

    def recent_latency(samples, size=25):
        # More than one latency record may exist for a given second:
        # average them to prevent the graph from "jumping".
        now = int(time.time())
        averaged = {}
        for when, value in reversed(list(samples)):
            if len(averaged) >= size:
                break
            if when >= now - 1:
                # the current second is still incomplete, skip it
                continue
            key = int(when)
            ms = value * 1000.0
            existing = averaged.get(key)
            if existing:
                # not very fair if more than 2 values,
                # but this shouldn't happen anyway:
                ms = (ms + existing) / 2.0
            averaged[key] = ms
        # always provide a record (even an empty one) for each
        # of the last `size` seconds:
        for back in range(size):
            second = now - 2 - back
            if second not in averaged:
                averaged[second] = None
        return [averaged.get(second) for second in sorted(averaged)]

    self.server_latency = recent_latency(self.client.server_ping_latency)
    self.client_latency = recent_latency(self.client.client_ping_latency)
    return not self.is_closed
def test_values_to_diff_scaled_values():
    """Exercise values_to_diff_scaled_values() over several input magnitudes.

    Fix: the helper returns a (scale, data) tuple - the other callers in
    this file all unpack it - but this version bound the whole tuple to
    `out_data` and only printed it, verifying nothing.  Unpack the result
    and assert on the scaled diffs like the sibling test does.
    """
    in_data = [1, 2, 4, 10, 50, 51, 62, 73, 81, 85, 89]
    for scale in 1, 100, 10000:
        scale_units = [10, 1000]
        if scale > 10:
            # for larger inputs, also try units of comparable magnitude:
            scale_units.append(scale)
            scale_units.append(scale * 1000)
        for scale_unit in scale_units:
            in_scaled = [x * scale for x in in_data]
            oscale, out_data = values_to_diff_scaled_values(
                in_scaled, scale_unit=scale_unit, num_values=len(in_scaled) - 1)
            assert oscale > 0
            # the output is a scaled multiple of the diffs:
            # [1, 2, 6, 40, 1, 11, 11, 8, 4, 4]
            assert out_data[1] / out_data[0] == 2   # 2/1
            assert out_data[3] / out_data[4] == 40  # 40/1
            print("values_to_diff_scaled_values(%s,%s)=%s" % (in_scaled, scale_unit, (oscale, out_data)))
def test_values_to_diff_scaled_values():
    """Sanity-check values_to_diff_scaled_values() with assertions.

    Fix: `values_to_diff_scaled_values` returns a (scale, data) tuple
    (see how every other caller unpacks it); the old code assigned the
    tuple to a single name and merely printed it.  Unpack both parts and
    check the relative magnitudes of the scaled diffs.
    """
    in_data = [1, 2, 4, 10, 50, 51, 62, 73, 81, 85, 89]
    for scale in 1, 100, 10000:
        scale_units = [10, 1000]
        if scale > 10:
            scale_units.append(scale)
            scale_units.append(scale * 1000)
        for scale_unit in scale_units:
            in_scaled = [x * scale for x in in_data]
            oscale, out_data = values_to_diff_scaled_values(
                in_scaled, scale_unit=scale_unit, num_values=len(in_scaled) - 1)
            assert oscale > 0
            # diffs of in_data are [1, 2, 6, 40, 1, 11, 11, 8, 4, 4];
            # the output preserves their ratios:
            assert out_data[1] / out_data[0] == 2   # 2/1
            assert out_data[3] / out_data[4] == 40  # 40/1
            print("values_to_diff_scaled_values(%s,%s)=%s" % (in_scaled, scale_unit, (oscale, out_data)))
def test_values_to_diff_scaled_values(self):
    """Check values_to_diff_scaled_values() across magnitudes and scale units."""
    base = [1, 2, 4, 10, 50, 51, 62, 73, 81, 85, 89]
    for factor in (1, 100, 10000):
        units = [10, 1000]
        if factor > 10:
            # larger inputs also get units of comparable magnitude:
            units += [factor, factor * 1000]
        for unit in units:
            scaled_input = [v * factor for v in base]
            oscale, out_data = values_to_diff_scaled_values(
                scaled_input, scale_unit=unit, num_values=len(scaled_input) - 1)
            assert oscale > 0
            # the output is a scaled multiple of the diffs
            # [1, 2, 6, 40, 1, 11, 11, 8, 4, 4], so ratios are preserved:
            assert out_data[1] / out_data[0] == 2   # 2/1
            assert out_data[3] / out_data[4] == 40  # 40/1
def test_values_to_diff_scaled_values(self):
    # Sanity-check values_to_diff_scaled_values() across several
    # input magnitudes and scale units.
    in_data = [1, 2, 4, 10, 50, 51, 62, 73, 81, 85, 89]
    for scale in 1, 100, 10000:
        scale_units = [10, 1000]
        if scale > 10:
            # for larger inputs, also try units of comparable magnitude:
            scale_units.append(scale)
            scale_units.append(scale * 1000)
        for scale_unit in scale_units:
            in_scaled = [x * scale for x in in_data]
            oscale, out_data = values_to_diff_scaled_values(
                in_scaled, scale_unit=scale_unit, num_values=len(in_scaled) - 1)
            assert oscale > 0
            #output will be a scaled multiple of:
            #[1, 2, 6, 40, 1, 11, 11, 8, 4, 4]
            assert out_data[1] / out_data[0] == 2  # 2/1
            assert out_data[3] / out_data[4] == 40  # 40/1
def populate_graphs(self, *args):
    """Redraw the bandwidth and latency graph pixmaps.

    Always returns True so the GLib timer that drives it keeps firing.
    """
    if self.client.server_info_request:
        self.client.send_info_request()
    box = self.tab_box
    _, h = get_preferred_size(box)
    _, bh = get_preferred_size(self.tab_button_box)
    if h <= 0:
        return True
    # horizontal offset for the scrolling effect, proportional to the
    # time elapsed since the last data sample (capped at one full step):
    start_x_offset = min(1.0, (time.time() - self.last_populate_time) * 0.95)
    rect = box.get_allocation()
    # graph dimensions: fill the tab, minus the button box and margins:
    h = max(200, h - bh - 20, rect.height - bh - 20)
    w = max(360, rect.width - 20)
    #bandwidth graph:
    labels, datasets = [], []
    if self.net_in_bytecount and self.net_out_bytecount:
        def unit(scale):
            # format a scale factor as a short unit suffix, ie: "K", "x10M"
            if scale == 1:
                return ""
            else:
                unit, value = to_std_unit(scale)
                if value == 1:
                    return str(unit)
                return "x%s%s" % (int(value), unit)
        net_in_scale, net_in_data = values_to_diff_scaled_values(
            list(self.net_in_bytecount)[1:N_SAMPLES + 3], scale_unit=1000, min_scaled_value=50)
        net_out_scale, net_out_data = values_to_diff_scaled_values(
            list(self.net_out_bytecount)[1:N_SAMPLES + 3], scale_unit=1000, min_scaled_value=50)
        labels += ["recv %sB/s" % unit(net_in_scale), "sent %sB/s" % unit(net_out_scale)]
        datasets += [net_in_data, net_out_data]
        # NOTE(review): the branches below use the local `unit` helper, so
        # they are nested inside this block - they only show when network
        # byte counts are available.
        if SHOW_PIXEL_STATS and self.client.windows_enabled:
            pixel_scale, in_pixels = values_to_scaled_values(
                list(self.pixel_in_data)[3:N_SAMPLES + 4], min_scaled_value=100)
            datasets.append(in_pixels)
            labels.append("%s pixels/s" % unit(pixel_scale))
        if SHOW_SOUND_STATS and self.sound_in_bytecount:
            sound_in_scale, sound_in_data = values_to_diff_scaled_values(
                list(self.sound_in_bytecount)[1:N_SAMPLES + 3], scale_unit=1000, min_scaled_value=50)
            datasets.append(sound_in_data)
            labels.append("Speaker %sB/s" % unit(sound_in_scale))
        if SHOW_SOUND_STATS and self.sound_out_bytecount:
            sound_out_scale, sound_out_data = values_to_diff_scaled_values(
                list(self.sound_out_bytecount)[1:N_SAMPLES + 3], scale_unit=1000, min_scaled_value=50)
            datasets.append(sound_out_data)
            labels.append("Mic %sB/s" % unit(sound_out_scale))
    if labels and datasets:
        # top half of the tab shows the bandwidth graph:
        pixmap = make_graph_pixmap(datasets, labels=labels,
                                   width=w, height=h / 2,
                                   title="Bandwidth",
                                   min_y_scale=10, rounding=10,
                                   start_x_offset=start_x_offset)
        self.bandwidth_graph.set_size_request(*pixmap.get_size())
        self.bandwidth_graph.set_from_pixmap(pixmap, None)
    if self.client.server_info_request:
        # NOTE(review): no-op branch - presumably a leftover from code
        # that has been removed; confirm before deleting it.
        pass
    #latency graph:
    # each entry is a (deque-of-samples, label) pair:
    latency_graph_items = (
        (self.avg_ping_latency, "network"),
        (self.avg_batch_delay, "batch delay"),
        (self.avg_damage_out_latency, "encode&send"),
        (self.avg_decoding_latency, "decoding"),
        (self.avg_total, "frame total"),
    )
    latency_graph_values = []
    labels = []
    for l, name in latency_graph_items:
        if len(l) == 0:
            # no samples yet for this series, skip it entirely:
            continue
        l = list(l)
        if len(l) < 20:
            # left-pad with None so all series are 20 samples wide:
            for _ in range(20 - len(l)):
                l.insert(0, None)
        latency_graph_values.append(l)
        labels.append(name)
    # bottom half of the tab shows the latency graph:
    pixmap = make_graph_pixmap(latency_graph_values, labels=labels,
                               width=w, height=h / 2,
                               title="Latency (ms)",
                               min_y_scale=10, rounding=25,
                               start_x_offset=start_x_offset)
    self.latency_graph.set_size_request(*pixmap.get_size())
    self.latency_graph.set_from_pixmap(pixmap, None)
    return True
def populate(self, *args):
    """Collect one sampling interval's worth of statistics.

    Records network byte counts, pixel throughput, ping latency and
    (when server info is available) the running averages used by the
    latency graph.  Returns False once the window is closed so the
    driving timer stops.
    """
    if self.is_closed:
        return False
    self.client.send_ping()
    self.last_populate_time = time.time()
    #record bytecount every second:
    self.net_in_bytecount.append(self.connection.input_bytecount)
    self.net_out_bytecount.append(self.connection.output_bytecount)
    #pre-compute for graph:
    self.net_in_scale, self.net_in_data = values_to_diff_scaled_values(
        list(self.net_in_bytecount)[1:N_SAMPLES + 3], scale_unit=1000, min_scaled_value=50)
    self.net_out_scale, self.net_out_data = values_to_diff_scaled_values(
        list(self.net_out_bytecount)[1:N_SAMPLES + 3], scale_unit=1000, min_scaled_value=50)
    #count pixels in the last second:
    since = time.time() - 1
    decoded = [0] + [pixels for _, t, pixels in self.client.pixel_counter if t > since]
    self.pixel_in_data.append(sum(decoded))

    #update latency values
    #there may be more than one record for each second
    #so we have to average them to prevent the graph from "jumping":
    def get_ping_latency_records(src, size=25):
        recs = {}
        src_list = list(src)
        now = int(time.time())
        while len(src_list) > 0 and len(recs) < size:
            when, value = src_list.pop()
            if when >= (now - 1):
                #ignore last second
                continue
            iwhen = int(when)
            cv = recs.get(iwhen)
            v = 1000.0 * value
            if cv:
                #not very fair if more than 2 values...
                #but this shouldn't happen anyway
                v = (v + cv) / 2.0
            recs[iwhen] = v
        #ensure we always have a record for the last N seconds, even an empty one
        for x in range(size):
            i = now - 2 - x
            if i not in recs:
                recs[i] = None
        return [recs.get(x) for x in sorted(recs.keys())]

    self.server_latency = get_ping_latency_records(self.client.server_ping_latency)
    self.client_latency = get_ping_latency_records(self.client.client_ping_latency)
    if self.client.server_last_info:
        #populate running averages for graphs:
        def getavg(name):
            #look up a "<name>.avg" entry in the last server info response
            return self.client.server_last_info.get("%s.avg" % name)
        def addavg(l, name):
            #append the averaged value to `l`, if the server supplied one
            v = getavg(name)
            if v:
                l.append(v)
        addavg(self.avg_batch_delay, "batch.delay")
        addavg(self.avg_damage_out_latency, "damage.out_latency")
        if len(self.client.server_ping_latency) > 0 and len(self.client.client_ping_latency) > 0:
            #average of all ping latency samples, both directions, in ms:
            spl = [1000.0 * x for _, x in list(self.client.server_ping_latency)]
            cpl = [1000.0 * x for _, x in list(self.client.client_ping_latency)]
            self.avg_ping_latency.append(sum(spl + cpl) / len(spl + cpl))
        if len(self.client.pixel_counter) > 0:
            #pixel-count-weighted average decoding latency, in ms:
            tsize = 0
            ttime = 0
            for start_time, end_time, size in self.client.pixel_counter:
                ttime += 1000.0 * (end_time - start_time) * size
                tsize += size
            self.avg_decoding_latency.append(int(ttime / tsize))
        #totals: ping latency is halved since we only care about sending, not sending+receiving
        els = [(self.avg_batch_delay, 1),
               (self.avg_damage_out_latency, 1),
               (self.avg_ping_latency, 2),
               (self.avg_decoding_latency, 1)]
        if len([x for x, _ in els if len(x) > 0]) == len(els):
            #only once every series has at least one sample:
            totals = [x[-1] / r for x, r in els]
            log("frame totals=%s", totals)
            self.avg_total.append(sum(totals))
    return not self.is_closed