def add_stats(self, info, prefix, suffix=""): #encoding stats: if len(self.encoding_stats) > 0: estats = list(self.encoding_stats) encodings_used = [x[0] for x in estats] def add_compression_stats(enc_stats, suffix): comp_ratios_pct = [] comp_times_ns = [] total_pixels = 0 total_time = 0.0 for _, pixels, bpp, compressed_size, compression_time in enc_stats: if compressed_size > 0 and pixels > 0: osize = pixels * bpp / 8 comp_ratios_pct.append( (100.0 * compressed_size / osize, pixels)) comp_times_ns.append( (1000.0 * 1000 * 1000 * compression_time / pixels, pixels)) total_pixels += pixels total_time += compression_time add_weighted_list_stats(info, prefix + "encoding.ratio_pct" + suffix, comp_ratios_pct) add_weighted_list_stats( info, prefix + "encoding.pixels_per_ns" + suffix, comp_times_ns) if total_time > 0: info[prefix + "encoding.pixels_encoded_per_second" + suffix] = int(total_pixels / total_time) add_compression_stats(estats, suffix=suffix) for encoding in encodings_used: enc_stats = [x for x in estats if x[0] == encoding] add_compression_stats(enc_stats, suffix="%s[%s]" % (suffix, encoding)) latencies = [x * 1000 for _, _, _, x in list(self.damage_in_latency)] add_list_stats(info, prefix + "damage.in_latency", latencies, show_percentile=[9]) latencies = [x * 1000 for _, _, _, x in list(self.damage_out_latency)] add_list_stats(info, prefix + "damage.out_latency", latencies, show_percentile=[9]) #per encoding totals: for encoding, totals in self.encoding_totals.items(): info[prefix + "total_frames%s[%s]" % (suffix, encoding)] = totals[0] info[prefix + "total_pixels%s[%s]" % (suffix, encoding)] = totals[1] info[prefix + "damage.events%s" % suffix] = self.damage_events_count info[prefix + "damage.packets_sent%s" % suffix] = self.packet_count
def add_stats(self, info, prefix, suffix=""): if len(self.last_delays)>0: batch_delays = [x for _,x in list(self.last_delays)] add_list_stats(info, prefix+"batch.delay"+suffix, batch_delays) if len(self.last_actual_delays)>0: batch_delays = [x for _,x in list(self.last_actual_delays)] add_list_stats(info, prefix+"batch.actual_delay"+suffix, batch_delays, show_percentile=[9]) for name, details, factor, weight in self.factors: key = prefix+"batch."+name info[key+suffix] = (int(100.0*factor), int(100.0*weight)) for k,v in details.items(): info[key+"."+k+suffix] = v
def add_stats(self, info, suffix=""): info["damage.events%s" % suffix] = self.damage_events_count info["damage.packets_sent%s" % suffix] = self.packet_count info["client.connection.mmap_bytecount%s" % suffix] = self.mmap_bytes_sent if self.min_client_latency is not None: info["client.latency%s.absmin" % suffix] = int( self.min_client_latency * 1000) qsizes = [x for _, x in list(self.damage_data_qsizes)] add_list_stats(info, "damage.data_queue.size%s" % suffix, qsizes) qsizes = [x for _, x in list(self.damage_packet_qsizes)] add_list_stats(info, "damage.packet_queue.size%s" % suffix, qsizes) latencies = [x * 1000 for (_, _, _, x) in list(self.client_latency)] add_list_stats(info, "client.latency%s" % suffix, latencies) add_list_stats(info, "server.ping_latency%s" % suffix, [1000.0 * x for _, x in list(self.server_ping_latency)]) add_list_stats(info, "client.ping_latency%s" % suffix, [1000.0 * x for _, x in list(self.client_ping_latency)]) #client pixels per second: now = time.time() time_limit = now - 30 #ignore old records (30s) #pixels per second: decode time and overall total_pixels = 0 #total number of pixels processed total_time = 0 #total decoding time start_time = None #when we start counting from (oldest record) region_sizes = [] for _, event_time, pixels, decode_time in list( self.client_decode_time): #time filter and ignore failed decoding (decode_time==0) if event_time < time_limit or decode_time <= 0: continue if start_time is None or start_time > event_time: start_time = event_time total_pixels += pixels total_time += decode_time region_sizes.append(pixels) log("total_time=%s, total_pixels=%s", total_time, total_pixels) if total_time > 0: pixels_decoded_per_second = int(total_pixels * 1000 * 1000 / total_time) info["encoding.pixels_decoded_per_second%s" % suffix] = pixels_decoded_per_second if start_time: elapsed = now - start_time pixels_per_second = int(total_pixels / elapsed) info["encoding.pixels_per_second%s" % suffix] = pixels_per_second info["encoding.regions_per_second%s" % suffix] = int( len(region_sizes) / elapsed) info["encoding.average_region_size%s" % suffix] = int( total_pixels / len(region_sizes))
def add_stats(self, info, prefix, suffix=""): if len(self.last_delays) > 0: batch_delays = [x for _, x in list(self.last_delays)] add_list_stats(info, prefix + "batch.delay" + suffix, batch_delays) if len(self.last_actual_delays) > 0: batch_delays = [x for _, x in list(self.last_actual_delays)] add_list_stats(info, prefix + "batch.actual_delay" + suffix, batch_delays, show_percentile=[9]) for name, details, factor, weight in self.factors: key = prefix + "batch." + name info[key + suffix] = (int(100.0 * factor), int(100.0 * weight)) for k, v in details.items(): info[key + "." + k + suffix] = v
def get_info(self): info = { "damage.events": self.damage_events_count, "damage.packets_sent": self.packet_count, "encoding.decode_errors": self.decode_errors, } qsizes = [x for _, x in list(self.compression_work_qsizes)] add_list_stats(info, "damage.data_queue.size", qsizes) qsizes = [x for _, x in list(self.packet_qsizes)] add_list_stats(info, "damage.packet_queue.size", qsizes) #client pixels per second: now = time.time() time_limit = now - 30 #ignore old records (30s) #pixels per second: decode time and overall total_pixels = 0 #total number of pixels processed total_time = 0 #total decoding time start_time = None #when we start counting from (oldest record) region_sizes = [] for _, event_time, pixels, decode_time in list( self.client_decode_time): #time filter and ignore failed decoding (decode_time==0) if event_time < time_limit or decode_time <= 0: continue if start_time is None or start_time > event_time: start_time = event_time total_pixels += pixels total_time += decode_time region_sizes.append(pixels) log("total_time=%s, total_pixels=%s", total_time, total_pixels) if total_time > 0: pixels_decoded_per_second = int(total_pixels * 1000 * 1000 / total_time) info[ "encoding.pixels_decoded_per_second"] = pixels_decoded_per_second if start_time: elapsed = now - start_time pixels_per_second = int(total_pixels / elapsed) info.update({ "encoding.pixels_per_second": pixels_per_second, "encoding.regions_per_second": int(len(region_sizes) / elapsed), "encoding.average_region_size": int(total_pixels / len(region_sizes)) }) return info
def get_info(self): info = { "min-delay" : self.min_delay, "max-delay" : self.max_delay, "timeout-delay" : self.timeout_delay, "locked" : self.locked} if len(self.last_delays)>0: batch_delays = [x for _,x in list(self.last_delays)] add_list_stats(info, "delay", batch_delays) if len(self.last_actual_delays)>0: batch_delays = [x for _,x in list(self.last_actual_delays)] add_list_stats(info, "actual_delay", batch_delays, show_percentile=[9]) for name, details, factor, weight in self.factors: info[name] = (int(100.0*factor), int(100.0*weight)) for k,v in details.items(): info[name+"."+k] = v return info
def add_stats(self, info, suffix=""): info["damage.events%s" % suffix] = self.damage_events_count info["damage.packets_sent%s" % suffix] = self.packet_count info["client.connection.mmap_bytecount%s" % suffix] = self.mmap_bytes_sent if self.min_client_latency is not None: info["client.latency%s.absmin" % suffix] = int(self.min_client_latency*1000) qsizes = [x for _,x in list(self.damage_data_qsizes)] add_list_stats(info, "damage.data_queue.size%s" % suffix, qsizes) qsizes = [x for _,x in list(self.damage_packet_qsizes)] add_list_stats(info, "damage.packet_queue.size%s" % suffix, qsizes) latencies = [x*1000 for (_, _, _, x) in list(self.client_latency)] add_list_stats(info, "client.latency%s" % suffix, latencies) add_list_stats(info, "server.ping_latency%s" % suffix, [1000.0*x for _, x in list(self.server_ping_latency)]) add_list_stats(info, "client.ping_latency%s" % suffix, [1000.0*x for _, x in list(self.client_ping_latency)]) #client pixels per second: now = time.time() time_limit = now-30 #ignore old records (30s) #pixels per second: decode time and overall total_pixels = 0 #total number of pixels processed total_time = 0 #total decoding time start_time = None #when we start counting from (oldest record) region_sizes = [] for _, event_time, pixels, decode_time in list(self.client_decode_time): #time filter and ignore failed decoding (decode_time==0) if event_time<time_limit or decode_time<=0: continue if start_time is None or start_time>event_time: start_time = event_time total_pixels += pixels total_time += decode_time region_sizes.append(pixels) debug("total_time=%s, total_pixels=%s", total_time, total_pixels) if total_time>0: pixels_decoded_per_second = int(total_pixels *1000*1000 / total_time) info["encoding.pixels_decoded_per_second%s" % suffix] = pixels_decoded_per_second if start_time: elapsed = now-start_time pixels_per_second = int(total_pixels/elapsed) info["encoding.pixels_per_second%s" % suffix] = pixels_per_second info["encoding.regions_per_second%s" % suffix] = int(len(region_sizes)/elapsed) info["encoding.average_region_size%s" % suffix] = int(total_pixels/len(region_sizes))
def get_info(self): info = { "min-delay": self.min_delay, "max-delay": self.max_delay, "timeout-delay": self.timeout_delay, "locked": self.locked } if len(self.last_delays) > 0: batch_delays = [x for _, x in list(self.last_delays)] add_list_stats(info, "delay", batch_delays) if len(self.last_actual_delays) > 0: batch_delays = [x for _, x in list(self.last_actual_delays)] add_list_stats(info, "actual_delay", batch_delays, show_percentile=[9]) for name, details, factor, weight in self.factors: info[name] = (int(100.0 * factor), int(100.0 * weight)) for k, v in details.items(): info[name + "." + k] = v return info
def get_info(self): info = { "damage.events" : self.damage_events_count, "damage.packets_sent" : self.packet_count, "encoding.decode_errors" : self.decode_errors, } qsizes = [x for _,x in list(self.compression_work_qsizes)] add_list_stats(info, "damage.data_queue.size", qsizes) qsizes = [x for _,x in list(self.packet_qsizes)] add_list_stats(info, "damage.packet_queue.size", qsizes) #client pixels per second: now = time.time() time_limit = now-30 #ignore old records (30s) #pixels per second: decode time and overall total_pixels = 0 #total number of pixels processed total_time = 0 #total decoding time start_time = None #when we start counting from (oldest record) region_sizes = [] for _, event_time, pixels, decode_time in list(self.client_decode_time): #time filter and ignore failed decoding (decode_time==0) if event_time<time_limit or decode_time<=0: continue if start_time is None or start_time>event_time: start_time = event_time total_pixels += pixels total_time += decode_time region_sizes.append(pixels) log("total_time=%s, total_pixels=%s", total_time, total_pixels) if total_time>0: pixels_decoded_per_second = int(total_pixels *1000*1000 / total_time) info["encoding.pixels_decoded_per_second"] = pixels_decoded_per_second if start_time: elapsed = now-start_time pixels_per_second = int(total_pixels/elapsed) info.update({ "encoding.pixels_per_second" : pixels_per_second, "encoding.regions_per_second" : int(len(region_sizes)/elapsed), "encoding.average_region_size" : int(total_pixels/len(region_sizes))}) return info
def get_info(self): info = { "damage.events" : self.damage_events_count, "damage.packets_sent" : self.packet_count} #encoding stats: if len(self.encoding_stats)>0: estats = list(self.encoding_stats) encodings_used = [x[0] for x in estats] def add_compression_stats(enc_stats, suffix=""): comp_ratios_pct = [] comp_times_ns = [] total_pixels = 0 total_time = 0.0 for _, pixels, bpp, compressed_size, compression_time in enc_stats: if compressed_size>0 and pixels>0: osize = pixels*bpp/8 comp_ratios_pct.append((100.0*compressed_size/osize, pixels)) comp_times_ns.append((1000.0*1000*1000*compression_time/pixels, pixels)) total_pixels += pixels total_time += compression_time add_weighted_list_stats(info, "encoding.ratio_pct"+suffix, comp_ratios_pct) add_weighted_list_stats(info, "encoding.pixels_per_ns"+suffix, comp_times_ns) if total_time>0: info["encoding.pixels_encoded_per_second"+suffix] = int(total_pixels / total_time) add_compression_stats(estats) for encoding in encodings_used: enc_stats = [x for x in estats if x[0]==encoding] add_compression_stats(enc_stats, suffix="[%s]" % encoding) latencies = [x*1000 for _, _, _, x in list(self.damage_in_latency)] add_list_stats(info, "damage.in_latency", latencies, show_percentile=[9]) latencies = [x*1000 for _, _, _, x in list(self.damage_out_latency)] add_list_stats(info, "damage.out_latency", latencies, show_percentile=[9]) #per encoding totals: for encoding, totals in self.encoding_totals.items(): info["total_frames[%s]" % encoding] = totals[0] info["total_pixels[%s]" % encoding] = totals[1] return info
def get_client_info(self): info = { "connection.mmap_bytecount" : self.mmap_bytes_sent} if self.min_client_latency is not None: info["latency.absmin"] = int(self.min_client_latency*1000) latencies = [x*1000 for (_, _, _, x) in list(self.client_latency)] add_list_stats(info, "latency", latencies) add_list_stats(info, "server.ping_latency", [1000.0*x for _, x in list(self.server_ping_latency)]) add_list_stats(info, "client.ping_latency", [1000.0*x for _, x in list(self.client_ping_latency)]) return info
def get_client_info(self): info = {"connection.mmap_bytecount": self.mmap_bytes_sent} if self.min_client_latency is not None: info["latency.absmin"] = int(self.min_client_latency * 1000) latencies = [x * 1000 for (_, _, _, x) in list(self.client_latency)] add_list_stats(info, "latency", latencies) add_list_stats(info, "server.ping_latency", [1000.0 * x for _, x in list(self.server_ping_latency)]) add_list_stats(info, "client.ping_latency", [1000.0 * x for _, x in list(self.client_ping_latency)]) return info