def populate_table(self):
    if self.table:
        self.alignment.remove(self.table)
    #remove expired requests:
    now = monotonic_time()
    self.requests = [x for x in self.requests if x[-1] > now]
    self.expire_labels = {}
    tb = TableBuilder(rows=1, columns=4, row_spacings=15)
    def l(s=""):
        return gtk.Label(s)
    #generate a new table:
    self.table = tb.get_table()
    if not self.requests:
        tb.add_row(l("No requests pending"))
    else:
        headers = [l("URL / Filename"), l(""), l("Expires in"), l("Action")]
        tb.add_row(*headers)
        for cb_answer, send_id, dtype, url, filesize, printit, openit, expires in self.requests:
            details = u""
            if dtype == b"file" and filesize > 0:
                details = u"%sB" % std_unit_dec(filesize)
            expires_label = l()
            self.expire_labels[expires_label] = expires
            buttons = self.action_buttons(cb_answer, send_id, dtype, printit, openit)
            s = bytestostr(url)
            main_label = l(s)
            if dtype == b"url" and s.find("?") > 0 and len(s) > 48:
                parts = s.split("?", 1)
                main_label.set_label(parts[0] + "?..")
                main_label.set_tooltip_text(s)
            main_label.set_line_wrap(True)
            try:
                main_label.set_line_wrap_mode(pango.WrapMode.WORD_CHAR)
            except AttributeError:
                main_label.set_line_wrap_mode(pango.WRAP_WORD_CHAR)
            main_label.set_size_request(URI_MAX_WIDTH, -1)
            items = (main_label, l(details), expires_label, buttons)
            tb.add_row(*items)
        self.update_expires_label()
    self.alignment.add(self.table)
    self.table.show_all()
def process_client_packet(self, proto, packet):
    packet_type = bytestostr(packet[0])
    log("process_client_packet: %s", packet_type)
    if packet_type == Protocol.CONNECTION_LOST:
        self.stop(proto, "client connection lost")
        return
    if packet_type == "set_deflate":
        #echo it back to the client:
        self.client_packets.put(packet)
        self.client_protocol.source_has_more()
        return
    if packet_type == "hello":
        if not self.client_challenge_packet:
            log.warn("Warning: invalid hello packet from client")
            log.warn(" received after initial authentication (dropped)")
            return
        log("forwarding client hello")
        log(" for challenge packet %s", self.client_challenge_packet)
        #update caps with latest hello caps from client:
        self.caps = typedict(packet[1])
        #keep challenge data in the hello response:
        hello = self.filter_client_caps(self.caps, CLIENT_REMOVE_CAPS_CHALLENGE)
        self.queue_server_packet(("hello", hello))
        return
    if packet_type == "ping_echo" and self.client_ping_timer and len(packet) >= 7 and packet[6] == strtobytes(self.uuid):
        #this is one of our ping packets:
        self.client_last_ping_echo = packet[1]
        self.client_last_ping_latency = 1000 * monotonic_time() - self.client_last_ping_echo
        log("ping-echo: client latency=%.1fms", self.client_last_ping_latency)
        return
    #the packet types below are forwarded:
    if packet_type == "disconnect":
        reason = bytestostr(packet[1])
        log("got disconnect from client: %s", reason)
        if self.exit:
            self.client_protocol.close()
        else:
            self.stop(None, "disconnect from client", reason)
    elif packet_type == "send-file":
        if packet[6]:
            packet[6] = Compressed("file-data", packet[6])
    elif packet_type == "send-file-chunk":
        if packet[3]:
            packet[3] = Compressed("file-chunk-data", packet[3])
    self.queue_server_packet(packet)
def damage(self, wid, window, x, y, w, h, options=None):
    """
    Main entry point from the window manager,
    we dispatch to the WindowSource for this window id
    (creating a new one if needed)
    """
    if not self.can_send_window(window):
        return
    assert window is not None
    damage_options = {}
    if options:
        damage_options = options.copy()
    self.statistics.damage_last_events.append((wid, monotonic_time(), w*h))
    ws = self.make_window_source(wid, window)
    ws.damage(x, y, w, h, damage_options)
def record_latency(self, wid, decode_time, start_send_at, end_send_at, pixels, bytecount, latency):
    now = monotonic_time()
    send_diff = now - start_send_at
    echo_diff = now - end_send_at
    send_latency = max(0, send_diff - decode_time / 1000.0 / 1000.0)
    echo_latency = max(0, echo_diff - decode_time / 1000.0 / 1000.0)
    log("record_latency: took %6.1f ms round trip, %6.1f for echo, %6.1f for decoding of %8i pixels, %8i bytes sent over the network in %6.1f ms, %6.1f ms for echo",
        send_diff * 1000, echo_diff * 1000, decode_time / 1000, pixels, bytecount,
        send_latency * 1000, echo_latency * 1000)
    if self.min_client_latency is None or self.min_client_latency > send_latency:
        self.min_client_latency = send_latency
    self.client_latency.append((wid, now, pixels, send_latency))
    self.frame_total_latency.append((wid, now, pixels, latency))
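#Illustrative sketch, not part of the source above: decode_time is expressed in
#microseconds, so it is divided by 1000*1000 to convert it to seconds before being
#subtracted from the round-trip time. The helper name below is hypothetical.
def _example_send_latency(send_diff_s=0.120, decode_time_us=20000):
    #0.120s round trip minus 0.020s of client decoding -> 0.100s of network latency
    return max(0, send_diff_s - decode_time_us / 1000.0 / 1000.0)

assert abs(_example_send_latency() - 0.100) < 1e-9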
def peek_connection(conn, timeout=PEEK_TIMEOUT_MS):
    log = get_network_logger()
    log("peek_connection(%s, %i)", conn, timeout)
    PEEK_SIZE = 8192
    start = monotonic_time()
    peek_data = b""
    while not peek_data and int(1000*(monotonic_time()-start)) < timeout:
        try:
            peek_data = conn.peek(PEEK_SIZE)
        except (OSError, IOError):
            pass
        except ValueError:
            log("peek_connection(%s, %i) failed", conn, timeout, exc_info=True)
            break
        if not peek_data:
            sleep(timeout/4000.0)
    line1 = b""
    log("socket %s peek: got %i bytes", conn, len(peek_data))
    if peek_data:
        line1 = peek_data.splitlines()[0]
        log("socket peek=%s", repr_ellipsized(peek_data, limit=512))
        log("socket peek hex=%s", hexstr(peek_data[:128]))
        log("socket peek line1=%s", repr_ellipsized(bytestostr(line1)))
    return peek_data, line1
def pulseaudio_ended(proc):
    soundlog("pulseaudio_ended(%s) pulseaudio_proc=%s, returncode=%s, closing=%s",
             proc, self.pulseaudio_proc, proc.returncode, self._closing)
    if self.pulseaudio_proc is None or self._closing:
        #cleared by cleanup already, ignore
        return
    elapsed = monotonic_time() - started_at
    if elapsed < 2:
        self.timeout_add(1000, pulseaudio_warning)
    else:
        soundlog.warn("Warning: the pulseaudio server process has terminated after %i seconds", int(elapsed))
    self.pulseaudio_proc = None
def get_info(self, proto=None, client_uuids=None):
    log("ServerBase.get_info%s", (proto, client_uuids))
    start = monotonic_time()
    info = ServerCore.get_info(self, proto)
    server_info = info.setdefault("server", {})
    if self.mem_bytes:
        server_info["total-memory"] = self.mem_bytes
    if client_uuids:
        sources = [ss for ss in self._server_sources.values() if ss.uuid in client_uuids]
    else:
        sources = tuple(self._server_sources.values())
    log("info-request: sources=%s", sources)
    dgi = self.do_get_info(proto, sources)
    #ugly alert: merge nested dictionaries,
    #ie: do_get_info may return a dictionary for "server" and we already have one,
    # so we update it with the new values
    for k, v in dgi.items():
        cval = info.get(k)
        if cval is None:
            info[k] = v
            continue
        cval.update(v)
    log("ServerBase.get_info took %.1fms", 1000.0*(monotonic_time()-start))
    return info
def do_set_icon_from_file(self, filename):
    if not hasattr(self.tray_widget, "set_icon_theme_path"):
        self.tray_widget.set_icon(filename)
        self._has_icon = True
        return
    head, icon_name = os.path.split(filename)
    if head:
        log("do_set_icon_from_file(%s) setting icon theme path=%s", filename, head)
        self.tray_widget.set_icon_theme_path(head)
    #remove extension (wtf?)
    noext = os.path.splitext(icon_name)[0]
    log("do_set_icon_from_file(%s) setting icon=%s", filename, noext)
    self.tray_widget.set_icon(noext)
    self._has_icon = True
    self.icon_timestamp = monotonic_time()
def __init__(self, x, y, width, height, pixels, pixel_format, depth, rowstride,
             bytesperpixel=4, planes=PACKED, thread_safe=True, palette=None):
    self.x = x
    self.y = y
    self.width = width
    self.height = height
    self.pixels = pixels
    self.pixel_format = pixel_format
    self.depth = depth
    self.rowstride = rowstride
    self.bytesperpixel = bytesperpixel
    self.planes = planes
    self.thread_safe = thread_safe
    self.freed = False
    self.timestamp = int(monotonic_time()*1000)
    self.palette = palette
def send_ping(self):
    now_ms = int(1000.0 * monotonic_time())
    self.send("ping", now_ms)
    wait = 2.0
    if len(self.server_ping_latency) > 0:
        l = [x for _, x in tuple(self.server_ping_latency)]
        avg = sum(l) / len(l)
        wait = min(5, 1.0 + avg * 2.0)
        log("send_ping() timestamp=%s, average server latency=%.1f, using max wait %.2fs",
            now_ms, 1000.0 * avg, wait)
    t = self.timeout_add(int(1000.0 * wait), self.check_server_echo, now_ms)
    self.ping_echo_timers[now_ms] = t
    return True
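#Illustrative sketch, not part of the source above: the echo timeout grows with the
#average measured server latency and is capped at 5 seconds. The helper name below
#is hypothetical.
def _example_ping_wait(latencies=(0.05, 0.20, 0.35)):
    avg = sum(latencies) / len(latencies)   #0.2s average latency
    return min(5, 1.0 + avg * 2.0)          #-> wait 1.4s before checking for the echo

assert abs(_example_ping_wait() - 1.4) < 1e-9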
def send_mouse_position(self, packet):
    if self._mouse_position_timer:
        self._mouse_position_pending = packet
        return
    self._mouse_position_pending = packet
    now = monotonic_time()
    elapsed = int(1000 * (now - self._mouse_position_send_time))
    delay = self._mouse_position_delay - elapsed
    mouselog("send_mouse_position(%s) elapsed=%i, delay left=%i", packet, elapsed, delay)
    if delay > 0:
        self._mouse_position_timer = self.timeout_add(delay, self.do_send_mouse_position)
    else:
        self.do_send_mouse_position()
def test_format_thread(self):
    packets = self.make_test_packets()
    N = 1000
    many = self.repeat_list(packets, N)
    def get_packet_cb():
        #log.info("get_packet_cb")
        try:
            packet = many.pop(0)
            return (packet, None, None, None, False, True, False)
        except IndexError:
            protocol.close()
            return (None, )
    def process_packet_cb(proto, packet):
        if packet[0] == Protocol.CONNECTION_LOST:
            glib.timeout_add(1000, loop.quit)
    protocol = self.make_memory_protocol(None, process_packet_cb=process_packet_cb, get_packet_cb=get_packet_cb)
    conn = protocol._conn
    loop = glib.MainLoop()
    glib.timeout_add(TIMEOUT*1000, loop.quit)
    protocol.enable_compressor("lz4")
    protocol.enable_encoder("rencode")
    protocol.start()
    protocol.source_has_more()
    start = monotonic_time()
    loop.run()
    end = monotonic_time()
    assert protocol._closed
    log("protocol: %s", protocol)
    log("%s write-data=%s", conn, len(conn.write_data))
    total_size = sum(len(packet) for packet in conn.write_data)
    elapsed = end - start
    log("bytes=%s, elapsed=%s", total_size, elapsed)
    log.info("\n%-9s format thread:\t\t\t%iMB/s", protocol.TYPE, int(total_size/elapsed//1024//1024))
    n_packets = len(packets)*N
    log.info("\n%-9s packets formatted per second:\t\t%i", protocol.TYPE, int(n_packets/elapsed))
    assert conn.write_data
def get_window_info(self):
    info = {}
    now = monotonic_time()
    for wid, encoder in self.video_encoders.items():
        einfo = encoder.get_info()
        einfo["idle_time"] = int(now - self.video_encoders_last_used_time.get(wid, 0))
        info[wid] = {
            "proxy": {
                ""          : encoder.get_type(),
                "encoder"   : einfo,
                },
            }
    enclog("get_window_info()=%s", info)
    return info
def send_client_ping(self):
    #if we've already sent one, check for the echo:
    if self.client_last_ping:
        delta = self.client_last_ping - self.client_last_ping_echo
        if delta > PING_WARNING:
            log.warn("Warning: late client ping, %i seconds", delta)
        if delta > PING_TIMEOUT:
            log.error("Error: client ping timeout, %i seconds", delta)
            self.stop(None, "proxy to client ping timeout")
            return False
    now = monotonic_time()
    self.client_last_ping = now
    self.queue_client_packet(("ping", int(now * 1000), int(time() * 1000), self.uuid))
    return True
def get_rgb_rawdata(window, x, y, width, height):
    """
    Extracts pixels from the given pixmap
    """
    start = monotonic_time()
    pixmap_w, pixmap_h = window.get_geometry()[2:4]
    # Just in case we somehow end up with damage larger than the pixmap,
    # we don't want to start requesting random chunks of memory (this
    # could happen if a window is resized but we don't throw away our
    # existing damage map):
    assert x >= 0
    assert y >= 0
    if x + width > pixmap_w:
        width = pixmap_w - x
    if y + height > pixmap_h:
        height = pixmap_h - y
    if width <= 0 or height <= 0:
        return None
    pixbuf = get_pixbuf_from_window(window, x, y, width, height)
    log("get_rgb_rawdata(..) pixbuf.get_from_drawable took %s ms", int(1000 * (monotonic_time() - start)))
    raw_data = pixbuf.get_pixels()
    rowstride = pixbuf.get_rowstride()
    return (x, y, width, height, raw_data, "RGB", 24, rowstride, 3)
def user_event(self):
    log("user_event()")
    self.last_user_event = monotonic_time()
    self.cancel_idle_grace_timeout()
    self.schedule_idle_grace_timeout()
    self.cancel_idle_timeout()
    self.schedule_idle_timeout()
    if self.idle:
        self.no_idle()
    try:
        self.notification_callbacks.pop(XPRA_IDLE_NOTIFICATION_ID)
    except KeyError:
        pass
    else:
        self.notify_close(XPRA_IDLE_NOTIFICATION_ID)
def init_vars(self):
    self.rectangle = None
    self.inout = 0, 0           #number of damage pixels within / outside the region
    self.score = 0
    self.fps = 0
    self.damaged = 0            #proportion of the rectangle that got damaged (percentage)
    self.set_at = 0             #value of the "damage event count" when the region was set
    self.counter = 0            #value of the "damage event count" recorded at "time"
    self.time = 0               #see above
    self.refresh_timer = None
    self.refresh_regions = []
    self.last_scores = {}
    #keep track of how much extra we batch non-video regions (milliseconds):
    self.non_max_wait = 150
    self.min_time = monotonic_time()
def record_congestion_event(self, source, late_pct=0, send_speed=0):
    if not self.bandwidth_detection:
        return
    gs = self.statistics
    if not gs:
        #window cleaned up?
        return
    now = monotonic_time()
    elapsed = now - self.bandwidth_warning_time
    bandwidthlog("record_congestion_event(%s, %i, %i) bandwidth_warnings=%s, elapsed time=%i",
                 source, late_pct, send_speed, self.bandwidth_warnings, elapsed)
    gs.last_congestion_time = now
    gs.congestion_send_speed.append((now, late_pct, send_speed))
    if self.bandwidth_warnings and elapsed > CONGESTION_REPEAT_DELAY:
        #enough congestion events?
        T = 10
        min_time = now - T
        count = len(tuple(True for x in gs.congestion_send_speed if x[0] > min_time))
        bandwidthlog("record_congestion_event: %i events in the last %i seconds (warnings after %i)",
                     count, T, CONGESTION_WARNING_EVENT_COUNT)
        if count > CONGESTION_WARNING_EVENT_COUNT:
            self.bandwidth_warning_time = now
            nid = XPRA_BANDWIDTH_NOTIFICATION_ID
            summary = "Network Performance Issue"
            body = "Your network connection is struggling to keep up,\n" + \
                   "consider lowering the bandwidth limit,\n" + \
                   "or turning off automatic network congestion management.\n" + \
                   "Choosing 'ignore' will silence all further warnings."
            actions = []
            if self.bandwidth_limit == 0 or self.bandwidth_limit > MIN_BANDWIDTH:
                actions += ["lower-bandwidth", "Lower bandwidth limit"]
            actions += ["bandwidth-off", "Turn off"]
            #if self.default_min_quality>10:
            #    actions += ["lower-quality", "Lower quality"]
            actions += ["ignore", "Ignore"]
            hints = {}
            self.may_notify(nid, summary, body, actions, hints,
                            icon_name="connect",
                            user_callback=self.congestion_notification_callback)
def update_batch_delay(batch, factors):
    """
    Given a list of factors of the form:
        [(description, factor, weight)]
    we calculate a new batch delay.
    We use a time-weighted average of previous delays as a starting value,
    then combine it with the new factors.
    """
    current_delay = batch.delay
    now = monotonic_time()
    tv, tw = 0.0, 0.0
    decay = max(1, logp(current_delay/batch.min_delay)/5.0)
    max_delay = batch.max_delay
    for delays, d_weight in ((batch.last_delays, 0.25), (batch.last_actual_delays, 0.75)):
        if delays is not None and len(delays) > 0:
            #get the weighted average
            #older values matter less, we decay them according to how much we batch already
            #(older values matter more when we batch a lot)
            for when, delay in tuple(delays):
                #newer matter more:
                w = d_weight/(1.0+((now-when)/decay)**2)
                d = max(0, min(max_delay, delay))
                tv += d*w
                tw += w
    hist_w = tw
    for x in factors:
        if len(x) != 4:
            log.warn("invalid factor line: %s" % str(x))
        else:
            log("update_batch_delay: %-28s : %.2f,%.2f %s", x[0], x[2], x[3], x[1])
    valid_factors = [x for x in factors if x is not None and len(x) == 4]
    all_factors_weight = sum([w for _, _, _, w in valid_factors])
    if all_factors_weight == 0:
        log("update_batch_delay: no weights yet!")
        return
    for _, _, factor, weight in valid_factors:
        target_delay = max(0, min(max_delay, current_delay*factor))
        w = max(1, hist_w)*weight/all_factors_weight
        tw += w
        tv += target_delay*w
    mv = 0
    if batch.always:
        mv = batch.min_delay
    batch.delay = max(mv, min(max_delay, tv // tw))
    log("update_batch_delay: delay=%i", batch.delay)
    batch.last_updated = now
    batch.factors = valid_factors
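#Illustrative sketch, not part of the source above: how the recency weighting
#"w = d_weight/(1.0+((now-when)/decay)**2)" behaves. A sample that is one 'decay'
#period old gets half the weight of a current one, and older samples fall off
#quadratically. The helper name below is hypothetical.
def _example_recency_weight(age_s, decay_s=1.0, d_weight=0.75):
    return d_weight / (1.0 + (age_s / decay_s) ** 2)

assert _example_recency_weight(0.0) == 0.75     #current sample: full weight
assert _example_recency_weight(1.0) == 0.375    #one decay period old: half weight
assert _example_recency_weight(3.0) == 0.075    #three periods old: one tenth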
def client_ack_damage(self, damage_packet_sequence, wid, width, height, decode_time, message):
    """
    The client is acknowledging a damage packet,
    we record the 'client decode time' (which is provided by the client)
    and WindowSource will calculate and record the "client latency".
    (since it knows when the "draw" packet was sent)
    """
    if not self.send_windows:
        log.error("client_ack_damage when we don't send any window data!?")
        return
    if decode_time > 0:
        self.statistics.client_decode_time.append((wid, monotonic_time(), width*height, decode_time))
    ws = self.window_sources.get(wid)
    if ws:
        ws.damage_packet_acked(damage_packet_sequence, width, height, decode_time, message)
        self.may_recalculate(wid, width*height)
def queue_packet(self, packet, wid=0, pixels=0,
                 start_send_cb=None, end_send_cb=None, fail_cb=None, wait_for_more=False):
    """
    Add a new 'draw' packet to the 'packet_queue'.
    Note: this code runs in the non-ui thread
    """
    now = monotonic_time()
    self.statistics.packet_qsizes.append((now, len(self.packet_queue)))
    if wid > 0:
        self.statistics.damage_packet_qpixels.append(
            (now, wid, sum(x[2] for x in tuple(self.packet_queue) if x[1] == wid))
            )
    self.packet_queue.append((packet, wid, pixels, start_send_cb, end_send_cb, fail_cb, wait_for_more))
    p = self.protocol
    if p:
        p.source_has_more()
def remote_logging_handler(self, log, level, msg, *args, **kwargs):
    #prevent loops (if our send call ends up firing another logging call):
    if self.in_remote_logging:
        return
    self.in_remote_logging = True
    def enc(x):
        try:
            return x.encode("utf8")
        except:
            return strtobytes(x)
    try:
        dtime = int(1000*(monotonic_time() - self.start_time))
        data = self.compressed_wrapper("text", enc(msg % args), level=1)
        self.send("logging", level, data, dtime)
        exc_info = kwargs.get("exc_info")
        if exc_info is True:
            exc_info = sys.exc_info()
        if exc_info:
            for x in traceback.format_tb(exc_info[2]):
                self.send("logging", level, enc(x), dtime)
            try:
                etypeinfo = exc_info[0].__name__
            except:
                etypeinfo = str(exc_info[0])
            self.send("logging", level, enc("%s: %s" % (etypeinfo, exc_info[1])), dtime)
        if self.log_both:
            self.local_logging(log, level, msg, *args, **kwargs)
    except Exception as e:
        if self.exit_code is not None:
            #errors can happen during exit, don't care
            return
        self.local_logging(log, logging.WARNING, "Warning: failed to send logging packet:")
        self.local_logging(log, logging.WARNING, " %s" % e)
        self.local_logging(log, logging.WARNING, " original unformatted message: %s", msg)
        try:
            self.local_logging(log, level, msg, *args, **kwargs)
        except:
            pass
        try:
            exc_info = sys.exc_info()
            for x in traceback.format_tb(exc_info[2]):
                for v in x.splitlines():
                    self.local_logging(log, logging.WARNING, v)
        except:
            pass
    finally:
        self.in_remote_logging = False
def update_averages(self):
    if len(self.client_latency) > 0:
        data = [(when, latency) for _, when, _, latency in tuple(self.client_latency)]
        self.min_client_latency = min([x for _, x in data])
        self.avg_client_latency, self.recent_client_latency = calculate_time_weighted_average(data)
    #client ping latency: from ping packets
    if len(self.client_ping_latency) > 0:
        data = tuple(self.client_ping_latency)
        self.min_client_ping_latency = min([x for _, x in data])
        self.avg_client_ping_latency, self.recent_client_ping_latency = calculate_time_weighted_average(data)
    #server ping latency: from ping packets
    if len(self.server_ping_latency) > 0:
        data = tuple(self.server_ping_latency)
        self.min_server_ping_latency = min([x for _, x in data])
        self.avg_server_ping_latency, self.recent_server_ping_latency = calculate_time_weighted_average(data)
    #set to 0 if we have less than 2 events in the last 60 seconds:
    now = monotonic_time()
    min_time = now - 60
    css = tuple(x for x in self.congestion_send_speed if x[0] > min_time)
    acss = 0
    if len(css) >= 2:
        #weighted average of the send speed over the last minute:
        acss = int(calculate_size_weighted_average(css)[0])
        latest_ctime = self.congestion_send_speed[-1][0]
        elapsed = now - latest_ctime
        #require at least one recent event:
        if elapsed < 30:
            #as the last event recedes in the past, increase limit:
            acss *= 1 + elapsed
    self.avg_congestion_send_speed = int(acss)
    #how often we get congestion events:
    #first chunk it into second intervals
    min_time = now - 10
    cst = tuple(x[0] for x in css)
    cps = []
    for t in range(10):
        etime = now - t
        matches = tuple(1 for x in cst if x > etime - 1 and x <= etime) or (0, )
        cps.append((etime, sum(matches)))
    #log("cps(%s)=%s (now=%s)", cst, cps, now)
    self.congestion_value = time_weighted_average(cps)
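#Illustrative sketch, not part of the source above: bucketing congestion event
#timestamps into one-second intervals, as done before computing the time-weighted
#congestion value. The helper name below is hypothetical.
def _example_events_per_second(event_times, now, seconds=10):
    buckets = []
    for t in range(seconds):
        etime = now - t
        count = sum(1 for x in event_times if etime - 1 < x <= etime)
        buckets.append((etime, count))
    return buckets

#three events in the last second, none the second before, one event ~2.5 seconds ago:
assert [c for _, c in _example_events_per_second((99.2, 99.5, 99.9, 97.5), now=100.0)][:3] == [3, 0, 1]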
def screen_size_changed(self, *args):
    log("screen_size_changed(%s) timer=%s", args, self.screen_size_change_timer)
    if self.screen_size_change_timer:
        return
    #update via timer so the data is more likely to be final (up to date) when we query it,
    #some properties (like _NET_WORKAREA for X11 clients via xposix "ClientExtras") may
    #trigger multiple calls to screen_size_changed, delayed by some amount
    #(sometimes up to 1s..)
    delay = 1000
    #if we are suspending, wait longer:
    #(better chance that the suspend-resume cycle will have completed)
    if self._suspended_at > 0 and self._suspended_at - monotonic_time() < 5 * 1000:
        delay = 5 * 1000
    self.screen_size_change_timer = self.timeout_add(delay, self.do_process_screen_size_change)
def rpc_call(self, rpc_type, rpc_args, reply_handler=None, error_handler=None):
    assert rpc_type in self.server_rpc_types, "server does not support %s rpc" % rpc_type
    rpcid = self.rpc_counter.increase()
    self.rpc_filter_pending(rpcid)
    #keep track of this request (for timeout / error and reply callbacks):
    req = monotonic_time(), rpc_type, rpc_args, reply_handler, error_handler
    self.rpc_pending_requests[rpcid] = req
    log("sending %s rpc request %s to server: %s", rpc_type, rpcid, req)
    packet = ["rpc", rpc_type, rpcid] + rpc_args
    self.send(*packet)
    self.rpc_filter_timers[rpcid] = self.timeout_add(RPC_TIMEOUT, self.rpc_filter_pending, rpcid)
def reset(self):
    self.init_time = monotonic_time()
    #records how long it took the client to decode frames:
    #(ack_time, no of pixels, decoding_time*1000*1000)
    self.client_decode_time = deque(maxlen=NRECS)
    #encoding: (time, coding, pixels, bpp, compressed_size, encoding_time)
    self.encoding_stats = deque(maxlen=NRECS)
    # statistics:
    #records how long it took for a damage request to be sent
    #last NRECS: (sent_time, no of pixels, actual batch delay, damage_latency)
    self.damage_in_latency = deque(maxlen=NRECS)
    #records how long it took for a damage request to be processed
    #last NRECS: (processed_time, no of pixels, actual batch delay, damage_latency)
    self.damage_out_latency = deque(maxlen=NRECS)
    #records when damage packets are sent
    #so we can calculate the "client_latency" when the client sends
    #the corresponding ack ("damage-sequence" packet - see "client_ack_damage")
    self.damage_ack_pending = {}
    #for each encoding, how many frames we sent and how many pixels in total
    self.encoding_totals = {}
    #damage regions waiting to be picked up by the encoding thread:
    #for each sequence no: (damage_time, w, h)
    self.encoding_pending = {}
    #every time we get a damage event, we record: time,x,y,w,h
    self.last_damage_events = deque(maxlen=4*NRECS)
    self.last_damage_event_time = 0
    self.last_recalculate = 0
    self.damage_events_count = 0
    self.packet_count = 0
    self.last_resized = 0
    self.last_packet_time = 0
    #these values are calculated from the values above (see update_averages)
    self.target_latency = self.DEFAULT_TARGET_LATENCY
    self.avg_damage_in_latency = self.DEFAULT_DAMAGE_LATENCY
    self.recent_damage_in_latency = self.DEFAULT_DAMAGE_LATENCY
    self.avg_damage_out_latency = self.DEFAULT_DAMAGE_LATENCY + self.DEFAULT_NETWORK_LATENCY
    self.recent_damage_out_latency = self.DEFAULT_DAMAGE_LATENCY + self.DEFAULT_NETWORK_LATENCY
    self.max_latency = self.DEFAULT_DAMAGE_LATENCY + self.DEFAULT_NETWORK_LATENCY
    self.avg_decode_speed = -1
    self.recent_decode_speed = -1
def rpc_filter_pending(self, rpcid):
    """ removes timed out dbus requests """
    #the timer may not exist yet (ie: when called from rpc_call before it is scheduled):
    self.rpc_filter_timers.pop(rpcid, None)
    for k in tuple(self.rpc_pending_requests.keys()):
        v = self.rpc_pending_requests.get(k)
        if v is None:
            continue
        t, rpc_type, _rpc_args, _reply_handler, ecb = v
        if 1000*(monotonic_time()-t) >= RPC_TIMEOUT:
            log.warn("%s rpc request: %s has timed out", rpc_type, _rpc_args)
            try:
                del self.rpc_pending_requests[k]
                if ecb is not None:
                    ecb("timeout")
            except Exception as e:
                log.error("Error during timeout handler for %s rpc callback:", rpc_type)
                log.error(" %s", e)
                del e
def show_selected_session(self):
    #show this session:
    try:
        self.cleanup()
        proc = self.do_run_subcommand("top")
        exit_code = proc.wait()
        txt = "top subprocess terminated"
        attr = 0
        if exit_code != 0:
            attr = curses.color_pair(RED)
            txt += " with error code %i" % exit_code
            if exit_code in EXIT_STR:
                txt += " (%s)" % EXIT_STR.get(exit_code, "").replace("_", " ")
            elif (exit_code-128) in SIGNAMES:   #pylint: disable=superfluous-parens
                txt += " (%s)" % SIGNAMES[exit_code-128]
        self.message = monotonic_time(), txt, attr
    finally:
        self.stdscr = curses_init()
def may_recalculate(self, wid, pixel_count):
    if wid in self.calculate_window_ids:
        return  #already scheduled
    v = self.calculate_window_pixels.get(wid, 0) + pixel_count
    self.calculate_window_pixels[wid] = v
    if v < MIN_PIXEL_RECALCULATE:
        return  #not enough pixel updates
    statslog("may_recalculate(%i, %i) total %i pixels, scheduling recalculate work item", wid, pixel_count, v)
    self.calculate_window_ids.add(wid)
    if self.calculate_timer:
        #already due
        return
    delta = monotonic_time() - self.calculate_last_time
    RECALCULATE_DELAY = 1.0     #1s
    if delta > RECALCULATE_DELAY:
        add_work_item(self.recalculate_delays)
    else:
        self.calculate_timer = self.timeout_add(int(1000*(RECALCULATE_DELAY-delta)), add_work_item, self.recalculate_delays)
def __init__(self, protocol, disconnect_cb, session_name,
             setting_changed,
             socket_dir, unix_socket_paths, log_disconnect,
             bandwidth_limit, bandwidth_detection,
             ):
    global counter
    self.counter = counter.increase()
    self.protocol = protocol
    self.connection_time = monotonic_time()
    self.close_event = Event()
    self.disconnect = disconnect_cb
    self.session_name = session_name
    #holds actual packets ready for sending (already encoded)
    #these packets are picked off by the "protocol" via 'next_packet()'
    #format: packet, wid, pixels, start_send_cb, end_send_cb
    #(only packet is required - the rest can be 0/None for clipboard packets)
    self.packet_queue = deque()
    # the encode work queue is used by mixins that need to encode data before sending it,
    # ie: encodings and clipboard
    #this queue will hold functions to call to compress data (pixels, clipboard)
    #items placed in this queue are picked off by the "encode" thread,
    #the functions should add the packets they generate to the 'packet_queue'
    self.encode_work_queue = None
    self.encode_thread = None
    self.ordinary_packets = []
    self.socket_dir = socket_dir
    self.unix_socket_paths = unix_socket_paths
    self.log_disconnect = log_disconnect
    self.setting_changed = setting_changed
    # network constraints:
    self.server_bandwidth_limit = bandwidth_limit
    self.bandwidth_detection = bandwidth_detection
    #these statistics are shared by all WindowSource instances:
    self.statistics = GlobalPerformanceStatistics()