class Worker_Thread(Thread):
    """
    A background thread which calls the functions we post to it.
    The functions are placed in a queue and only called once,
    when this thread gets around to it.
    """

    def __init__(self):
        Thread.__init__(self, name="Worker_Thread")
        #queue of callables to run; a None item is the end-of-queue marker
        self.items = Queue()
        #set to True to make run() terminate
        self.exit = False
        #fix: setDaemon() was deprecated and removed in Python 3.13,
        #assign the daemon attribute directly instead:
        self.daemon = True

    def __repr__(self):
        return "Worker_Thread(items=%s, exit=%s)" % (self.items.qsize(), self.exit)

    def stop(self, force=False):
        """Request the worker thread to terminate.

        With force=True, any pending items are discarded and will never run;
        otherwise the thread drains the queue first and then exits.
        """
        if self.exit:
            #already stopping
            return
        #snapshot of the pending items (excluding any end-of-queue marker):
        items = tuple(x for x in tuple(self.items.queue) if x is not None)
        log("Worker_Thread.stop(%s) %i items still in work queue: %s", force, len(items), items)
        if force:
            if items:
                log.warn("Worker stop: %s items in the queue will not be run!", len(items))
            #wake the thread, then swap in a fresh queue so pending items are dropped:
            self.items.put(None)
            self.items = Queue()
            self.exit = True
        else:
            if items:
                log.info("waiting for %s items in work queue to complete", len(items))
            #end-of-queue marker: run() exits once it reaches it
            self.items.put(None)

    def add(self, item):
        """Queue a callable to be executed by this thread."""
        #warn if the queue is backing up:
        if self.items.qsize() > 10:
            log.warn("Worker_Thread.items queue size is %s", self.items.qsize())
        self.items.put(item)

    def run(self):
        """Main loop: pop callables off the queue and run them until stopped."""
        debug("Worker_Thread.run() starting")
        while not self.exit:
            item = self.items.get()
            if item is None:
                debug("Worker_Thread.run() found end of queue marker")
                self.exit = True
                break
            try:
                debug("Worker_Thread.run() calling %s (queue size=%s)", item, self.items.qsize())
                item()
            except Exception:
                #one failing item must not kill the worker thread:
                log.error("Error in worker thread processing item %s", item, exc_info=True)
        debug("Worker_Thread.run() ended (queue size=%s)", self.items.qsize())
class Worker_Thread(Thread):
    """
    A background thread which calls the functions we post to it.
    The functions are placed in a queue and only called once,
    when this thread gets around to it.
    """

    def __init__(self):
        Thread.__init__(self, name="Worker_Thread")
        #queue of callables to run; None is the end-of-queue marker
        self.items = Queue()
        #flag checked by run() to know when to stop
        self.exit = False
        #fix: setDaemon() was deprecated and removed in Python 3.13,
        #assign the daemon attribute directly instead:
        self.daemon = True

    def __repr__(self):
        return "Worker_Thread(items=%s, exit=%s)" % (self.items.qsize(), self.exit)

    def stop(self, force=False):
        """Request the worker thread to terminate.

        With force=True the pending items are abandoned immediately,
        otherwise the queue is drained before the thread exits.
        """
        if self.exit:
            #already stopping
            return
        if force:
            if self.items.qsize() > 0:
                log.warn("Worker_Thread.stop(%s) %s items in work queue will not run!", force, self.items.qsize())
            self.exit = True
        else:
            if self.items.qsize() > 0:
                log.info("waiting for %s items in work queue to complete", self.items.qsize())
        debug("Worker_Thread.stop(%s) %s items in work queue: ", force, self.items)
        #wake up the thread with the end-of-queue marker:
        self.items.put(None)

    def add(self, item):
        """Queue a callable to be executed by this thread."""
        #warn if the queue is backing up:
        if self.items.qsize() > 10:
            log.warn("Worker_Thread.items queue size is %s", self.items.qsize())
        self.items.put(item)

    def run(self):
        """Main loop: pop callables off the queue and run them until stopped."""
        debug("Worker_Thread.run() starting")
        while not self.exit:
            item = self.items.get()
            if item is None:
                #end-of-queue marker
                break
            try:
                debug("Worker_Thread.run() calling %s (queue size=%s)", item, self.items.qsize())
                item()
            except Exception:
                #fix: narrowed from a bare 'except:' which would also
                #swallow SystemExit and KeyboardInterrupt
                log.error("Worker_Thread.run() error on %s", item, exc_info=True)
        debug("Worker_Thread.run() ended")
        self.exit = True
class Worker_Thread(Thread):
    """
    A background thread which calls the functions we post to it.
    The functions are placed in a queue and only called once,
    when this thread gets around to it.
    """

    def __init__(self):
        Thread.__init__(self, name="Worker_Thread")
        #queue of callables to run; None is the end-of-queue marker
        self.items = Queue()
        #flag checked by run() to know when to stop
        self.exit = False
        #fix: setDaemon() was deprecated and removed in Python 3.13,
        #assign the daemon attribute directly instead:
        self.daemon = True

    def __repr__(self):
        return "Worker_Thread(items=%s, exit=%s)" % (self.items.qsize(), self.exit)

    def stop(self, force=False):
        """Request the worker thread to terminate.

        With force=True the pending items are discarded and will never run,
        otherwise the queue is drained before the thread exits.
        """
        if self.exit:
            #already stopping
            return
        if force:
            if self.items.qsize() > 0:
                log.warn("Worker stop: %s items in the queue will not be run!", self.items.qsize())
            #wake the thread, then swap in a fresh queue so pending items are dropped:
            self.items.put(None)
            self.items = Queue()
            self.exit = True
        else:
            if self.items.qsize() > 0:
                log.info("waiting for %s items in work queue to complete", self.items.qsize())
            debug("Worker_Thread.stop(%s) %s items in work queue", force, self.items)
            #end-of-queue marker: run() exits once it reaches it
            self.items.put(None)

    def add(self, item):
        """Queue a callable to be executed by this thread."""
        #warn if the queue is backing up:
        if self.items.qsize() > 10:
            log.warn("Worker_Thread.items queue size is %s", self.items.qsize())
        self.items.put(item)

    def run(self):
        """Main loop: pop callables off the queue and run them until stopped."""
        debug("Worker_Thread.run() starting")
        while not self.exit:
            item = self.items.get()
            if item is None:
                debug("Worker_Thread.run() found end of queue marker")
                self.exit = True
                break
            try:
                debug("Worker_Thread.run() calling %s (queue size=%s)", item, self.items.qsize())
                item()
            except Exception:
                #fix: narrowed from a bare 'except:' which would also
                #swallow SystemExit and KeyboardInterrupt
                log.error("Error in worker thread processing item %s", item, exc_info=True)
        debug("Worker_Thread.run() ended (queue size=%s)", self.items.qsize())
class WebSocketConnection(SocketConnection):
    """
    A SocketConnection which wraps a websocket handler:
    reads return de-framed websocket payloads,
    writes are sent as websocket frames.
    """

    def __init__(self, socket, local, remote, target, socktype, ws_handler):
        SocketConnection.__init__(self, socket, local, remote, target, socktype)
        self.protocol_type = "websocket"
        #the websocket framing handler: provides recv_frames() and send_frames()
        self.ws_handler = ws_handler
        #frames already received but not yet returned by read():
        self.pending_read = Queue()

    def close(self):
        #drop any buffered frames before closing the underlying socket:
        self.pending_read = Queue()
        SocketConnection.close(self)

    def read(self, n):
        #FIXME: we should try to honour n
        #NOTE(review): 'n' is currently ignored, whole frames are returned
        #regardless of the requested size.
        #Returns None (implicitly) once the connection is no longer active.
        while self.is_active():
            #first serve any frame buffered by a previous read():
            if self.pending_read.qsize():
                buf = self.pending_read.get()
                log("read() returning pending read buffer, len=%i", len(buf))
                #input_bytecount is presumably maintained by SocketConnection - TODO confirm
                self.input_bytecount += len(buf)
                return memoryview_to_bytes(buf)
            bufs, closed_string = self.ws_handler.recv_frames()
            if closed_string:
                #the peer sent a close frame: mark the connection inactive,
                #but still return any data frames received alongside it below
                log("read() closed_string: %s", memoryview_to_bytes(closed_string))
                self.active = False
            log("read() got %i ws frames", len(bufs))
            if bufs:
                buf = bufs[0]
                if len(bufs) > 1:
                    #keep the extra frames for subsequent read() calls:
                    for v in bufs[1:]:
                        self.pending_read.put(v)
                self.input_bytecount += len(buf)
                return memoryview_to_bytes(buf)

    def write(self, buf):
        #send the whole buffer as a single websocket frame:
        self.ws_handler.send_frames([memoryview_to_bytes(buf)])
        self.output_bytecount += len(buf)
        return len(buf)
class Worker_Thread(Thread):
    """
    A background thread which calls the functions posted to its queue,
    one at a time, in order.
    """

    def __init__(self):
        Thread.__init__(self, name="Worker_Thread")
        #queue of callables to run; None is the end-of-queue marker
        self.items = Queue()
        #flag checked by run() to know when to stop
        self.exit = False
        #fix: setDaemon() was deprecated and removed in Python 3.13,
        #assign the daemon attribute directly instead:
        self.daemon = True

    def stop(self, force=False):
        """Request the worker thread to terminate.

        With force=True the pending items are abandoned immediately,
        otherwise the queue is drained before the thread exits.
        """
        if force:
            if self.items.qsize()>0:
                log.warn("Worker_Thread.stop(%s) %s items in work queue will not run!", force, self.items.qsize())
            self.exit = True
        else:
            if self.items.qsize()>0:
                log.info("waiting for %s items in work queue to complete", self.items.qsize())
        debug("Worker_Thread.stop(%s) %s items in work queue: ", force, self.items)
        #wake up the thread with the end-of-queue marker:
        self.items.put(None)

    def add(self, item):
        """Queue a callable to be executed by this thread."""
        #warn if the queue is backing up:
        if self.items.qsize()>10:
            log.warn("Worker_Thread.items queue size is %s", self.items.qsize())
        self.items.put(item)

    def run(self):
        """Main loop: pop callables off the queue and run them until stopped."""
        debug("Worker_Thread.run() starting")
        while not self.exit:
            item = self.items.get()
            if item is None:
                #end-of-queue marker
                break
            try:
                debug("Worker_Thread.run() calling %s (queue size=%s)", item, self.items.qsize())
                item()
            except Exception:
                #fix: narrowed from a bare 'except:' which would also
                #swallow SystemExit and KeyboardInterrupt
                log.error("Worker_Thread.run() error on %s", item, exc_info=True)
        debug("Worker_Thread.run() ended")
class subprocess_callee(object):
    """
    This is the callee side, wrapping the gobject we want to interact with.
    All the input received will be converted to method calls on the wrapped object.
    Subclasses should register the signal handlers they want to see exported back to the caller.
    The convenience connect_export(signal-name, *args) can be used to forward signals unmodified.
    You can also call send() to pass packets back to the caller.
    (there is no validation of which signals are valid or not)
    """

    def __init__(self, input_filename="-", output_filename="-", wrapped_object=None, method_whitelist=None):
        self.name = ""
        #file objects for the wire, set up by make_protocol():
        self._input = None
        self._output = None
        #"-" means stdin / stdout:
        self.input_filename = input_filename
        self.output_filename = output_filename
        #if not None, only these method names may be called remotely:
        self.method_whitelist = method_whitelist
        self.large_packets = []
        #the gobject instance which is wrapped:
        self.wrapped_object = wrapped_object
        self.send_queue = Queue()
        self.protocol = None
        register_os_signals(self.handle_signal)
        self.setup_mainloop()

    def setup_mainloop(self):
        """Create the glib mainloop and expose its scheduling helpers."""
        glib = import_glib()
        self.mainloop = glib.MainLoop()
        self.idle_add = glib.idle_add
        self.timeout_add = glib.timeout_add
        self.source_remove = glib.source_remove

    def connect_export(self, signal_name, *user_data):
        """ gobject style signal registration for the wrapped object,
            the signals will automatically be forwarded to the wrapper process
            using send(signal_name, *signal_args, *user_data)
        """
        log("connect_export%s", [signal_name] + list(user_data))
        args = list(user_data) + [signal_name]
        self.wrapped_object.connect(signal_name, self.export, *args)

    def export(self, *args):
        """Forward a signal emitted by the wrapped object to the caller."""
        #the signal name was appended as the last argument by connect_export():
        signal_name = args[-1]
        log("export(%s, ...)", signal_name)
        #drop the gobject instance (first arg) and the signal name (last arg):
        data = args[1:-1]
        self.send(signal_name, *tuple(data))

    def start(self):
        """Start the protocol and run the mainloop until it exits.

        Returns a process exit code: 0 on clean exit (or KeyboardInterrupt),
        1 on error.
        """
        self.protocol = self.make_protocol()
        self.protocol.start()
        try:
            self.run()
            return 0
        except KeyboardInterrupt as e:
            log("start() KeyboardInterrupt %s", e)
            if str(e):
                log.warn("%s", e)
            return 0
        except Exception:
            log.error("error in main loop", exc_info=True)
            return 1
        finally:
            log("run() ended, calling cleanup and protocol close")
            self.cleanup()
            if self.protocol:
                self.protocol.close()
                self.protocol = None
            #close the input / output files, best effort:
            i = self._input
            if i:
                self._input = None
                try:
                    i.close()
                except (OSError, IOError):
                    log("%s.close()", i, exc_info=True)
            o = self._output
            if o:
                self._output = None
                try:
                    o.close()
                except (OSError, IOError):
                    log("%s.close()", o, exc_info=True)

    def make_protocol(self):
        """Create the Protocol wrapping our input / output files."""
        #figure out where we read from and write to:
        if self.input_filename=="-":
            #disable stdin buffering:
            self._input = os.fdopen(sys.stdin.fileno(), 'rb', 0)
            setbinarymode(self._input.fileno())
        else:
            self._input = open(self.input_filename, 'rb')
        if self.output_filename=="-":
            #disable stdout buffering:
            self._output = os.fdopen(sys.stdout.fileno(), 'wb', 0)
            setbinarymode(self._output.fileno())
        else:
            self._output = open(self.output_filename, 'wb')
        #stdin and stdout wrapper:
        conn = TwoFileConnection(self._output, self._input,
                                 abort_test=None, target=self.name,
                                 socktype=self.name, close_cb=self.net_stop)
        conn.timeout = 0
        protocol = Protocol(self, conn, self.process_packet, get_packet_cb=self.get_packet)
        setup_fastencoder_nocompression(protocol)
        protocol.large_packets = self.large_packets
        return protocol

    def run(self):
        self.mainloop.run()

    def net_stop(self):
        #this is called from the network thread,
        #we use idle add to ensure we clean things up from the main thread
        log("net_stop() will call stop from main thread")
        self.idle_add(self.stop)

    def cleanup(self):
        """Hook for subclasses: called before stopping."""
        pass

    def stop(self):
        """Close the protocol and quit the mainloop."""
        self.cleanup()
        p = self.protocol
        log("stop() protocol=%s", p)
        if p:
            self.protocol = None
            p.close()
        self.do_stop()

    def do_stop(self):
        log("stop() stopping mainloop %s", self.mainloop)
        self.mainloop.quit()

    def handle_signal(self, sig):
        """ This is for OS signals SIGINT and SIGTERM """
        #next time, just stop:
        register_os_signals(self.signal_stop)
        signame = SIGNAMES.get(sig, sig)
        log("handle_signal(%s) calling stop from main thread", signame)
        #tell the caller we got a signal, then shut down:
        self.send("signal", signame)
        self.timeout_add(0, self.cleanup)
        #give time for the network layer to send the signal message
        self.timeout_add(150, self.stop)

    def signal_stop(self, sig):
        """ This time we really want to exit without waiting """
        signame = SIGNAMES.get(sig, sig)
        log("signal_stop(%s) calling stop", signame)
        self.stop()

    def send(self, *args):
        """Queue a packet to send back to the caller."""
        if HEXLIFY_PACKETS:
            #fix: args is a tuple, so 'args[:1] + [...]' raised a TypeError
            #(cannot concatenate tuple and list) - convert the head to a list:
            args = list(args[:1])+[hexstr(str(x)[:32]) for x in args[1:]]
        log("send: adding '%s' message (%s items already in queue)", args[0], self.send_queue.qsize())
        self.send_queue.put(args)
        p = self.protocol
        if p:
            p.source_has_more()
        INJECT_FAULT(p)

    def get_packet(self):
        """Protocol callback: pop the next packet to send, non-blocking."""
        try:
            item = self.send_queue.get(False)
        except Exception:
            item = None
        #last element tells the protocol if there is more data pending:
        return (item, None, None, self.send_queue.qsize()>0)

    def process_packet(self, proto, packet):
        """Protocol callback: turn an incoming packet into a method call
        on the wrapped object (scheduled on the main thread)."""
        command = bytestostr(packet[0])
        if command==Protocol.CONNECTION_LOST:
            log("connection-lost: %s, calling stop", packet[1:])
            self.net_stop()
            return
        if command==Protocol.GIBBERISH:
            log.warn("gibberish received:")
            log.warn(" %s", repr_ellipsized(packet[1], limit=80))
            log.warn(" stopping")
            self.net_stop()
            return
        if command=="stop":
            log("received stop message")
            self.net_stop()
            return
        if command=="exit":
            log("received exit message")
            sys.exit(0)
            return
        #make it easier to hookup signals to methods:
        attr = command.replace("-", "_")
        if self.method_whitelist is not None and attr not in self.method_whitelist:
            log.warn("invalid command: %s (not in whitelist: %s)", attr, self.method_whitelist)
            return
        wo = self.wrapped_object
        if not wo:
            log("wrapped object is no more, ignoring method call '%s'", attr)
            return
        method = getattr(wo, attr, None)
        if not method:
            log.warn("unknown command: '%s'", attr)
            log.warn(" packet: '%s'", repr_ellipsized(str(packet)))
            return
        if DEBUG_WRAPPER:
            log("calling %s.%s%s", wo, attr, str(tuple(packet[1:]))[:128])
        #invoke from the main thread:
        self.idle_add(method, *packet[1:])
        INJECT_FAULT(proto)
class subprocess_caller(object):
    """
    This is the caller side, wrapping the subprocess.
    You can call send() to pass packets to it
    which will get converted to method calls on the receiving end,
    You can register for signals, in which case your callbacks will be called
    when those signals are forwarded back.
    (there is no validation of which signals are valid or not)
    """

    def __init__(self, description="wrapper"):
        self.process = None
        self.protocol = None
        #subclasses / users must set the command to execute:
        self.command = None
        self.description = description
        self.send_queue = Queue()
        #signal-name -> list of (callback, args):
        self.signal_callbacks = {}
        self.large_packets = []
        #hook a default packet handlers:
        self.connect(Protocol.CONNECTION_LOST, self.connection_lost)
        self.connect(Protocol.GIBBERISH, self.gibberish)
        glib = import_glib()
        self.idle_add = glib.idle_add
        self.timeout_add = glib.timeout_add
        self.source_remove = glib.source_remove

    def connect(self, signal, cb, *args):
        """ gobject style signal registration """
        self.signal_callbacks.setdefault(signal, []).append((cb, list(args)))

    def subprocess_exit(self, *args):
        #beware: this may fire more than once!
        log("subprocess_exit%s command=%s", args, self.command)
        self._fire_callback("exit")

    def start(self):
        """Spawn the subprocess and start the protocol over its pipes."""
        assert self.process is None, "already started"
        self.process = self.exec_subprocess()
        self.protocol = self.make_protocol()
        self.protocol.start()

    def abort_test(self, action):
        """Raise ConnectionClosedException if the subprocess has terminated."""
        p = self.process
        #fix: poll() returns None while the process is running and the exit
        #code once it has terminated - the previous truthiness test missed
        #processes that exited cleanly with code 0:
        if p is None or p.poll() is not None:
            raise ConnectionClosedException("cannot %s: subprocess has terminated" % action)

    def make_protocol(self):
        #make a connection using the process stdin / stdout
        conn = TwoFileConnection(self.process.stdin, self.process.stdout,
                                 abort_test=self.abort_test, target=self.description,
                                 socktype=self.description, close_cb=self.subprocess_exit)
        conn.timeout = 0
        protocol = Protocol(self, conn, self.process_packet, get_packet_cb=self.get_packet)
        setup_fastencoder_nocompression(protocol)
        protocol.large_packets = self.large_packets
        return protocol

    def exec_subprocess(self):
        """Launch self.command with pipes for stdin / stdout and register
        it with the child reaper so we hear about its exit."""
        kwargs = exec_kwargs()
        env = self.get_env()
        log("exec_subprocess() command=%s, env=%s, kwargs=%s", self.command, env, kwargs)
        proc = subprocess.Popen(self.command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=env, **kwargs)
        getChildReaper().add_process(proc, self.description, self.command, True, True, callback=self.subprocess_exit)
        return proc

    def get_env(self):
        """Environment for the subprocess."""
        env = exec_env()
        env["XPRA_LOG_PREFIX"] = "%s " % self.description
        env["XPRA_FIX_UNICODE_OUT"] = "0"
        return env

    def cleanup(self):
        self.stop()

    def stop(self):
        self.stop_process()
        self.stop_protocol()

    def stop_process(self):
        """Terminate the subprocess if it is still running (best effort)."""
        log("%s.stop_process() sending stop request to %s", self, self.description)
        proc = self.process
        if proc and proc.poll() is None:
            try:
                proc.terminate()
                self.process = None
            except Exception as e:
                log.warn("failed to stop the wrapped subprocess %s: %s", proc, e)

    def stop_protocol(self):
        """Close the protocol (best effort)."""
        p = self.protocol
        if p:
            self.protocol = None
            log("%s.stop_protocol() calling %s", self, p.close)
            try:
                p.close()
            except Exception as e:
                #fix: the format string only had one placeholder for two
                #arguments, which made the logging call itself fail:
                log.warn("failed to close the subprocess connection %s: %s", p, e)

    def connection_lost(self, *args):
        log("connection_lost%s", args)
        self.stop()

    def gibberish(self, *args):
        log.warn("%s stopping on gibberish:", self.description)
        log.warn(" %s", repr_ellipsized(args[1], limit=80))
        self.stop()

    def get_packet(self):
        """Protocol callback: pop the next packet to send, non-blocking."""
        try:
            item = self.send_queue.get(False)
        except Exception:
            item = None
        #last element tells the protocol if there is more data pending:
        return (item, None, None, None, False, self.send_queue.qsize()>0)

    def send(self, *packet_data):
        """Queue a packet for the subprocess."""
        self.send_queue.put(packet_data)
        p = self.protocol
        if p:
            p.source_has_more()
        INJECT_FAULT(p)

    def process_packet(self, proto, packet):
        """Protocol callback: fire the signal callbacks registered for
        the packet's signal name."""
        if DEBUG_WRAPPER:
            log("process_packet(%s, %s)", proto, [str(x)[:32] for x in packet])
        signal_name = bytestostr(packet[0])
        self._fire_callback(signal_name, packet[1:])
        INJECT_FAULT(proto)

    def _fire_callback(self, signal_name, extra_args=()):
        """Schedule all callbacks registered for signal_name on the main thread."""
        callbacks = self.signal_callbacks.get(signal_name)
        log("firing callback for '%s': %s", signal_name, callbacks)
        if callbacks:
            for cb, args in callbacks:
                try:
                    all_args = list(args) + list(extra_args)
                    self.idle_add(cb, self, *all_args)
                except Exception:
                    log.error("error processing callback %s for %s packet", cb, signal_name, exc_info=True)
class RFBProtocol(object):
    """
    Server-side implementation of the RFB (VNC) wire protocol.

    Incoming bytes are accumulated in self._buffer and fed to
    self._packet_parser, a state-machine function which returns the number
    of bytes consumed (0 means: wait for more data).  The parser function is
    swapped as the handshake progresses:
    _parse_protocol_handshake -> _parse_security_handshake
    [-> _parse_challenge] -> _parse_security_result -> _parse_rfb.
    Outgoing data is queued on self._write_queue and drained by a
    dedicated write thread.
    """
    CONNECTION_LOST = "connection-lost"
    INVALID = "invalid"

    def __init__(self, scheduler, conn, auth, process_packet_cb, get_rfb_pixelformat, session_name="Xpra"):
        """
        You must call this constructor and source_has_more() from the main thread.
        """
        assert scheduler is not None
        assert conn is not None
        self.timeout_add = scheduler.timeout_add
        self.idle_add = scheduler.idle_add
        self._conn = conn
        self._authenticator = auth
        self._process_packet_cb = process_packet_cb
        self._get_rfb_pixelformat = get_rfb_pixelformat
        self.session_name = session_name
        #outgoing packets, drained by the write thread; None is the exit marker
        self._write_queue = Queue()
        #incoming bytes not yet parsed:
        self._buffer = b""
        #the 16-byte VNC auth challenge we sent (if any):
        self._challenge = None
        self.share = False
        #counters:
        self.input_packetcount = 0
        self.input_raw_packetcount = 0
        self.output_packetcount = 0
        self.output_raw_packetcount = 0
        self._protocol_version = ()
        self._closed = False
        #current state of the parser state machine:
        self._packet_parser = self._parse_protocol_handshake
        self._write_thread = None
        self._read_thread = make_thread(self._read_thread_loop, "read", daemon=True)

    def send_protocol_handshake(self):
        self.send(b"RFB 003.008\n")

    def _parse_invalid(self, packet):
        #terminal state: swallow everything after an invalid packet
        return len(packet)

    def _parse_protocol_handshake(self, packet):
        """Parse the 12-byte 'RFB xxx.yyy\\n' version handshake."""
        log("parse_protocol_handshake(%s)", nonl(packet))
        if len(packet) < 12:
            #need more data:
            return 0
        if not packet.startswith(b'RFB '):
            self._invalid_header(packet, "invalid RFB protocol handshake packet header")
            return 0
        #ie: packet==b'RFB 003.008\n'
        self._protocol_version = tuple(int(x) for x in packet[4:11].split(b"."))
        log.info("RFB version %s connection from %s",
                 ".".join(str(x) for x in self._protocol_version), self._conn.target)
        if self._protocol_version != (3, 8):
            msg = "unsupported protocol version"
            log.error("Error: %s", msg)
            #NOTE(review): msg is a str, so 'bytes + msg' looks like it would
            #raise a TypeError on Python 3 - confirm and encode if needed
            self.send(struct.pack(b"!BI", 0, len(msg)) + msg)
            self.invalid(msg, packet)
            return 0
        #reply with Security Handshake:
        self._packet_parser = self._parse_security_handshake
        if self._authenticator and self._authenticator.requires_challenge():
            security_types = [RFBAuth.VNC]
        else:
            security_types = [RFBAuth.NONE]
        packet = struct.pack(b"B", len(security_types))
        for x in security_types:
            packet += struct.pack(b"B", x)
        self.send(packet)
        return 12

    def _parse_security_handshake(self, packet):
        """Parse the 1-byte security-type chosen by the client."""
        log("parse_security_handshake(%s)", hexstr(packet))
        try:
            auth = struct.unpack(b"B", packet)[0]
        except:
            self._internal_error(packet, "cannot parse security handshake response '%s'" % hexstr(packet))
            return 0
        auth_str = RFBAuth.AUTH_STR.get(auth, auth)
        if auth == RFBAuth.VNC:
            #send challenge:
            self._packet_parser = self._parse_challenge
            assert self._authenticator
            challenge, digest = self._authenticator.get_challenge("des")
            assert digest == "des"
            #VNC auth challenges are 16 bytes:
            self._challenge = challenge[:16]
            log("sending RFB challenge value: %s", hexstr(self._challenge))
            self.send(self._challenge)
            return 1
        if self._authenticator and self._authenticator.requires_challenge():
            #client chose a type we did not offer:
            self._invalid_header(packet, "invalid security handshake response, authentication is required")
            return 0
        log("parse_security_handshake: auth=%s, sending SecurityResult", auth_str)
        #Security Handshake, send SecurityResult Handshake
        self._packet_parser = self._parse_security_result
        self.send(struct.pack(b"!I", 0))
        return 1

    def _parse_challenge(self, response):
        """Verify the 16-byte DES challenge response from the client."""
        assert self._authenticator
        log("parse_challenge(%s)", hexstr(response))
        try:
            assert len(response) == 16
            hex_response = hexstr(response)
            #log("padded password=%s", password)
            if self._authenticator.authenticate(hex_response):
                log("challenge authentication succeeded")
                #SecurityResult: 0 means OK
                self.send(struct.pack(b"!I", 0))
                self._packet_parser = self._parse_security_result
                return 16
            log.warn("Warning: authentication challenge response failure")
            log.warn(" password does not match")
        except Exception as e:
            log("parse_challenge(%s)", hexstr(response), exc_info=True)
            log.error("Error: authentication challenge failure:")
            log.error(" %s", e)
        #failed: send the failure result after a delay
        self.timeout_add(1000, self.send_fail_challenge)
        return len(response)

    def send_fail_challenge(self):
        #SecurityResult: 1 means failed, then hang up
        self.send(struct.pack(b"!I", 1))
        self.close()

    def _parse_security_result(self, packet):
        """Parse the 1-byte ClientInit (shared-flag) and send ServerInit."""
        #any non-zero byte means the client wants to share the session:
        self.share = packet != b"\0"
        log("parse_security_result: sharing=%s, sending ClientInit with session-name=%s",
            self.share, self.session_name)
        #send ClientInit
        self._packet_parser = self._parse_rfb
        w, h, bpp, depth, bigendian, truecolor, rmax, gmax, bmax, rshift, bshift, gshift = self._get_rfb_pixelformat()
        packet = struct.pack(b"!HH" + PIXEL_FORMAT + b"I",
                             w, h, bpp, depth, bigendian, truecolor,
                             rmax, gmax, bmax, rshift, bshift, gshift,
                             0, 0, 0, len(self.session_name)) + strtobytes(self.session_name)
        self.send(packet)
        self._process_packet_cb(self, [b"authenticated"])
        return 1

    def _parse_rfb(self, packet):
        """Parse a regular client message once the handshake is complete.

        Returns the number of bytes consumed, or 0 if the message is
        incomplete (wait for more data) or invalid.
        """
        try:
            #Python 2 str: packet[0] is a character
            ptype = ord(packet[0])
        except:
            #Python 3 bytes: packet[0] is already an int
            ptype = packet[0]
        packet_type = RFBClientMessage.PACKET_TYPE_STR.get(ptype)
        if not packet_type:
            self.invalid("unknown RFB packet type: %#x" % ptype, packet)
            return 0
        s = RFBClientMessage.PACKET_STRUCT.get(ptype)  #ie: Struct("!BBBB")
        if not s:
            self.invalid("RFB packet type '%s' is not supported" % packet_type, packet)
            return 0
        if len(packet) < s.size:
            #need more data for the fixed-size part:
            return 0
        size = s.size
        values = list(s.unpack(packet[:size]))
        #replace the numeric type with its name for the callback:
        values[0] = packet_type
        #some packets require parsing extra data:
        if ptype == RFBClientMessage.SETENCODINGS:
            N = values[2]
            estruct = struct.Struct(b"!" + b"i" * N)
            size += estruct.size
            if len(packet) < size:
                return 0
            encodings = estruct.unpack(packet[s.size:size])
            values.append(encodings)
        elif ptype == RFBClientMessage.CLIENTCUTTEXT:
            #variable-length text payload:
            l = values[4]
            size += l
            if len(packet) < size:
                return 0
            text = packet[s.size:size]
            values.append(text)
        self.input_packetcount += 1
        log("RFB packet: %s: %s", packet_type, values[1:])
        #now trigger the callback:
        self._process_packet_cb(self, values)
        #return part of packet not consumed:
        return size

    def __repr__(self):
        return "RFBProtocol(%s)" % self._conn

    def get_threads(self):
        return [x for x in [self._write_thread, self._read_thread] if x is not None]

    def get_info(self, *_args):
        info = {"protocol": self._protocol_version}
        for t in self.get_threads():
            info.setdefault("thread", {})[t.name] = t.is_alive()
        return info

    def start(self):
        #start the read thread from the main loop,
        #unless we have been closed in the meantime:
        def start_network_read_thread():
            if not self._closed:
                self._read_thread.start()
        self.idle_add(start_network_read_thread)

    def send_disconnect(self, *_args, **_kwargs):
        #no such packet in RFB, just close
        self.close()

    def queue_size(self):
        return self._write_queue.qsize()

    def send(self, packet):
        """Queue raw bytes for the write thread (started lazily)."""
        if self._closed:
            log("connection is closed already, not sending packet")
            return
        if log.is_debug_enabled():
            if len(packet) <= 16:
                log("send(%i bytes: %s)", len(packet), hexstr(packet))
            else:
                from xpra.simple_stats import std_unit
                log("send(%sBytes: %s..)", std_unit(len(packet)), hexstr(packet[:16]))
        if self._write_thread is None:
            self.start_write_thread()
        self._write_queue.put(packet)

    def start_write_thread(self):
        self._write_thread = start_thread(self._write_thread_loop, "write", daemon=True)

    def _io_thread_loop(self, name, callback):
        """Run callback() until it returns falsy or the protocol is closed,
        mapping I/O errors to connection-lost handling."""
        try:
            log("io_thread_loop(%s, %s) loop starting", name, callback)
            while not self._closed and callback():
                pass
            log("io_thread_loop(%s, %s) loop ended, closed=%s", name, callback, self._closed)
        except ConnectionClosedException as e:
            log("%s closed", self._conn, exc_info=True)
            if not self._closed:
                #ConnectionClosedException means the warning has been logged already
                self._connection_lost("%s connection %s closed" % (name, self._conn))
        except (OSError, IOError, socket_error) as e:
            if not self._closed:
                self._internal_error("%s connection %s reset" % (name, self._conn), e,
                                     exc_info=e.args[0] not in ABORT)
        except Exception as e:
            #can happen during close(), in which case we just ignore:
            if not self._closed:
                log.error("Error: %s on %s failed: %s", name, self._conn, type(e), exc_info=True)
                self.close()

    def _write_thread_loop(self):
        self._io_thread_loop("write", self._write)

    def _write(self):
        """Write one queued packet to the connection; returns False to stop."""
        buf = self._write_queue.get()
        # Used to signal that we should exit:
        if buf is None:
            log("write thread: empty marker, exiting")
            self.close()
            return False
        con = self._conn
        if not con:
            return False
        #loop since a single write may not send the whole buffer:
        while buf and not self._closed:
            written = con.write(buf)
            if written:
                buf = buf[written:]
                self.output_raw_packetcount += 1
        self.output_packetcount += 1
        return True

    def _read_thread_loop(self):
        self._io_thread_loop("read", self._read)

    def _read(self):
        """Read one chunk and feed the parser state machine; returns False on EOF."""
        c = self._conn
        if not c:
            return None
        buf = c.read(READ_BUFFER_SIZE)
        #log("read()=%s", repr_ellipsized(buf))
        if not buf:
            log("read thread: eof")
            #give time to the parse thread to call close itself
            #so it has time to parse and process the last packet received
            self.timeout_add(1000, self.close)
            return False
        self.input_raw_packetcount += 1
        self._buffer += buf
        #log("calling %s(%s)", self._packet_parser, repr_ellipsized(self._buffer))
        #keep parsing until the parser needs more data (returns 0):
        while self._buffer:
            consumed = self._packet_parser(self._buffer)
            if consumed == 0:
                break
            self._buffer = self._buffer[consumed:]
        return True

    def _internal_error(self, message="", exc=None, exc_info=False):
        #log exception info with last log message
        if self._closed:
            return
        ei = exc_info
        if exc:
            ei = None  #log it separately below
        log.error("Error: %s", message, exc_info=ei)
        if exc:
            log.error(" %s", exc, exc_info=exc_info)
        self.idle_add(self._connection_lost, message)

    def _connection_lost(self, message="", exc_info=False):
        log("connection lost: %s", message, exc_info=exc_info)
        self.close()
        #returning False removes this from any glib timer that scheduled it:
        return False

    def invalid(self, msg, data):
        """Switch to the terminal 'invalid' parser and schedule a hang-up."""
        self._packet_parser = self._parse_invalid
        self.idle_add(self._process_packet_cb, self, [RFBProtocol.INVALID, msg, data])
        # Then hang up:
        self.timeout_add(1000, self._connection_lost, msg)

    #delegates to invalid_header()
    #(so this can more easily be intercepted and overriden
    # see tcp-proxy)
    def _invalid_header(self, data, msg=""):
        self.invalid_header(self, data, msg)

    def invalid_header(self, _proto, data, msg="invalid packet header"):
        self._packet_parser = self._parse_invalid
        err = "%s: '%s'" % (msg, hexstr(data[:8]))
        if len(data) > 1:
            err += " read buffer=%s (%i bytes)" % (repr_ellipsized(data), len(data))
        self.invalid(err, data)

    def gibberish(self, msg, data):
        log("gibberish(%s, %r)", msg, data)
        self.close()

    def close(self):
        """Close the connection and terminate the I/O threads (idempotent)."""
        log("RFBProtocol.close() closed=%s, connection=%s", self._closed, self._conn)
        if self._closed:
            return
        self._closed = True
        #self.idle_add(self._process_packet_cb, self, [Protocol.CONNECTION_LOST])
        c = self._conn
        if c:
            try:
                log("RFBProtocol.close() calling %s", c.close)
                c.close()
            except:
                log.error("error closing %s", self._conn, exc_info=True)
            self._conn = None
        self.terminate_queue_threads()
        self._process_packet_cb(self, [RFBProtocol.CONNECTION_LOST])
        self.idle_add(self.clean)
        log("RFBProtocol.close() done")

    def clean(self):
        #clear all references to ensure we can get garbage collected quickly:
        self._write_thread = None
        self._read_thread = None
        self._process_packet_cb = None

    def terminate_queue_threads(self):
        log("terminate_queue_threads()")
        #make all the queue based threads exit by adding the empty marker:
        owq = self._write_queue
        self._write_queue = exit_queue()
        force_flush_queue(owq)
class ProxyInstanceProcess(Process): def __init__(self, uid, gid, env_options, session_options, socket_dir, video_encoder_modules, csc_modules, client_conn, disp_desc, client_state, cipher, encryption_key, server_conn, caps, message_queue): Process.__init__(self, name=str(client_conn)) self.uid = uid self.gid = gid self.env_options = env_options self.session_options = session_options self.socket_dir = socket_dir self.video_encoder_modules = video_encoder_modules self.csc_modules = csc_modules self.client_conn = client_conn self.disp_desc = disp_desc self.client_state = client_state self.cipher = cipher self.encryption_key = encryption_key self.server_conn = server_conn self.caps = caps log("ProxyProcess%s", (uid, gid, env_options, session_options, socket_dir, video_encoder_modules, csc_modules, client_conn, disp_desc, repr_ellipsized(str(client_state)), cipher, encryption_key, server_conn, "%s: %s.." % (type(caps), repr_ellipsized(str(caps))), message_queue)) self.client_protocol = None self.server_protocol = None self.exit = False self.main_queue = None self.message_queue = message_queue self.encode_queue = None #holds draw packets to encode self.encode_thread = None self.video_encoding_defs = None self.video_encoders = None self.video_encoders_last_used_time = None self.video_encoder_types = None self.video_helper = None self.lost_windows = None #for handling the local unix domain socket: self.control_socket_cleanup = None self.control_socket = None self.control_socket_thread = None self.control_socket_path = None self.potential_protocols = [] self.max_connections = MAX_CONCURRENT_CONNECTIONS def server_message_queue(self): while True: log("waiting for server message on %s", self.message_queue) m = self.message_queue.get() log("received proxy server message: %s", m) if m=="stop": self.stop("proxy server request") return elif m=="socket-handover-complete": log("setting sockets to blocking mode: %s", (self.client_conn, self.server_conn)) #set sockets to blocking 
mode: set_blocking(self.client_conn) set_blocking(self.server_conn) else: log.error("unexpected proxy server message: %s", m) def signal_quit(self, signum, frame): log.info("") log.info("proxy process pid %s got signal %s, exiting", os.getpid(), SIGNAMES.get(signum, signum)) self.exit = True signal.signal(signal.SIGINT, deadly_signal) signal.signal(signal.SIGTERM, deadly_signal) self.stop(SIGNAMES.get(signum, signum)) def idle_add(self, fn, *args, **kwargs): #we emulate gobject's idle_add using a simple queue self.main_queue.put((fn, args, kwargs)) def timeout_add(self, timeout, fn, *args, **kwargs): #emulate gobject's timeout_add using idle add and a Timer #using custom functions to cancel() the timer when needed def idle_exec(): v = fn(*args, **kwargs) if bool(v): self.timeout_add(timeout, fn, *args, **kwargs) return False def timer_exec(): #just run via idle_add: self.idle_add(idle_exec) Timer(timeout/1000.0, timer_exec).start() def run(self): log("ProxyProcess.run() pid=%s, uid=%s, gid=%s", os.getpid(), getuid(), getgid()) setuidgid(self.uid, self.gid) if self.env_options: #TODO: whitelist env update? 
os.environ.update(self.env_options) self.video_init() log.info("new proxy instance started") log.info(" for client %s", self.client_conn) log.info(" and server %s", self.server_conn) signal.signal(signal.SIGTERM, self.signal_quit) signal.signal(signal.SIGINT, self.signal_quit) log("registered signal handler %s", self.signal_quit) start_thread(self.server_message_queue, "server message queue") if not self.create_control_socket(): #TODO: should send a message to the client return self.control_socket_thread = start_thread(self.control_socket_loop, "control") self.main_queue = Queue() #setup protocol wrappers: self.server_packets = Queue(PROXY_QUEUE_SIZE) self.client_packets = Queue(PROXY_QUEUE_SIZE) self.client_protocol = Protocol(self, self.client_conn, self.process_client_packet, self.get_client_packet) self.client_protocol.restore_state(self.client_state) self.server_protocol = Protocol(self, self.server_conn, self.process_server_packet, self.get_server_packet) #server connection tweaks: self.server_protocol.large_packets.append("input-devices") self.server_protocol.large_packets.append("draw") self.server_protocol.large_packets.append("window-icon") self.server_protocol.large_packets.append("keymap-changed") self.server_protocol.large_packets.append("server-settings") if self.caps.boolget("file-transfer"): self.client_protocol.large_packets.append("send-file") self.client_protocol.large_packets.append("send-file-chunk") self.server_protocol.large_packets.append("send-file") self.server_protocol.large_packets.append("send-file-chunk") self.server_protocol.set_compression_level(self.session_options.get("compression_level", 0)) self.server_protocol.enable_default_encoder() self.lost_windows = set() self.encode_queue = Queue() self.encode_thread = start_thread(self.encode_loop, "encode") log("starting network threads") self.server_protocol.start() self.client_protocol.start() self.send_hello() self.timeout_add(VIDEO_TIMEOUT*1000, self.timeout_video_encoders) try: 
self.run_queue() except KeyboardInterrupt as e: self.stop(str(e)) finally: log("ProxyProcess.run() ending %s", os.getpid()) def video_init(self): enclog("video_init() loading codecs") load_codecs(decoders=False) enclog("video_init() will try video encoders: %s", csv(self.video_encoder_modules) or "none") self.video_helper = getVideoHelper() #only use video encoders (no CSC supported in proxy) self.video_helper.set_modules(video_encoders=self.video_encoder_modules) self.video_helper.init() self.video_encoding_defs = {} self.video_encoders = {} self.video_encoders_dst_formats = [] self.video_encoders_last_used_time = {} self.video_encoder_types = [] #figure out which encoders we want to proxy for (if any): encoder_types = set() for encoding in self.video_helper.get_encodings(): colorspace_specs = self.video_helper.get_encoder_specs(encoding) for colorspace, especs in colorspace_specs.items(): if colorspace not in ("BGRX", "BGRA", "RGBX", "RGBA"): #only deal with encoders that can handle plain RGB directly continue for spec in especs: #ie: video_spec("x264") spec_props = spec.to_dict() del spec_props["codec_class"] #not serializable! 
spec_props["score_boost"] = 50 #we want to win scoring so we get used ahead of other encoders spec_props["max_instances"] = 3 #limit to 3 video streams we proxy for (we really want 2, # but because of races with garbage collection, we need to allow more) #store it in encoding defs: self.video_encoding_defs.setdefault(encoding, {}).setdefault(colorspace, []).append(spec_props) encoder_types.add(spec.codec_type) enclog("encoder types found: %s", tuple(encoder_types)) #remove duplicates and use preferred order: order = PREFERRED_ENCODER_ORDER[:] for x in list(encoder_types): if x not in order: order.append(x) self.video_encoder_types = [x for x in order if x in encoder_types] enclog.info("proxy video encoders: %s", ", ".join(self.video_encoder_types)) def create_control_socket(self): assert self.socket_dir username = get_username_for_uid(self.uid) dotxpra = DotXpra(self.socket_dir, actual_username=username, uid=self.uid, gid=self.gid) sockpath = dotxpra.socket_path(":proxy-%s" % os.getpid()) state = dotxpra.get_server_state(sockpath) log("create_control_socket: socket path='%s', uid=%i, gid=%i, state=%s", sockpath, getuid(), getgid(), state) if state in (DotXpra.LIVE, DotXpra.UNKNOWN): log.error("Error: you already have a proxy server running at '%s'", sockpath) log.error(" the control socket will not be created") return False d = os.path.dirname(sockpath) try: dotxpra.mksockdir(d) except Exception as e: log.warn("Warning: failed to create socket directory '%s'", d) log.warn(" %s", e) try: sock, self.control_socket_cleanup = create_unix_domain_socket(sockpath, None, 0o600) sock.listen(5) except Exception as e: log("create_unix_domain_socket failed for '%s'", sockpath, exc_info=True) log.error("Error: failed to setup control socket '%s':", sockpath) log.error(" %s", e) return False self.control_socket = sock self.control_socket_path = sockpath log.info("proxy instance now also available using unix domain socket:") log.info(" %s", self.control_socket_path) return True 
def control_socket_loop(self): while not self.exit: log("waiting for connection on %s", self.control_socket_path) sock, address = self.control_socket.accept() self.new_control_connection(sock, address) def new_control_connection(self, sock, address): if len(self.potential_protocols)>=self.max_connections: log.error("too many connections (%s), ignoring new one", len(self.potential_protocols)) sock.close() return True try: peername = sock.getpeername() except: peername = str(address) sockname = sock.getsockname() target = peername or sockname #sock.settimeout(0) log("new_control_connection() sock=%s, sockname=%s, address=%s, peername=%s", sock, sockname, address, peername) sc = SocketConnection(sock, sockname, address, target, "unix-domain") log.info("New proxy instance control connection received: %s", sc) protocol = Protocol(self, sc, self.process_control_packet) protocol.large_packets.append("info-response") self.potential_protocols.append(protocol) protocol.enable_default_encoder() protocol.start() self.timeout_add(SOCKET_TIMEOUT*1000, self.verify_connection_accepted, protocol) return True def verify_connection_accepted(self, protocol): if not protocol._closed and protocol in self.potential_protocols: log.error("connection timedout: %s", protocol) self.send_disconnect(protocol, LOGIN_TIMEOUT) def process_control_packet(self, proto, packet): try: self.do_process_control_packet(proto, packet) except Exception as e: log.error("error processing control packet", exc_info=True) self.send_disconnect(proto, CONTROL_COMMAND_ERROR, str(e)) def do_process_control_packet(self, proto, packet): log("process_control_packet(%s, %s)", proto, packet) packet_type = packet[0] if packet_type==Protocol.CONNECTION_LOST: log.info("Connection lost") if proto in self.potential_protocols: self.potential_protocols.remove(proto) return if packet_type=="hello": caps = typedict(packet[1]) if caps.boolget("challenge"): self.send_disconnect(proto, AUTHENTICATION_ERROR, "this socket does not use 
authentication") return if caps.get("info_request", False): proto.send_now(("hello", self.get_proxy_info(proto))) self.timeout_add(5*1000, self.send_disconnect, proto, CLIENT_EXIT_TIMEOUT, "info sent") return elif caps.get("stop_request", False): self.stop("socket request", None) return elif caps.get("version_request", False): from xpra import __version__ proto.send_now(("hello", {"version" : __version__})) self.timeout_add(5*1000, self.send_disconnect, proto, CLIENT_EXIT_TIMEOUT, "version sent") return self.send_disconnect(proto, CONTROL_COMMAND_ERROR, "this socket only handles 'info', 'version' and 'stop' requests") def send_disconnect(self, proto, reason, *extra): log("send_disconnect(%s, %s, %s)", proto, reason, extra) if proto._closed: return proto.send_now(["disconnect", reason]+list(extra)) self.timeout_add(1000, self.force_disconnect, proto) def force_disconnect(self, proto): proto.close() def get_proxy_info(self, proto): sinfo = {} sinfo.update(get_server_info()) sinfo.update(get_thread_info(proto)) return {"proxy" : { "version" : local_version, "" : sinfo, }, "window" : self.get_window_info(), } def send_hello(self, challenge_response=None, client_salt=None): hello = self.filter_client_caps(self.caps) if challenge_response: hello.update({ "challenge_response" : challenge_response, "challenge_client_salt" : client_salt, }) self.queue_server_packet(("hello", hello)) def sanitize_session_options(self, options): d = {} def number(k, v): return parse_number(int, k, v) OPTION_WHITELIST = {"compression_level" : number, "lz4" : parse_bool, "lzo" : parse_bool, "zlib" : parse_bool, "rencode" : parse_bool, "bencode" : parse_bool, "yaml" : parse_bool} for k,v in options.items(): parser = OPTION_WHITELIST.get(k) if parser: log("trying to add %s=%s using %s", k, v, parser) try: d[k] = parser(k, v) except Exception as e: log.warn("failed to parse value %s for %s using %s: %s", v, k, parser, e) return d def filter_client_caps(self, caps): fc = self.filter_caps(caps, 
("cipher", "challenge", "digest", "aliases", "compression", "lz4", "lz0", "zlib")) #the display string may override the username: username = self.disp_desc.get("username") if username: fc["username"] = username #update with options provided via config if any: fc.update(self.sanitize_session_options(self.session_options)) #add video proxies if any: fc["encoding.proxy.video"] = len(self.video_encoding_defs)>0 if self.video_encoding_defs: fc["encoding.proxy.video.encodings"] = self.video_encoding_defs return fc def filter_server_caps(self, caps): self.server_protocol.enable_encoder_from_caps(caps) return self.filter_caps(caps, ("aliases", )) def filter_caps(self, caps, prefixes): #removes caps that the proxy overrides / does not use: #(not very pythonic!) pcaps = {} removed = [] for k in caps.keys(): skip = len([e for e in prefixes if k.startswith(e)]) if skip==0: pcaps[k] = caps[k] else: removed.append(k) log("filtered out %s matching %s", removed, prefixes) #replace the network caps with the proxy's own: pcaps.update(flatten_dict(get_network_caps())) #then add the proxy info: updict(pcaps, "proxy", get_server_info(), flatten_dicts=True) pcaps["proxy"] = True pcaps["proxy.hostname"] = socket.gethostname() return pcaps def run_queue(self): log("run_queue() queue has %s items already in it", self.main_queue.qsize()) #process "idle_add"/"timeout_add" events in the main loop: while not self.exit: log("run_queue() size=%s", self.main_queue.qsize()) v = self.main_queue.get() if v is None: log("run_queue() None exit marker") break fn, args, kwargs = v log("run_queue() %s%s%s", fn, args, kwargs) try: v = fn(*args, **kwargs) if bool(v): #re-run it self.main_queue.put(v) except: log.error("error during main loop callback %s", fn, exc_info=True) self.exit = True #wait for connections to close down cleanly before we exit for i in range(10): if self.client_protocol._closed and self.server_protocol._closed: break if i==0: log.info("waiting for network connections to close") else: 
log("still waiting %i/10 - client.closed=%s, server.closed=%s", i+1, self.client_protocol._closed, self.server_protocol._closed) time.sleep(0.1) log.info("proxy instance %s stopped", os.getpid()) def stop(self, reason="proxy terminating", skip_proto=None): log.info("stop(%s, %s)", reason, skip_proto) self.exit = True try: self.control_socket.close() except: pass csc = self.control_socket_cleanup if csc: self.control_socket_cleanup = None csc() self.main_queue.put(None) #empty the main queue: q = Queue() q.put(None) self.main_queue = q #empty the encode queue: q = Queue() q.put(None) self.encode_queue = q for proto in (self.client_protocol, self.server_protocol): if proto and proto!=skip_proto: log("sending disconnect to %s", proto) proto.flush_then_close(["disconnect", SERVER_SHUTDOWN, reason]) def queue_client_packet(self, packet): log("queueing client packet: %s", packet[0]) self.client_packets.put(packet) self.client_protocol.source_has_more() def get_client_packet(self): #server wants a packet p = self.client_packets.get() log("sending to client: %s", p[0]) return p, None, None, self.client_packets.qsize()>0 def process_client_packet(self, proto, packet): packet_type = packet[0] log("process_client_packet: %s", packet_type) if packet_type==Protocol.CONNECTION_LOST: self.stop("client connection lost", proto) return elif packet_type=="disconnect": log("got disconnect from client: %s", packet[1]) if self.exit: self.client_protocol.close() else: self.stop("disconnect from client: %s" % packet[1]) elif packet_type=="set_deflate": #echo it back to the client: self.client_packets.put(packet) self.client_protocol.source_has_more() return elif packet_type=="hello": log.warn("Warning: invalid hello packet received after initial authentication (dropped)") return self.queue_server_packet(packet) def queue_server_packet(self, packet): log("queueing server packet: %s", packet[0]) self.server_packets.put(packet) self.server_protocol.source_has_more() def 
get_server_packet(self): #server wants a packet p = self.server_packets.get() log("sending to server: %s", p[0]) return p, None, None, self.server_packets.qsize()>0 def _packet_recompress(self, packet, index, name): if len(packet)>index: data = packet[index] if len(data)<512: packet[index] = str(data) return #FIXME: this is ugly and not generic! zlib = compression.use_zlib and self.caps.boolget("zlib", True) lz4 = compression.use_lz4 and self.caps.boolget("lz4", False) lzo = compression.use_lzo and self.caps.boolget("lzo", False) if zlib or lz4 or lzo: packet[index] = compressed_wrapper(name, data, zlib=zlib, lz4=lz4, lzo=lzo, can_inline=False) else: #prevent warnings about large uncompressed data packet[index] = Compressed("raw %s" % name, data, can_inline=True) def process_server_packet(self, proto, packet): packet_type = packet[0] log("process_server_packet: %s", packet_type) if packet_type==Protocol.CONNECTION_LOST: self.stop("server connection lost", proto) return elif packet_type=="disconnect": log("got disconnect from server: %s", packet[1]) if self.exit: self.server_protocol.close() else: self.stop("disconnect from server: %s" % packet[1]) elif packet_type=="hello": c = typedict(packet[1]) maxw, maxh = c.intpair("max_desktop_size", (4096, 4096)) caps = self.filter_server_caps(c) #add new encryption caps: if self.cipher: from xpra.net.crypto import crypto_backend_init, new_cipher_caps, DEFAULT_PADDING crypto_backend_init() padding_options = self.caps.strlistget("cipher.padding.options", [DEFAULT_PADDING]) auth_caps = new_cipher_caps(self.client_protocol, self.cipher, self.encryption_key, padding_options) caps.update(auth_caps) #may need to bump packet size: proto.max_packet_size = maxw*maxh*4*4 file_transfer = self.caps.boolget("file-transfer") and c.boolget("file-transfer") file_size_limit = max(self.caps.intget("file-size-limit"), c.intget("file-size-limit")) file_max_packet_size = int(file_transfer) * (1024 + file_size_limit*1024*1024) 
self.client_protocol.max_packet_size = max(self.client_protocol.max_packet_size, file_max_packet_size) self.server_protocol.max_packet_size = max(self.server_protocol.max_packet_size, file_max_packet_size) packet = ("hello", caps) elif packet_type=="info-response": #adds proxy info: #note: this is only seen by the client application #"xpra info" is a new connection, which talks to the proxy server... info = packet[1] info.update(self.get_proxy_info(proto)) elif packet_type=="lost-window": wid = packet[1] #mark it as lost so we can drop any current/pending frames self.lost_windows.add(wid) #queue it so it gets cleaned safely (for video encoders mostly): self.encode_queue.put(packet) #and fall through so tell the client immediately elif packet_type=="draw": #use encoder thread: self.encode_queue.put(packet) #which will queue the packet itself when done: return #we do want to reformat cursor packets... #as they will have been uncompressed by the network layer already: elif packet_type=="cursor": #packet = ["cursor", x, y, width, height, xhot, yhot, serial, pixels, name] #or: #packet = ["cursor", "png", x, y, width, height, xhot, yhot, serial, pixels, name] #or: #packet = ["cursor", ""] if len(packet)>=8: #hard to distinguish png cursors from normal cursors... 
try: int(packet[1]) self._packet_recompress(packet, 8, "cursor") except: self._packet_recompress(packet, 9, "cursor") elif packet_type=="window-icon": self._packet_recompress(packet, 5, "icon") elif packet_type=="send-file": if packet[6]: packet[6] = Compressed("file-data", packet[6]) elif packet_type=="send-file-chunk": if packet[3]: packet[3] = Compressed("file-chunk-data", packet[3]) elif packet_type=="challenge": from xpra.net.crypto import get_salt #client may have already responded to the challenge, #so we have to handle authentication from this end salt = packet[1] digest = packet[3] client_salt = get_salt(len(salt)) salt = xor_str(salt, client_salt) if digest!=b"hmac": self.stop("digest mode '%s' not supported", std(digest)) return password = self.disp_desc.get("password", self.session_options.get("password")) log("password from %s / %s = %s", self.disp_desc, self.session_options, password) if not password: self.stop("authentication requested by the server, but no password available for this session") return import hmac, hashlib password = strtobytes(password) salt = strtobytes(salt) challenge_response = hmac.HMAC(password, salt, digestmod=hashlib.md5).hexdigest() log.info("sending %s challenge response", digest) self.send_hello(challenge_response, client_salt) return self.queue_client_packet(packet) def encode_loop(self): """ thread for slower encoding related work """ while not self.exit: packet = self.encode_queue.get() if packet is None: return try: packet_type = packet[0] if packet_type=="lost-window": wid = packet[1] self.lost_windows.remove(wid) ve = self.video_encoders.get(wid) if ve: del self.video_encoders[wid] del self.video_encoders_last_used_time[wid] ve.clean() elif packet_type=="draw": #modify the packet with the video encoder: if self.process_draw(packet): #then send it as normal: self.queue_client_packet(packet) elif packet_type=="check-video-timeout": #not a real packet, this is added by the timeout check: wid = packet[1] ve = 
self.video_encoders.get(wid) now = time.time() idle_time = now-self.video_encoders_last_used_time.get(wid) if ve and idle_time>VIDEO_TIMEOUT: enclog("timing out the video encoder context for window %s", wid) #timeout is confirmed, we are in the encoding thread, #so it is now safe to clean it up: ve.clean() del self.video_encoders[wid] del self.video_encoders_last_used_time[wid] else: enclog.warn("unexpected encode packet: %s", packet_type) except: enclog.warn("error encoding packet", exc_info=True) def process_draw(self, packet): wid, x, y, width, height, encoding, pixels, _, rowstride, client_options = packet[1:11] #never modify mmap packets if encoding in ("mmap", "scroll"): return True #we have a proxy video packet: rgb_format = client_options.get("rgb_format", "") enclog("proxy draw: client_options=%s", client_options) def send_updated(encoding, compressed_data, updated_client_options): #update the packet with actual encoding data used: packet[6] = encoding packet[7] = compressed_data packet[10] = updated_client_options enclog("returning %s bytes from %s, options=%s", len(compressed_data), len(pixels), updated_client_options) return (wid not in self.lost_windows) def passthrough(strip_alpha=True): enclog("proxy draw: %s passthrough (rowstride: %s vs %s, strip alpha=%s)", rgb_format, rowstride, client_options.get("rowstride", 0), strip_alpha) if strip_alpha: #passthrough as plain RGB: Xindex = rgb_format.upper().find("X") if Xindex>=0 and len(rgb_format)==4: #force clear alpha (which may be garbage): newdata = bytearray(pixels) for i in range(len(pixels)/4): newdata[i*4+Xindex] = chr(255) packet[9] = client_options.get("rowstride", 0) cdata = bytes(newdata) else: cdata = pixels new_client_options = {"rgb_format" : rgb_format} else: #preserve cdata = pixels new_client_options = client_options wrapped = Compressed("%s pixels" % encoding, cdata) #FIXME: we should not assume that rgb32 is supported here... #(we may have to convert to rgb24..) 
return send_updated("rgb32", wrapped, new_client_options) proxy_video = client_options.get("proxy", False) if PASSTHROUGH and (encoding in ("rgb32", "rgb24") or proxy_video): #we are dealing with rgb data, so we can pass it through: return passthrough(proxy_video) elif not self.video_encoder_types or not client_options or not proxy_video: #ensure we don't try to re-compress the pixel data in the network layer: #(re-add the "compressed" marker that gets lost when we re-assemble packets) packet[7] = Compressed("%s pixels" % encoding, packet[7]) return True #video encoding: find existing encoder ve = self.video_encoders.get(wid) if ve: if ve in self.lost_windows: #we cannot clean the video encoder here, there may be more frames queue up #"lost-window" in encode_loop will take care of it safely return False #we must verify that the encoder is still valid #and scrap it if not (ie: when window is resized) if ve.get_width()!=width or ve.get_height()!=height: enclog("closing existing video encoder %s because dimensions have changed from %sx%s to %sx%s", ve, ve.get_width(), ve.get_height(), width, height) ve.clean() ve = None elif ve.get_encoding()!=encoding: enclog("closing existing video encoder %s because encoding has changed from %s to %s", ve.get_encoding(), encoding) ve.clean() ve = None #scaling and depth are proxy-encoder attributes: scaling = client_options.get("scaling", (1, 1)) depth = client_options.get("depth", 24) rowstride = client_options.get("rowstride", rowstride) quality = client_options.get("quality", -1) speed = client_options.get("speed", -1) timestamp = client_options.get("timestamp") image = ImageWrapper(x, y, width, height, pixels, rgb_format, depth, rowstride, planes=ImageWrapper.PACKED) if timestamp is not None: image.set_timestamp(timestamp) #the encoder options are passed through: encoder_options = client_options.get("options", {}) if not ve: #make a new video encoder: spec = self._find_video_encoder(encoding, rgb_format) if spec is None: #no 
video encoder! enc_pillow = get_codec("enc_pillow") if not enc_pillow: from xpra.server.picture_encode import warn_encoding_once warn_encoding_once("no-video-no-PIL", "no video encoder found for rgb format %s, sending as plain RGB!" % rgb_format) return passthrough(True) enclog("no video encoder available: sending as jpeg") coding, compressed_data, client_options, _, _, _, _ = enc_pillow.encode("jpeg", image, quality, speed, False) return send_updated(coding, compressed_data, client_options) enclog("creating new video encoder %s for window %s", spec, wid) ve = spec.make_instance() #dst_formats is specified with first frame only: dst_formats = client_options.get("dst_formats") if dst_formats is not None: #save it in case we timeout the video encoder, #so we can instantiate it again, even from a frame no>1 self.video_encoders_dst_formats = dst_formats else: assert self.video_encoders_dst_formats, "BUG: dst_formats not specified for proxy and we don't have it either" dst_formats = self.video_encoders_dst_formats ve.init_context(width, height, rgb_format, dst_formats, encoding, quality, speed, scaling, {}) self.video_encoders[wid] = ve self.video_encoders_last_used_time[wid] = time.time() #just to make sure this is always set #actual video compression: enclog("proxy compression using %s with quality=%s, speed=%s", ve, quality, speed) data, out_options = ve.compress_image(image, quality, speed, encoder_options) #pass through some options if we don't have them from the encoder #(maybe we should also use the "pts" from the real server?) for k in ("timestamp", "rgb_format", "depth", "csc"): if k not in out_options and k in client_options: out_options[k] = client_options[k] self.video_encoders_last_used_time[wid] = time.time() return send_updated(ve.get_encoding(), Compressed(encoding, data), out_options) def timeout_video_encoders(self): #have to be careful as another thread may come in... 
#so we just ask the encode thread (which deals with encoders already) #to do what may need to be done if we find a timeout: now = time.time() for wid in list(self.video_encoders_last_used_time.keys()): idle_time = int(now-self.video_encoders_last_used_time.get(wid)) if idle_time is None: continue enclog("timeout_video_encoders() wid=%s, idle_time=%s", wid, idle_time) if idle_time and idle_time>VIDEO_TIMEOUT: self.encode_queue.put(["check-video-timeout", wid]) return True #run again def _find_video_encoder(self, encoding, rgb_format): #try the one specified first, then all the others: try_encodings = [encoding] + [x for x in self.video_helper.get_encodings() if x!=encoding] for encoding in try_encodings: colorspace_specs = self.video_helper.get_encoder_specs(encoding) especs = colorspace_specs.get(rgb_format) if len(especs)==0: continue for etype in self.video_encoder_types: for spec in especs: if etype==spec.codec_type: enclog("_find_video_encoder(%s, %s)=%s", encoding, rgb_format, spec) return spec enclog("_find_video_encoder(%s, %s) not found", encoding, rgb_format) return None def get_window_info(self): info = {} now = time.time() for wid, encoder in self.video_encoders.items(): einfo = encoder.get_info() einfo["idle_time"] = int(now-self.video_encoders_last_used_time.get(wid, 0)) info[wid] = { "proxy" : { "" : encoder.get_type(), "encoder" : einfo }, } enclog("get_window_info()=%s", info) return info
class subprocess_caller(object):
    """
    This is the caller side, wrapping the subprocess.
    You can call send() to pass packets to it
     which will get converted to method calls on the receiving end,
    You can register for signals, in which case your callbacks will be called
     when those signals are forwarded back.
    (there is no validation of which signals are valid or not)
    """

    def __init__(self, description="wrapper"):
        self.process = None
        self.protocol = None
        self.command = None
        self.description = description
        self.send_queue = Queue()
        self.signal_callbacks = {}
        self.large_packets = []
        #hook a default packet handlers:
        self.connect(Protocol.CONNECTION_LOST, self.connection_lost)
        self.connect(Protocol.GIBBERISH, self.gibberish)

    def connect(self, signal, cb, *args):
        """ gobject style signal registration """
        self.signal_callbacks.setdefault(signal, []).append((cb, list(args)))

    def subprocess_exit(self, *args):
        #beware: this may fire more than once!
        log("subprocess_exit%s command=%s", args, self.command)
        self._fire_callback("exit")

    def start(self):
        self.process = self.exec_subprocess()
        self.protocol = self.make_protocol()
        self.protocol.start()

    def make_protocol(self):
        """ Returns a Protocol instance connected to the subprocess
            via its stdin / stdout pipes. """
        conn = TwoFileConnection(self.process.stdin, self.process.stdout, abort_test=None, target=self.description, info=self.description, close_cb=self.subprocess_exit)
        conn.timeout = 0
        protocol = Protocol(gobject, conn, self.process_packet, get_packet_cb=self.get_packet)
        #we assume the other end has the same encoders (which is reasonable):
        #TODO: fallback to bencoder
        try:
            protocol.enable_encoder("rencode")
        except Exception as e:
            log.warn("failed to enable rencode: %s", e)
            protocol.enable_encoder("bencode")
        #we assume this is local, so no compression:
        protocol.enable_compressor("none")
        protocol.large_packets = self.large_packets
        return protocol

    def exec_subprocess(self):
        """ Starts the wrapped subprocess and registers it with the child reaper. """
        kwargs = self.exec_kwargs()
        log("exec_subprocess() command=%s, kwargs=%s", self.command, kwargs)
        proc = subprocess.Popen(self.command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=sys.stderr.fileno(), env=self.get_env(), **kwargs)
        getChildReaper().add_process(proc, self.description, self.command, True, True, callback=self.subprocess_exit)
        return proc

    def get_env(self):
        """ Returns the environment for the subprocess. """
        env = os.environ.copy()
        env["XPRA_SKIP_UI"] = "1"
        env["XPRA_LOG_PREFIX"] = "%s " % self.description
        #let's make things more complicated than they should be:
        #on win32, the environment can end up containing unicode, and subprocess chokes on it
        for k,v in env.items():
            try:
                env[k] = bytestostr(v.encode("utf8"))
            except Exception:
                env[k] = bytestostr(v)
        return env

    def exec_kwargs(self):
        """ Returns the platform-specific keyword arguments for Popen. """
        if os.name=="posix":
            return {"close_fds" : True}
        elif sys.platform.startswith("win"):
            if not WIN32_SHOWWINDOW:
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                return {"startupinfo" : startupinfo}
        return {}

    def cleanup(self):
        self.stop()

    def stop(self):
        self.stop_process()
        #call via idle_add to prevent deadlocks on win32!
        gobject.idle_add(self.stop_protocol)

    def stop_process(self):
        log("stop() sending stop request to %s", self.description)
        proc = self.process
        if proc and proc.poll() is None:
            try:
                proc.terminate()
                self.process = None
            except Exception as e:
                log.warn("failed to stop the wrapped subprocess %s: %s", proc, e)

    def stop_protocol(self):
        p = self.protocol
        if p:
            self.protocol = None
            log("%s.stop() calling %s", self, p.close)
            try:
                p.close()
            except Exception as e:
                #fixed: the format string was missing a placeholder for the error
                log.warn("failed to close the subprocess connection %s: %s", p, e)

    def connection_lost(self, *args):
        log("connection_lost%s", args)
        self.stop()

    def gibberish(self, *args):
        log("gibberish%s", args)
        self.stop()

    def get_packet(self):
        #pulls the next packet to send to the subprocess, if any:
        try:
            item = self.send_queue.get(False)
        except Exception:
            item = None
        return (item, None, None, self.send_queue.qsize()>0)

    def send(self, *packet_data):
        self.send_queue.put(packet_data)
        p = self.protocol
        if p:
            p.source_has_more()

    def process_packet(self, proto, packet):
        if DEBUG_WRAPPER:
            log("process_packet(%s, %s)", proto, [str(x)[:32] for x in packet])
        signal_name = bytestostr(packet[0])
        self._fire_callback(signal_name, packet[1:])

    def _fire_callback(self, signal_name, extra_args=()):
        """ Schedules all the callbacks registered for this signal.
            (fixed: the default for extra_args was a shared mutable list) """
        callbacks = self.signal_callbacks.get(signal_name)
        log("firing callback for %s: %s", signal_name, callbacks)
        if callbacks:
            for cb, args in callbacks:
                try:
                    all_args = list(args) + list(extra_args)
                    gobject.idle_add(cb, self, *all_args)
                except Exception:
                    log.error("error processing callback %s for %s packet", cb, signal_name, exc_info=True)
class ProxyInstanceProcess(Process): def __init__(self, uid, gid, env_options, session_options, socket_dir, video_encoder_modules, csc_modules, client_conn, client_state, cipher, encryption_key, server_conn, caps, message_queue): Process.__init__(self, name=str(client_conn)) self.uid = uid self.gid = gid self.env_options = env_options self.session_options = session_options self.socket_dir = socket_dir self.video_encoder_modules = video_encoder_modules self.csc_modules = csc_modules self.client_conn = client_conn self.client_state = client_state self.cipher = cipher self.encryption_key = encryption_key self.server_conn = server_conn self.caps = caps log("ProxyProcess%s", (uid, gid, env_options, session_options, socket_dir, video_encoder_modules, csc_modules, client_conn, repr_ellipsized(str(client_state)), cipher, encryption_key, server_conn, "%s: %s.." % (type(caps), repr_ellipsized(str(caps))), message_queue)) self.client_protocol = None self.server_protocol = None self.exit = False self.main_queue = None self.message_queue = message_queue self.encode_queue = None #holds draw packets to encode self.encode_thread = None self.video_encoding_defs = None self.video_encoders = None self.video_encoders_last_used_time = None self.video_encoder_types = None self.video_helper = None self.lost_windows = None #for handling the local unix domain socket: self.control_socket_cleanup = None self.control_socket = None self.control_socket_thread = None self.control_socket_path = None self.potential_protocols = [] self.max_connections = MAX_CONCURRENT_CONNECTIONS def server_message_queue(self): while True: log("waiting for server message on %s", self.message_queue) m = self.message_queue.get() log("received proxy server message: %s", m) if m=="stop": self.stop("proxy server request") return elif m=="socket-handover-complete": log("setting sockets to blocking mode: %s", (self.client_conn, self.server_conn)) #set sockets to blocking mode: set_blocking(self.client_conn) 
set_blocking(self.server_conn) else: log.error("unexpected proxy server message: %s", m) def signal_quit(self, signum, frame): log.info("") log.info("proxy process pid %s got signal %s, exiting", os.getpid(), SIGNAMES.get(signum, signum)) self.exit = True signal.signal(signal.SIGINT, deadly_signal) signal.signal(signal.SIGTERM, deadly_signal) self.stop(SIGNAMES.get(signum, signum)) def idle_add(self, fn, *args, **kwargs): #we emulate gobject's idle_add using a simple queue self.main_queue.put((fn, args, kwargs)) def timeout_add(self, timeout, fn, *args, **kwargs): #emulate gobject's timeout_add using idle add and a Timer #using custom functions to cancel() the timer when needed def idle_exec(): v = fn(*args, **kwargs) if bool(v): self.timeout_add(timeout, fn, *args, **kwargs) return False def timer_exec(): #just run via idle_add: self.idle_add(idle_exec) Timer(timeout/1000.0, timer_exec).start() def run(self): log("ProxyProcess.run() pid=%s, uid=%s, gid=%s", os.getpid(), getuid(), getgid()) setuidgid(self.uid, self.gid) if self.env_options: #TODO: whitelist env update? 
os.environ.update(self.env_options) self.video_init() log.info("new proxy instance started") log.info(" for client %s", self.client_conn) log.info(" and server %s", self.server_conn) signal.signal(signal.SIGTERM, self.signal_quit) signal.signal(signal.SIGINT, self.signal_quit) log("registered signal handler %s", self.signal_quit) start_thread(self.server_message_queue, "server message queue") if not self.create_control_socket(): #TODO: should send a message to the client return self.control_socket_thread = start_thread(self.control_socket_loop, "control") self.main_queue = Queue() #setup protocol wrappers: self.server_packets = Queue(PROXY_QUEUE_SIZE) self.client_packets = Queue(PROXY_QUEUE_SIZE) self.client_protocol = Protocol(self, self.client_conn, self.process_client_packet, self.get_client_packet) self.client_protocol.restore_state(self.client_state) self.server_protocol = Protocol(self, self.server_conn, self.process_server_packet, self.get_server_packet) #server connection tweaks: self.server_protocol.large_packets.append("draw") self.server_protocol.large_packets.append("window-icon") self.server_protocol.large_packets.append("keymap-changed") self.server_protocol.large_packets.append("server-settings") if self.caps.boolget("file-transfer"): self.client_protocol.large_packets.append("send-file") self.client_protocol.large_packets.append("send-file-chunk") self.server_protocol.large_packets.append("send-file") self.server_protocol.large_packets.append("send-file-chunk") self.server_protocol.set_compression_level(self.session_options.get("compression_level", 0)) self.server_protocol.enable_default_encoder() self.lost_windows = set() self.encode_queue = Queue() self.encode_thread = start_thread(self.encode_loop, "encode") log("starting network threads") self.server_protocol.start() self.client_protocol.start() self.send_hello() self.timeout_add(VIDEO_TIMEOUT*1000, self.timeout_video_encoders) try: self.run_queue() except KeyboardInterrupt as e: 
self.stop(str(e)) finally: log("ProxyProcess.run() ending %s", os.getpid()) def video_init(self): enclog("video_init() loading codecs") load_codecs(decoders=False) enclog("video_init() will try video encoders: %s", csv(self.video_encoder_modules) or "none") self.video_helper = getVideoHelper() #only use video encoders (no CSC supported in proxy) self.video_helper.set_modules(video_encoders=self.video_encoder_modules) self.video_helper.init() self.video_encoding_defs = {} self.video_encoders = {} self.video_encoders_dst_formats = [] self.video_encoders_last_used_time = {} self.video_encoder_types = [] #figure out which encoders we want to proxy for (if any): encoder_types = set() for encoding in self.video_helper.get_encodings(): colorspace_specs = self.video_helper.get_encoder_specs(encoding) for colorspace, especs in colorspace_specs.items(): if colorspace not in ("BGRX", "BGRA", "RGBX", "RGBA"): #only deal with encoders that can handle plain RGB directly continue for spec in especs: #ie: video_spec("x264") spec_props = spec.to_dict() del spec_props["codec_class"] #not serializable! 
                    spec_props["score_boost"] = 50      #we want to win scoring so we get used ahead of other encoders
                    spec_props["max_instances"] = 3     #limit to 3 video streams we proxy for (we really want 2,
                                                        # but because of races with garbage collection, we need to allow more)
                    #store it in encoding defs:
                    self.video_encoding_defs.setdefault(encoding, {}).setdefault(colorspace, []).append(spec_props)
                    encoder_types.add(spec.codec_type)
        enclog("encoder types found: %s", tuple(encoder_types))
        #remove duplicates and use preferred order:
        order = PREFERRED_ENCODER_ORDER[:]
        for x in list(encoder_types):
            if x not in order:
                order.append(x)
        self.video_encoder_types = [x for x in order if x in encoder_types]
        enclog.info("proxy video encoders: %s", ", ".join(self.video_encoder_types))

    def create_control_socket(self):
        """ Create the unix domain socket used to query / control this proxy
            instance ("xpra info" / "xpra stop").
            Returns True on success, False if the socket could not be set up.
        """
        assert self.socket_dir
        dotxpra = DotXpra(self.socket_dir)
        sockpath = dotxpra.socket_path(":proxy-%s" % os.getpid())
        state = dotxpra.get_server_state(sockpath)
        if state in (DotXpra.LIVE, DotXpra.UNKNOWN):
            log.error("Error: you already have a proxy server running at '%s'", sockpath)
            log.error(" the control socket will not be created")
            return False
        log("create_control_socket: socket path='%s', uid=%i, gid=%i", sockpath, getuid(), getgid())
        try:
            sock, self.control_socket_cleanup = create_unix_domain_socket(sockpath, None, 0o600)
            sock.listen(5)
        except Exception as e:
            log("create_unix_domain_socket failed for '%s'", sockpath, exc_info=True)
            log.error("Error: failed to setup control socket '%s':", sockpath)
            log.error(" %s", e)
            return False
        self.control_socket = sock
        self.control_socket_path = sockpath
        log.info("proxy instance now also available using unix domain socket:")
        log.info(" %s", self.control_socket_path)
        return True

    def control_socket_loop(self):
        """ Accept-loop for the control socket, runs until self.exit is set.
            NOTE(review): accept() blocks, so an exit request is only noticed
            after the next connection comes in - confirm intended. """
        while not self.exit:
            log("waiting for connection on %s", self.control_socket_path)
            sock, address = self.control_socket.accept()
            self.new_control_connection(sock, address)

    def new_control_connection(self, sock, address):
        """ Wrap a newly accepted control-socket connection in a Protocol
            and schedule a timeout check for it. """
        if len(self.potential_protocols)>=self.max_connections:
            log.error("too many connections (%s), ignoring new one", len(self.potential_protocols))
            sock.close()
            return  True
        try:
            peername = sock.getpeername()
        #NOTE(review): bare except hides unexpected errors - narrow to socket.error if possible:
        except:
            peername = str(address)
        sockname = sock.getsockname()
        target = peername or sockname
        #sock.settimeout(0)
        log("new_control_connection() sock=%s, sockname=%s, address=%s, peername=%s", sock, sockname, address, peername)
        sc = SocketConnection(sock, sockname, address, target, "unix-domain")
        log.info("New proxy instance control connection received: %s", sc)
        protocol = Protocol(self, sc, self.process_control_packet)
        protocol.large_packets.append("info-response")
        self.potential_protocols.append(protocol)
        protocol.enable_default_encoder()
        protocol.start()
        #give the peer SOCKET_TIMEOUT seconds to send a valid hello:
        self.timeout_add(SOCKET_TIMEOUT*1000, self.verify_connection_accepted, protocol)
        return True

    def verify_connection_accepted(self, protocol):
        #disconnect connections that never sent a valid request in time:
        if not protocol._closed and protocol in self.potential_protocols:
            log.error("connection timedout: %s", protocol)
            self.send_disconnect(protocol, LOGIN_TIMEOUT)

    def process_control_packet(self, proto, packet):
        #top-level packet handler for the control socket:
        #errors are reported back to the peer rather than crashing the proxy
        try:
            self.do_process_control_packet(proto, packet)
        except Exception as e:
            log.error("error processing control packet", exc_info=True)
            self.send_disconnect(proto, CONTROL_COMMAND_ERROR, str(e))

    def do_process_control_packet(self, proto, packet):
        """ Handle the small set of requests the control socket accepts:
            'info', 'version' and 'stop' (all via a 'hello' packet). """
        log("process_control_packet(%s, %s)", proto, packet)
        packet_type = packet[0]
        if packet_type==Protocol.CONNECTION_LOST:
            log.info("Connection lost")
            if proto in self.potential_protocols:
                self.potential_protocols.remove(proto)
            return
        if packet_type=="hello":
            caps = typedict(packet[1])
            if caps.boolget("challenge"):
                #the control socket is unauthenticated by design (unix socket permissions):
                self.send_disconnect(proto, AUTHENTICATION_ERROR, "this socket does not use authentication")
                return
            if caps.get("info_request", False):
                proto.send_now(("hello", self.get_proxy_info(proto)))
                self.timeout_add(5*1000, self.send_disconnect, proto, CLIENT_EXIT_TIMEOUT, "info sent")
                return
            elif caps.get("stop_request", False):
                self.stop("socket request", None)
                return
            elif caps.get("version_request", False):
                from xpra import __version__
                proto.send_now(("hello", {"version" : __version__}))
                self.timeout_add(5*1000, self.send_disconnect, proto, CLIENT_EXIT_TIMEOUT, "version sent")
                return
        self.send_disconnect(proto, CONTROL_COMMAND_ERROR, "this socket only handles 'info', 'version' and 'stop' requests")

    def send_disconnect(self, proto, reason, *extra):
        #send a disconnect packet then force-close the connection shortly after:
        log("send_disconnect(%s, %s, %s)", proto, reason, extra)
        if proto._closed:
            return
        proto.send_now(["disconnect", reason]+list(extra))
        self.timeout_add(1000, self.force_disconnect, proto)

    def force_disconnect(self, proto):
        proto.close()

    def get_proxy_info(self, proto):
        """ Info structure returned for 'info' requests on the control socket. """
        sinfo = {}
        sinfo.update(get_server_info())
        sinfo.update(get_thread_info(proto))
        return {"proxy" : {
                           "version"    : local_version,
                           ""           : sinfo,
                           },
                "window" : self.get_window_info(),
                }

    def send_hello(self, challenge_response=None, client_salt=None):
        #forward the (filtered) client hello to the real server,
        #optionally adding our challenge response:
        hello = self.filter_client_caps(self.caps)
        if challenge_response:
            hello.update({
                          "challenge_response"      : challenge_response,
                          "challenge_client_salt"   : client_salt,
                          })
        self.queue_server_packet(("hello", hello))

    def sanitize_session_options(self, options):
        """ Only let whitelisted session options through,
            each parsed / validated with its own parser. """
        d = {}
        def number(k, v):
            return parse_number(int, k, v)
        OPTION_WHITELIST = {"compression_level" : number,
                            "lz4"               : parse_bool,
                            "lzo"               : parse_bool,
                            "zlib"              : parse_bool,
                            "rencode"           : parse_bool,
                            "bencode"           : parse_bool,
                            "yaml"              : parse_bool}
        for k,v in options.items():
            parser = OPTION_WHITELIST.get(k)
            if parser:
                log("trying to add %s=%s using %s", k, v, parser)
                try:
                    d[k] = parser(k, v)
                except Exception as e:
                    log.warn("failed to parse value %s for %s using %s: %s", v, k, parser, e)
        return d

    def filter_client_caps(self, caps):
        """ Strip / override the client capabilities the proxy handles itself
            before forwarding the hello to the real server. """
        #NOTE(review): "lz0" looks like a typo for "lzo" - confirm against the caps actually sent
        fc = self.filter_caps(caps, ("cipher", "challenge", "digest", "aliases", "compression", "lz4", "lz0", "zlib"))
        #update with options provided via config if any:
        fc.update(self.sanitize_session_options(self.session_options))
        #add video proxies if any:
        fc["encoding.proxy.video"] = len(self.video_encoding_defs)>0
        if self.video_encoding_defs:
            fc["encoding.proxy.video.encodings"] = self.video_encoding_defs
        return fc

    def filter_server_caps(self, caps):
        #strip the server caps the proxy handles itself before forwarding to the client:
        self.server_protocol.enable_encoder_from_caps(caps)
        return self.filter_caps(caps, ("aliases", ))

    def filter_caps(self, caps, prefixes):
        #removes caps that the proxy overrides / does not use:
        #(not very pythonic!)
        pcaps = {}
        removed = []
        for k in caps.keys():
            skip = len([e for e in prefixes if k.startswith(e)])
            if skip==0:
                pcaps[k] = caps[k]
            else:
                removed.append(k)
        log("filtered out %s matching %s", removed, prefixes)
        #replace the network caps with the proxy's own:
        pcaps.update(flatten_dict(get_network_caps()))
        #then add the proxy info:
        updict(pcaps, "proxy", get_server_info(), flatten_dicts=True)
        pcaps["proxy"] = True
        pcaps["proxy.hostname"] = socket.gethostname()
        return pcaps

    def run_queue(self):
        """ Main loop of the proxy instance: executes the callbacks posted
            via idle_add/timeout_add until a None marker or self.exit,
            then waits (up to 1s) for both connections to close. """
        log("run_queue() queue has %s items already in it", self.main_queue.qsize())
        #process "idle_add"/"timeout_add" events in the main loop:
        while not self.exit:
            log("run_queue() size=%s", self.main_queue.qsize())
            v = self.main_queue.get()
            if v is None:
                log("run_queue() None exit marker")
                break
            fn, args, kwargs = v
            log("run_queue() %s%s%s", fn, args, kwargs)
            try:
                v = fn(*args, **kwargs)
                if bool(v):
                    #re-run it
                    self.main_queue.put(v)
            #NOTE(review): bare except - would also swallow KeyboardInterrupt/SystemExit:
            except:
                log.error("error during main loop callback %s", fn, exc_info=True)
        self.exit = True
        #wait for connections to close down cleanly before we exit
        for i in range(10):
            if self.client_protocol._closed and self.server_protocol._closed:
                break
            if i==0:
                log.info("waiting for network connections to close")
            else:
                log("still waiting %i/10 - client.closed=%s, server.closed=%s", i+1, self.client_protocol._closed, self.server_protocol._closed)
            time.sleep(0.1)
        log.info("proxy instance %s stopped", os.getpid())

    def stop(self, reason="proxy terminating", skip_proto=None):
        """ Shut the proxy instance down:
            close the control socket, unblock the worker queues with None
            markers and send a disconnect to both ends (except skip_proto). """
        log.info("stop(%s, %s)", reason, skip_proto)
        self.exit = True
        try:
            self.control_socket.close()
        except:
            pass
        csc = self.control_socket_cleanup
        if csc:
            self.control_socket_cleanup = None
            csc()
        self.main_queue.put(None)
        #empty the main queue:
        q = Queue()
        q.put(None)
        self.main_queue = q
        #empty the encode queue:
        q = Queue()
        q.put(None)
        self.encode_queue = q
        for proto in (self.client_protocol, self.server_protocol):
            if proto and proto!=skip_proto:
                log("sending disconnect to %s", proto)
                proto.flush_then_close(["disconnect", SERVER_SHUTDOWN, reason])

    def queue_client_packet(self, packet):
        #queue a packet for delivery to the client and wake its protocol:
        log("queueing client packet: %s", packet[0])
        self.client_packets.put(packet)
        self.client_protocol.source_has_more()

    def get_client_packet(self):
        #called by the client protocol when it wants the next packet
        p = self.client_packets.get()
        log("sending to client: %s", p[0])
        return p, None, None, self.client_packets.qsize()>0

    def process_client_packet(self, proto, packet):
        """ Handle a packet received from the client:
            intercept connection / disconnect / set_deflate / hello,
            forward everything else to the server. """
        packet_type = packet[0]
        log("process_client_packet: %s", packet_type)
        if packet_type==Protocol.CONNECTION_LOST:
            self.stop("client connection lost", proto)
            return
        elif packet_type=="disconnect":
            log("got disconnect from client: %s", packet[1])
            if self.exit:
                self.client_protocol.close()
            else:
                self.stop("disconnect from client: %s" % packet[1])
        elif packet_type=="set_deflate":
            #echo it back to the client:
            self.client_packets.put(packet)
            self.client_protocol.source_has_more()
            return
        elif packet_type=="hello":
            log.warn("Warning: invalid hello packet received after initial authentication (dropped)")
            return
        self.queue_server_packet(packet)

    def queue_server_packet(self, packet):
        #queue a packet for delivery to the server and wake its protocol:
        log("queueing server packet: %s", packet[0])
        self.server_packets.put(packet)
        self.server_protocol.source_has_more()

    def get_server_packet(self):
        #called by the server protocol when it wants the next packet
        p = self.server_packets.get()
        log("sending to server: %s", p[0])
        return p, None, None, self.server_packets.qsize()>0

    def _packet_recompress(self, packet, index, name):
        """ Re-compress the payload found at packet[index] (it was
            decompressed by the network layer when the proxy received it),
            or wrap it so the protocol layer leaves it alone. """
        if len(packet)>index:
            data = packet[index]
            if len(data)<512:
                #small enough to send uncompressed:
                packet[index] = str(data)
                return
            #FIXME: this is ugly and not generic!
            #pick whichever compressors both ends support:
            zlib = compression.use_zlib and self.caps.boolget("zlib", True)
            lz4 = compression.use_lz4 and self.caps.boolget("lz4", False)
            lzo = compression.use_lzo and self.caps.boolget("lzo", False)
            if zlib or lz4 or lzo:
                packet[index] = compressed_wrapper(name, data, zlib=zlib, lz4=lz4, lzo=lzo, can_inline=False)
            else:
                #prevent warnings about large uncompressed data
                packet[index] = Compressed("raw %s" % name, data, can_inline=True)

    def process_server_packet(self, proto, packet):
        """ Handle a packet received from the real server:
            intercept hello / challenge / draw / lost-window etc,
            re-compress payloads where needed,
            then forward to the client (unless handled asynchronously). """
        packet_type = packet[0]
        log("process_server_packet: %s", packet_type)
        if packet_type==Protocol.CONNECTION_LOST:
            self.stop("server connection lost", proto)
            return
        elif packet_type=="disconnect":
            log("got disconnect from server: %s", packet[1])
            if self.exit:
                self.server_protocol.close()
            else:
                self.stop("disconnect from server: %s" % packet[1])
        elif packet_type=="hello":
            c = typedict(packet[1])
            maxw, maxh = c.intpair("max_desktop_size", (4096, 4096))
            caps = self.filter_server_caps(c)
            #add new encryption caps:
            if self.cipher:
                from xpra.net.crypto import crypto_backend_init, new_cipher_caps, DEFAULT_PADDING
                crypto_backend_init()
                padding_options = self.caps.strlistget("cipher.padding.options", [DEFAULT_PADDING])
                auth_caps = new_cipher_caps(self.client_protocol, self.cipher, self.encryption_key, padding_options)
                caps.update(auth_caps)
            #may need to bump packet size:
            proto.max_packet_size = maxw*maxh*4*4
            file_transfer = self.caps.boolget("file-transfer") and c.boolget("file-transfer")
            file_size_limit = max(self.caps.intget("file-size-limit"), c.intget("file-size-limit"))
            file_max_packet_size = int(file_transfer) * (1024 + file_size_limit*1024*1024)
            self.client_protocol.max_packet_size = max(self.client_protocol.max_packet_size, file_max_packet_size)
            self.server_protocol.max_packet_size = max(self.server_protocol.max_packet_size, file_max_packet_size)
            packet = ("hello", caps)
        elif packet_type=="info-response":
            #adds proxy info:
            #note: this is only seen by the client application
            #"xpra info" is a new connection, which talks to the proxy server...
            info = packet[1]
            info.update(self.get_proxy_info(proto))
        elif packet_type=="lost-window":
            wid = packet[1]
            #mark it as lost so we can drop any current/pending frames
            self.lost_windows.add(wid)
            #queue it so it gets cleaned safely (for video encoders mostly):
            self.encode_queue.put(packet)
            #and fall through so tell the client immediately
        elif packet_type=="draw":
            #use encoder thread:
            self.encode_queue.put(packet)
            #which will queue the packet itself when done:
            return
        #we do want to reformat cursor packets...
        #as they will have been uncompressed by the network layer already:
        elif packet_type=="cursor":
            #packet = ["cursor", x, y, width, height, xhot, yhot, serial, pixels, name]
            #or:
            #packet = ["cursor", "png", x, y, width, height, xhot, yhot, serial, pixels, name]
            #or:
            #packet = ["cursor", ""]
            if len(packet)>=8:
                #hard to distinguish png cursors from normal cursors...
                try:
                    int(packet[1])
                    self._packet_recompress(packet, 8, "cursor")
                except:
                    self._packet_recompress(packet, 9, "cursor")
        elif packet_type=="window-icon":
            self._packet_recompress(packet, 5, "icon")
        elif packet_type=="send-file":
            if packet[6]:
                packet[6] = Compressed("file-data", packet[6])
        elif packet_type=="send-file-chunk":
            if packet[3]:
                packet[3] = Compressed("file-chunk-data", packet[3])
        elif packet_type=="challenge":
            from xpra.net.crypto import get_salt
            #client may have already responded to the challenge,
            #so we have to handle authentication from this end
            salt = packet[1]
            digest = packet[3]
            client_salt = get_salt(len(salt))
            salt = xor_str(salt, client_salt)
            if digest!=b"hmac":
                #NOTE(review): stop(reason, skip_proto) does not format its arguments -
                #the '%s' is never substituted and std(digest) is passed as skip_proto;
                #likely meant: self.stop("digest mode '%s' not supported" % std(digest))
                self.stop("digest mode '%s' not supported", std(digest))
                return
            password = self.session_options.get("password")
            if not password:
                self.stop("authentication requested by the server, but no password available for this session")
                return
            import hmac, hashlib
            password = strtobytes(password)
            salt = strtobytes(salt)
            challenge_response = hmac.HMAC(password, salt, digestmod=hashlib.md5).hexdigest()
            log.info("sending %s challenge response", digest)
            self.send_hello(challenge_response, client_salt)
            return
        self.queue_client_packet(packet)

    def encode_loop(self):
        """ thread for slower encoding related work """
        while not self.exit:
            packet = self.encode_queue.get()
            if packet is None:
                #end of queue marker:
                return
            try:
                packet_type = packet[0]
                if packet_type=="lost-window":
                    #now that we are in the encode thread,
                    #it is safe to clean the window's video encoder:
                    wid = packet[1]
                    self.lost_windows.remove(wid)
                    ve = self.video_encoders.get(wid)
                    if ve:
                        del self.video_encoders[wid]
                        del self.video_encoders_last_used_time[wid]
                        ve.clean()
                elif packet_type=="draw":
                    #modify the packet with the video encoder:
                    if self.process_draw(packet):
                        #then send it as normal:
                        self.queue_client_packet(packet)
                elif packet_type=="check-video-timeout":
                    #not a real packet, this is added by the timeout check:
                    wid = packet[1]
                    ve = self.video_encoders.get(wid)
                    now = time.time()
                    idle_time = now-self.video_encoders_last_used_time.get(wid)
                    if ve and idle_time>VIDEO_TIMEOUT:
                        enclog("timing out the video encoder context for window %s", wid)
                        #timeout is confirmed, we are in the encoding thread,
                        #so it is now safe to clean it up:
                        ve.clean()
                        del self.video_encoders[wid]
                        del self.video_encoders_last_used_time[wid]
                else:
                    enclog.warn("unexpected encode packet: %s", packet_type)
            except:
                enclog.warn("error encoding packet", exc_info=True)

    def process_draw(self, packet):
        """ Process one 'draw' packet in the encode thread:
            either pass the pixels through (optionally clearing the alpha
            channel), or re-compress them with a proxy video encoder.
            The packet is modified in place; returns True if it should
            then be forwarded to the client. """
        wid, x, y, width, height, encoding, pixels, _, rowstride, client_options = packet[1:11]
        #never modify mmap packets
        if encoding=="mmap":
            return True
        #we have a proxy video packet:
        rgb_format = client_options.get("rgb_format", "")
        enclog("proxy draw: client_options=%s", client_options)

        def send_updated(encoding, compressed_data, updated_client_options):
            #update the packet with actual encoding data used:
            packet[6] = encoding
            packet[7] = compressed_data
            packet[10] = updated_client_options
            enclog("returning %s bytes from %s, options=%s", len(compressed_data), len(pixels), updated_client_options)
            #drop the packet if the window was lost in the meantime:
            return (wid not in self.lost_windows)

        def passthrough(strip_alpha=True):
            enclog("proxy draw: %s passthrough (rowstride: %s vs %s, strip alpha=%s)", rgb_format, rowstride, client_options.get("rowstride", 0), strip_alpha)
            if strip_alpha:
                #passthrough as plain RGB:
                Xindex = rgb_format.upper().find("X")
                if Xindex>=0 and len(rgb_format)==4:
                    #force clear alpha (which may be garbage):
                    newdata = bytearray(pixels)
                    #NOTE(review): under Python 3, len(pixels)/4 is a float so range() raises,
                    #and bytearray item assignment needs an int, not chr(255) - this branch
                    #looks Python-2 only; likely needs // and 255. Confirm target runtime.
                    for i in range(len(pixels)/4):
                        newdata[i*4+Xindex] = chr(255)
                    packet[9] = client_options.get("rowstride", 0)
                    cdata = bytes(newdata)
                else:
                    cdata = pixels
                new_client_options = {"rgb_format" : rgb_format}
            else:
                #preserve
                cdata = pixels
                new_client_options = client_options
            wrapped = Compressed("%s pixels" % encoding, cdata)
            #FIXME: we should not assume that rgb32 is supported here...
            #(we may have to convert to rgb24..)
            return send_updated("rgb32", wrapped, new_client_options)

        proxy_video = client_options.get("proxy", False)
        if PASSTHROUGH and (encoding in ("rgb32", "rgb24") or proxy_video):
            #we are dealing with rgb data, so we can pass it through:
            return passthrough(proxy_video)
        elif not self.video_encoder_types or not client_options or not proxy_video:
            #ensure we don't try to re-compress the pixel data in the network layer:
            #(re-add the "compressed" marker that gets lost when we re-assemble packets)
            packet[7] = Compressed("%s pixels" % encoding, packet[7])
            return True

        #video encoding: find existing encoder
        ve = self.video_encoders.get(wid)
        if ve:
            if ve in self.lost_windows:
                #we cannot clean the video encoder here, there may be more frames queue up
                #"lost-window" in encode_loop will take care of it safely
                return False
            #we must verify that the encoder is still valid
            #and scrap it if not (ie: when window is resized)
            if ve.get_width()!=width or ve.get_height()!=height:
                enclog("closing existing video encoder %s because dimensions have changed from %sx%s to %sx%s", ve, ve.get_width(), ve.get_height(), width, height)
                ve.clean()
                ve = None
            elif ve.get_encoding()!=encoding:
                #NOTE(review): 3 '%s' placeholders but only 2 arguments - the log call
                #would raise a formatting error if this branch runs; probably missing 've':
                enclog("closing existing video encoder %s because encoding has changed from %s to %s", ve.get_encoding(), encoding)
                ve.clean()
                ve = None
        #scaling and depth are proxy-encoder attributes:
        scaling = client_options.get("scaling", (1, 1))
        depth = client_options.get("depth", 24)
        rowstride = client_options.get("rowstride", rowstride)
        quality = client_options.get("quality", -1)
        speed = client_options.get("speed", -1)
        timestamp = client_options.get("timestamp")
        image = ImageWrapper(x, y, width, height, pixels, rgb_format, depth, rowstride, planes=ImageWrapper.PACKED)
        if timestamp is not None:
            image.set_timestamp(timestamp)
        #the encoder options are passed through:
        encoder_options = client_options.get("options", {})
        if not ve:
            #make a new video encoder:
            spec = self._find_video_encoder(encoding, rgb_format)
            if spec is None:
                #no video encoder!
                enc_pillow = get_codec("enc_pillow")
                if not enc_pillow:
                    from xpra.server.picture_encode import warn_encoding_once
                    warn_encoding_once("no-video-no-PIL", "no video encoder found for rgb format %s, sending as plain RGB!" % rgb_format)
                    return passthrough(True)
                enclog("no video encoder available: sending as jpeg")
                coding, compressed_data, client_options, _, _, _, _ = enc_pillow.encode("jpeg", image, quality, speed, False)
                return send_updated(coding, compressed_data, client_options)
            enclog("creating new video encoder %s for window %s", spec, wid)
            ve = spec.make_instance()
            #dst_formats is specified with first frame only:
            dst_formats = client_options.get("dst_formats")
            if dst_formats is not None:
                #save it in case we timeout the video encoder,
                #so we can instantiate it again, even from a frame no>1
                self.video_encoders_dst_formats = dst_formats
            else:
                assert self.video_encoders_dst_formats, "BUG: dst_formats not specified for proxy and we don't have it either"
                dst_formats = self.video_encoders_dst_formats
            ve.init_context(width, height, rgb_format, dst_formats, encoding, quality, speed, scaling, {})
            self.video_encoders[wid] = ve
            self.video_encoders_last_used_time[wid] = time.time()       #just to make sure this is always set
        #actual video compression:
        enclog("proxy compression using %s with quality=%s, speed=%s", ve, quality, speed)
        data, out_options = ve.compress_image(image, quality, speed, encoder_options)
        #pass through some options if we don't have them from the encoder
        #(maybe we should also use the "pts" from the real server?)
        for k in ("timestamp", "rgb_format", "depth", "csc"):
            if k not in out_options and k in client_options:
                out_options[k] = client_options[k]
        self.video_encoders_last_used_time[wid] = time.time()
        return send_updated(ve.get_encoding(), Compressed(encoding, data), out_options)

    def timeout_video_encoders(self):
        #have to be careful as another thread may come in...
#so we just ask the encode thread (which deals with encoders already) #to do what may need to be done if we find a timeout: now = time.time() for wid in list(self.video_encoders_last_used_time.keys()): idle_time = int(now-self.video_encoders_last_used_time.get(wid)) if idle_time is None: continue enclog("timeout_video_encoders() wid=%s, idle_time=%s", wid, idle_time) if idle_time and idle_time>VIDEO_TIMEOUT: self.encode_queue.put(["check-video-timeout", wid]) return True #run again def _find_video_encoder(self, encoding, rgb_format): #try the one specified first, then all the others: try_encodings = [encoding] + [x for x in self.video_helper.get_encodings() if x!=encoding] for encoding in try_encodings: colorspace_specs = self.video_helper.get_encoder_specs(encoding) especs = colorspace_specs.get(rgb_format) if len(especs)==0: continue for etype in self.video_encoder_types: for spec in especs: if etype==spec.codec_type: enclog("_find_video_encoder(%s, %s)=%s", encoding, rgb_format, spec) return spec enclog("_find_video_encoder(%s, %s) not found", encoding, rgb_format) return None def get_window_info(self): info = {} now = time.time() for wid, encoder in self.video_encoders.items(): einfo = encoder.get_info() einfo["idle_time"] = int(now-self.video_encoders_last_used_time.get(wid, 0)) info[wid] = { "proxy" : { "" : encoder.get_type(), "encoder" : einfo }, } enclog("get_window_info()=%s", info) return info
class EncodingsMixin(StubSourceMixin):
    """ Handles the encodings-related state of a client connection:
        which encodings/options the client supports, the damage batching
        configuration, and the encode worker thread + queues. """

    def __init__(self, core_encodings, encodings, default_encoding, scaling_control, default_quality, default_min_quality, default_speed, default_min_speed):
        log("ServerSource%s", (core_encodings, encodings, default_encoding, scaling_control, default_quality, default_min_quality, default_speed, default_min_speed))
        self.server_core_encodings = core_encodings
        self.server_encodings = encodings
        self.default_encoding = default_encoding
        self.scaling_control = scaling_control
        self.default_quality = default_quality              #default encoding quality for lossy encodings
        self.default_min_quality = default_min_quality      #default minimum encoding quality
        self.default_speed = default_speed                  #encoding speed (only used by x264)
        self.default_min_speed = default_min_speed          #default minimum encoding speed
        self.default_batch_config = DamageBatchConfig()     #contains default values, some of which may be supplied by the client
        self.global_batch_config = self.default_batch_config.clone()      #global batch config
        self.vrefresh = -1
        self.supports_transparency = False
        self.encoding = None                                #the default encoding for all windows
        self.encodings = ()                                 #all the encodings supported by the client
        self.core_encodings = ()
        self.window_icon_encodings = ["premult_argb32"]
        self.rgb_formats = ("RGB", )
        self.encoding_options = typedict()
        self.icons_encoding_options = typedict()
        self.default_encoding_options = typedict()
        self.auto_refresh_delay = 0
        self.zlib = True
        self.lz4 = use_lz4
        self.lzo = use_lzo
        #for managing the recalculate_delays work:
        self.calculate_window_pixels = {}
        self.calculate_window_ids = set()
        self.calculate_timer = 0
        self.calculate_last_time = 0
        #if we "proxy video", we will modify the video helper to add
        #new encoders, so we must make a deep copy to preserve the original
        #which may be used by other clients (other ServerSource instances)
        self.video_helper = getVideoHelper().clone()
        # the queues of damage requests we work through:
        self.encode_work_queue = Queue()    #holds functions to call to compress data (pixels, clipboard)
                                            #items placed in this queue are picked off by the "encode" thread,
                                            #the functions should add the packets they generate to the 'packet_queue'
        self.packet_queue = deque()         #holds actual packets ready for sending (already encoded)
                                            #these packets are picked off by the "protocol" via 'next_packet()'
                                            #format: packet, wid, pixels, start_send_cb, end_send_cb
                                            #(only packet is required - the rest can be 0/None for clipboard packets)
        self.encode_thread = start_thread(self.encode_loop, "encode")

    def cleanup(self):
        self.cancel_recalculate_timer()
        #Warning: this mixin must come AFTER the window mixin!
        #to make sure that it is safe to add the end of queue marker:
        #(all window sources will have stopped queuing data)
        self.encode_work_queue.put(None)
        #this should be a noop since we inherit an initialized helper:
        self.video_helper.cleanup()

    def get_caps(self):
        #capabilities we report back to the client:
        caps = {}
        if self.wants_encodings and self.encoding:
            caps["encoding"] = self.encoding
        if self.wants_features:
            caps.update({
                         "auto_refresh_delay"   : self.auto_refresh_delay,
                         })
        return caps

    def compressed_wrapper(self, datatype, data, min_saving=128):
        """ Compress 'data' with whichever compressors this client enabled,
            but only keep the compressed form if it saves at least
            'min_saving' bytes; otherwise wrap it uncompressed. """
        if self.zlib or self.lz4 or self.lzo:
            cw = compressed_wrapper(datatype, data, zlib=self.zlib, lz4=self.lz4, lzo=self.lzo, can_inline=False)
            if len(cw) + min_saving <= len(data):
                #the compressed version is smaller, use it:
                return cw
            #skip compressed version: fall through
        #we can't compress, so at least avoid warnings in the protocol layer:
        return Compressed(datatype, data, can_inline=True)

    def recalculate_delays(self):
        """ calls update_averages() on ServerSource.statistics (GlobalStatistics)
            and WindowSource.statistics (WindowPerformanceStatistics) for each window id in calculate_window_ids,
            this runs in the worker thread.
        """
        self.calculate_timer = 0
        if self.is_closed():
            return
        now = monotonic_time()
        self.calculate_last_time = now
        p = self.protocol
        if not p:
            return
        conn = p._conn
        if not conn:
            return
        self.statistics.bytes_sent.append((now, conn.output_bytecount))
        self.statistics.update_averages()
        self.update_bandwidth_limits()
        wids = tuple(self.calculate_window_ids)     #make a copy so we don't clobber new wids
        focus = self.get_focus()
        sources = self.window_sources.items()
        maximized_wids = [wid for wid, source in sources if source is not None and source.maximized]
        fullscreen_wids = [wid for wid, source in sources if source is not None and source.fullscreen]
        log("recalculate_delays() wids=%s, focus=%s, maximized=%s, fullscreen=%s", wids, focus, maximized_wids, fullscreen_wids)
        for wid in wids:
            #this is safe because we only add to this set from other threads:
            self.calculate_window_ids.remove(wid)
            try:
                del self.calculate_window_pixels[wid]
            except:
                pass
            ws = self.window_sources.get(wid)
            if ws is None:
                continue
            try:
                ws.statistics.update_averages()
                ws.calculate_batch_delay(wid==focus,
                                         len(fullscreen_wids)>0 and wid not in fullscreen_wids,
                                         len(maximized_wids)>0 and wid not in maximized_wids)
                ws.reconfigure()
            except:
                log.error("error on window %s", wid, exc_info=True)
            if self.is_closed():
                return
            #allow other threads to run
            #(ideally this would be a low priority thread)
            sleep(0)
        #calculate weighted average as new global default delay:
        wdimsum, wdelay, tsize, tcount = 0, 0, 0, 0
        for ws in tuple(self.window_sources.values()):
            if ws.batch_config.last_updated<=0:
                continue
            w, h = ws.window_dimensions
            tsize += w*h
            tcount += 1
            time_w = 2.0+(now-ws.batch_config.last_updated)     #add 2 seconds to even things out
            weight = w*h*time_w
            wdelay += ws.batch_config.delay*weight
            wdimsum += weight
        if wdimsum>0 and tcount>0:
            #weighted delay:
            avg_size = tsize/tcount
            wdelay = wdelay/wdimsum
            #store the delay as a normalized value per megapixel:
            delay = wdelay*1000000/avg_size
            self.global_batch_config.last_delays.append((now, delay))
            self.global_batch_config.delay = delay

    def may_recalculate(self, wid, pixel_count):
        """ Record pixel updates for a window and schedule a
            recalculate_delays work item once enough pixels have accumulated
            (throttled to at most one run per RECALCULATE_DELAY). """
        if wid in self.calculate_window_ids:
            return  #already scheduled
        v = self.calculate_window_pixels.get(wid, 0)+pixel_count
        self.calculate_window_pixels[wid] = v
        if v<MIN_PIXEL_RECALCULATE:
            return  #not enough pixel updates
        statslog("may_recalculate(%i, %i) total %i pixels, scheduling recalculate work item", wid, pixel_count, v)
        self.calculate_window_ids.add(wid)
        if self.calculate_timer:
            #already due
            return
        delta = monotonic_time()-self.calculate_last_time
        RECALCULATE_DELAY = 1.0     #1s
        if delta>RECALCULATE_DELAY:
            add_work_item(self.recalculate_delays)
        else:
            self.calculate_timer = self.timeout_add(int(1000*(RECALCULATE_DELAY-delta)), add_work_item, self.recalculate_delays)

    def cancel_recalculate_timer(self):
        ct = self.calculate_timer
        if ct:
            self.calculate_timer = 0
            self.source_remove(ct)

    def parse_client_caps(self, c):
        """ Parse the capabilities from the client's hello:
            batching options, compressors and the supported encodings. """
        #batch options:
        def batch_value(prop, default, minv=None, maxv=None):
            #resolve one batch option: client caps first, then env var,
            #then the default, clamped to [minv, maxv]:
            assert default is not None
            def parse_batch_int(value, varname):
                if value is not None:
                    try:
                        return int(value)
                    except:
                        log.error("invalid value for batch option %s: %s", varname, value)
                return None
            #from client caps first:
            cpname = "batch.%s" % prop
            v = parse_batch_int(c.get(cpname), cpname)
            #try env:
            if v is None:
                evname = "XPRA_BATCH_%s" % prop.upper()
                v = parse_batch_int(os.environ.get(evname), evname)
            #fallback to default:
            if v is None:
                v = default
            if minv is not None:
                v = max(minv, v)
            if maxv is not None:
                v = min(maxv, v)
            assert v is not None
            return v
        #general features:
        self.zlib = c.boolget("zlib", True)
        self.lz4 = c.boolget("lz4", False) and use_lz4
        self.lzo = c.boolget("lzo", False) and use_lzo
        self.vrefresh = c.intget("vrefresh", -1)
        default_min_delay = max(DamageBatchConfig.MIN_DELAY, 1000 // (self.vrefresh or 60))
        dbc = self.default_batch_config
        dbc.always = bool(batch_value("always", DamageBatchConfig.ALWAYS))
        dbc.min_delay = batch_value("min_delay", default_min_delay, 0, 1000)
        dbc.max_delay = batch_value("max_delay", DamageBatchConfig.MAX_DELAY, 1, 15000)
        dbc.max_events = batch_value("max_events", DamageBatchConfig.MAX_EVENTS)
        dbc.max_pixels = batch_value("max_pixels", DamageBatchConfig.MAX_PIXELS)
        dbc.time_unit = batch_value("time_unit", DamageBatchConfig.TIME_UNIT, 1)
        dbc.delay = batch_value("delay", DamageBatchConfig.START_DELAY, 0)
        log("default batch config: %s", dbc)
        #encodings:
        self.encodings = c.strlistget("encodings")
        self.core_encodings = c.strlistget("encodings.core", self.encodings)
        if self.send_windows and not self.core_encodings:
            raise Exception("client failed to specify any supported encodings")
        if "png" in self.core_encodings:
            self.window_icon_encodings.append("png")
        #NOTE(review): the assignment below overwrites the list the "png" append
        #above just modified - the append is lost unless the client also lists it
        #in "encodings.window-icon"; confirm whether the append should come after.
        self.window_icon_encodings = c.strlistget("encodings.window-icon", ["premult_argb32"])
        self.rgb_formats = c.strlistget("encodings.rgb_formats", ["RGB"])
        #skip all other encoding related settings if we don't send pixels:
        if not self.send_windows:
            log("windows/pixels forwarding is disabled for this client")
        else:
            self.parse_encoding_caps(c)

    def parse_encoding_caps(self, c):
        """ Parse the encoding-specific capabilities:
            legacy top-level options, 'encoding.*' prefixed options,
            proxy video support and the default quality/speed settings. """
        self.set_encoding(c.strget("encoding", None), None)
        #encoding options (filter):
        #1: these properties are special cased here because we
        #defined their name before the "encoding." prefix convention,
        #or because we want to pass default values (zlib/lz4):
        for k,ek in {"initial_quality"  : "initial_quality",
                     "quality"          : "quality",
                     }.items():
            if k in c:
                self.encoding_options[ek] = c.intget(k)
        for k,ek in {"zlib"             : "rgb_zlib",
                     "lz4"              : "rgb_lz4",
                     }.items():
            if k in c:
                self.encoding_options[ek] = c.boolget(k)
        #2: standardized encoding options:
        for k in c.keys():
            if k.startswith(b"theme.") or k.startswith(b"encoding.icons."):
                self.icons_encoding_options[k.replace(b"encoding.icons.", b"").replace(b"theme.", b"")] = c[k]
            elif k.startswith(b"encoding."):
                stripped_k = k[len(b"encoding."):]
                if stripped_k in (b"transparency", b"rgb_zlib", b"rgb_lz4", b"rgb_lzo", b"video_scaling"):
                    v = c.boolget(k)
                elif stripped_k in (b"initial_quality", b"initial_speed", b"min-quality", b"quality", b"min-speed", b"speed"):
                    v = c.intget(k)
                else:
                    v = c.get(k)
                self.encoding_options[stripped_k] = v
        log("encoding options: %s", self.encoding_options)
        log("icons encoding options: %s", self.icons_encoding_options)
        #handle proxy video: add proxy codec to video helper:
        pv = self.encoding_options.boolget("proxy.video")
        proxylog("proxy.video=%s", pv)
        if pv:
            #enabling video proxy:
            try:
                self.parse_proxy_video()
            except:
                proxylog.error("failed to parse proxy video", exc_info=True)
        self.default_encoding_options["scaling.control"] = self.encoding_options.get("scaling.control", self.scaling_control)
        q = self.encoding_options.intget("quality", self.default_quality)       #0.7 onwards:
        if q>0:
            self.default_encoding_options["quality"] = q
        mq = self.encoding_options.intget("min-quality", self.default_min_quality)
        if mq>0 and (q<=0 or q>mq):
            self.default_encoding_options["min-quality"] = mq
        s = self.encoding_options.intget("speed", self.default_speed)
        if s>0:
            self.default_encoding_options["speed"] = s
        ms = self.encoding_options.intget("min-speed", self.default_min_speed)
        if ms>0 and (s<=0 or s>ms):
            self.default_encoding_options["min-speed"] = ms
        log("default encoding options: %s", self.default_encoding_options)
        self.auto_refresh_delay = c.intget("auto_refresh_delay", 0)
        if self.mmap_size==0:
            #log a summary of the encodings available (mmap clients skip encoding):
            others = [x for x in self.core_encodings if x in self.server_core_encodings and x!=self.encoding]
            if self.encoding=="auto":
                s = "automatic picture encoding enabled"
            else:
                s = "using %s as primary encoding" % self.encoding
            if others:
                log.info(" %s, also available:", s)
                log.info(" %s", csv(others))
            else:
                log.warn(" %s", s)
                log.warn(" no other encodings are available!")

    def parse_proxy_video(self):
        """ Re-create the video encoder specs advertised by a proxy client
            and register them with our (cloned) video helper. """
        from xpra.codecs.enc_proxy.encoder import Encoder
        proxy_video_encodings = self.encoding_options.get("proxy.video.encodings")
        proxylog("parse_proxy_video() proxy.video.encodings=%s", proxy_video_encodings)
        for encoding, colorspace_specs in proxy_video_encodings.items():
            for colorspace, spec_props in colorspace_specs.items():
                for spec_prop in spec_props:
                    #make a new spec based on spec_props:
                    spec = video_spec(codec_class=Encoder, codec_type="proxy", encoding=encoding)
                    for k,v in spec_prop.items():
                        setattr(spec, k, v)
                    proxylog("parse_proxy_video() adding: %s / %s / %s", encoding, colorspace, spec)
                    self.video_helper.add_encoder_spec(encoding, colorspace, spec)

    ######################################################################
    # Functions used by the server to request something
    # (window events, stats, user requests, etc)
    #
    def set_auto_refresh_delay(self, delay, window_ids):
        #apply to the given windows, or all of them if window_ids is None:
        if window_ids is not None:
            wss = (self.window_sources.get(wid) for wid in window_ids)
        else:
            wss = self.window_sources.values()
        for ws in wss:
            if ws is not None:
                ws.set_auto_refresh_delay(delay)

    def set_encoding(self, encoding, window_ids, strict=False):
        """ Changes the encoder for the given 'window_ids',
            or for all windows if 'window_ids' is None.
        """
        log("set_encoding(%s, %s, %s)", encoding, window_ids, strict)
        if not self.ui_client:
            return
        if encoding and encoding!="auto":
            #old clients (v0.9.x and earlier) only supported 'rgb24' as 'rgb' mode:
            if encoding=="rgb24":
                encoding = "rgb"
            if encoding not in self.encodings:
                log.warn("Warning: client specified '%s' encoding,", encoding)
                log.warn(" but it only supports: %s" % csv(self.encodings))
            if encoding not in self.server_encodings:
                log.error("Error: encoding %s is not supported by this server", encoding)
                encoding = None
        if not encoding:
            encoding = "auto"
        if window_ids is not None:
            wss = [self.window_sources.get(wid) for wid in window_ids]
        else:
            wss = self.window_sources.values()
        #if we're updating all the windows, reset global stats too:
        if set(wss).issuperset(self.window_sources.values()):
            log("resetting global stats")
            self.statistics.reset()
            self.global_batch_config = self.default_batch_config.clone()
        for ws in wss:
            if ws is not None:
                ws.set_new_encoding(encoding, strict)
        if not window_ids:
            self.encoding = encoding

    def get_info(self):
        #encoding-related information for "xpra info":
        info = {
                "auto_refresh"      : self.auto_refresh_delay,
                "lz4"               : self.lz4,
                "lzo"               : self.lzo,
                "vertical-refresh"  : self.vrefresh,
                }
        ieo = dict(self.icons_encoding_options)
        try:
            del ieo["default.icons"]
        except:
            pass
        #encoding:
        info.update({
                     "encodings" : {
                                    ""              : self.encodings,
                                    "core"          : self.core_encodings,
                                    "window-icon"   : self.window_icon_encodings,
                                    },
                     "icons" : ieo,
                     })
        einfo = {"default" : self.default_encoding or ""}
        einfo.update(self.default_encoding_options)
        einfo.update(self.encoding_options)
        info.setdefault("encoding", {}).update(einfo)
        return info

    def set_min_quality(self, min_quality):
        for ws in tuple(self.window_sources.values()):
            ws.set_min_quality(min_quality)

    def set_quality(self, quality):
        for ws in tuple(self.window_sources.values()):
            ws.set_quality(quality)

    def set_min_speed(self, min_speed):
        for ws in tuple(self.window_sources.values()):
            ws.set_min_speed(min_speed)

    def set_speed(self, speed):
        for ws in tuple(self.window_sources.values()):
            ws.set_speed(speed)

    def update_batch(self, wid, window, batch_props):
        """ Update the batch config of one window from the given properties;
            'reset' re-creates the config from the defaults first. """
        ws = self.window_sources.get(wid)
        if ws:
            if "reset" in batch_props:
                ws.batch_config = self.make_batch_config(wid, window)
            for x in ("always", "locked"):
                if x in batch_props:
                    setattr(ws.batch_config, x, batch_props.boolget(x))
            for x in ("min_delay", "max_delay", "timeout_delay", "delay"):
                if x in batch_props:
                    setattr(ws.batch_config, x, batch_props.intget(x))
            log("batch config updated for window %s: %s", wid, ws.batch_config)

    def make_batch_config(self, wid, window):
        batch_config = self.default_batch_config.clone()
        batch_config.wid = wid
        #scale initial delay based on window size
        #(the global value is normalized to 1MPixel)
        #but use sqrt to smooth things and prevent excesses
        #(ie: a 4MPixel window, will start at 2 times the global delay)
        #(ie: a 0.5MPixel window will start at 0.7 times the global delay)
        w, h = window.get_dimensions()
        ratio = float(w*h) / 1000000
        batch_config.delay = self.global_batch_config.delay * sqrt(ratio)
        return batch_config

    def queue_size(self):
        return self.encode_work_queue.qsize()

    def call_in_encode_thread(self, *fn_and_args):
        """
            This is used by WindowSource to queue damage processing to be done in the 'encode' thread.
            The 'encode_and_send_cb' will then add the resulting packet to the 'packet_queue' via 'queue_packet'.
        """
        self.statistics.compression_work_qsizes.append((monotonic_time(), self.encode_work_queue.qsize()))
        self.encode_work_queue.put(fn_and_args)

    def queue_packet(self, packet, wid=0, pixels=0, start_send_cb=None, end_send_cb=None, fail_cb=None, wait_for_more=False):
        """
            Add a new 'draw' packet to the 'packet_queue'.
Note: this code runs in the non-ui thread """ now = monotonic_time() self.statistics.packet_qsizes.append((now, len(self.packet_queue))) if wid > 0: self.statistics.damage_packet_qpixels.append( (now, wid, sum(x[2] for x in tuple(self.packet_queue) if x[1] == wid))) self.packet_queue.append((packet, wid, pixels, start_send_cb, end_send_cb, fail_cb, wait_for_more)) p = self.protocol if p: p.source_has_more() # # The damage packet thread loop: # def encode_loop(self): """ This runs in a separate thread and calls all the function callbacks which are added to the 'encode_work_queue'. Must run until we hit the end of queue marker, to ensure all the queued items get called. """ while True: fn_and_args = self.encode_work_queue.get(True) if fn_and_args is None: return #empty marker #some function calls are optional and can be skipped when closing: #(but some are not, like encoder clean functions) optional_when_closing = fn_and_args[0] if optional_when_closing and self.is_closed(): continue try: fn_and_args[1](*fn_and_args[2:]) except Exception as e: if self.is_closed(): log( "ignoring encoding error in %s as source is already closed:", fn_and_args[0]) log(" %s", e) else: log.error("Error during encoding:", exc_info=True) del e NOYIELD or sleep(0)
class ProxyInstance(object):
    """
    Sits between a client connection and a server connection,
    forwarding packets in both directions while:
    * filtering / rewriting capabilities,
    * handling authentication challenges on behalf of the client,
    * optionally re-encoding 'draw' packets with local video encoders,
    * managing its own ping / timeout logic for both ends.
    """

    def __init__(self, session_options, video_encoder_modules, pings, disp_desc, cipher, encryption_key, caps):
        self.session_options = session_options
        self.video_encoder_modules = video_encoder_modules
        self.pings = pings
        self.disp_desc = disp_desc
        self.cipher = cipher
        self.encryption_key = encryption_key
        self.caps = caps
        log("ProxyInstance%s", (session_options, video_encoder_modules, disp_desc,
                                cipher, encryption_key,
                                "%s: %s.." % (type(caps), repr_ellipsized(str(caps)))))
        self.uuid = get_hex_uuid()
        self.client_protocol = None
        self.server_protocol = None
        #ping handling:
        self.client_last_ping = 0
        self.server_last_ping = 0
        self.client_last_ping_echo = 0
        self.server_last_ping_echo = 0
        self.client_last_ping_latency = 0
        self.server_last_ping_latency = 0
        self.client_ping_timer = None
        self.server_ping_timer = None
        self.client_challenge_packet = None
        self.exit = False
        self.lost_windows = None
        self.encode_queue = None            #holds draw packets to encode
        self.encode_thread = None
        self.video_encoding_defs = None
        self.video_encoders = None
        self.video_encoders_last_used_time = None
        self.video_encoder_types = None
        self.video_helper = None

    def is_alive(self):
        return not self.exit

    def run(self):
        """ Initialize the video codecs, the packet queues and
            the encode thread, then start forwarding. """
        log.info("started %s", self)
        log.info(" for client %s", self.client_protocol._conn)
        log.info(" and server %s", self.server_protocol._conn)
        self.video_init()
        #setup protocol wrappers:
        self.server_packets = Queue(PROXY_QUEUE_SIZE)
        self.client_packets = Queue(PROXY_QUEUE_SIZE)
        #server connection tweaks:
        for x in (b"input-devices", b"draw", b"window-icon", b"keymap-changed", b"server-settings"):
            self.server_protocol.large_packets.append(x)
        if self.caps.boolget("file-transfer"):
            for x in (b"send-file", b"send-file-chunk"):
                self.server_protocol.large_packets.append(x)
                self.client_protocol.large_packets.append(x)
        self.server_protocol.set_compression_level(self.session_options.get("compression_level", 0))
        self.server_protocol.enable_default_encoder()
        self.lost_windows = set()
        self.encode_queue = Queue()
        self.encode_thread = start_thread(self.encode_loop, "encode")
        self.start_network_threads()
        if self.caps.boolget("ping-echo-sourceid"):
            self.schedule_client_ping()
        self.send_hello()

    def start_network_threads(self):
        #subclasses must start the client and server protocol threads:
        raise NotImplementedError()

    ################################################################################

    def close_connections(self, skip_proto, *reasons):
        """ Send a disconnect to both ends (except 'skip_proto')
            and wait up to 1 second for them to close down. """
        for proto in (self.client_protocol, self.server_protocol):
            if proto and proto != skip_proto:
                log("sending disconnect to %s", proto)
                proto.send_disconnect([SERVER_SHUTDOWN] + list(reasons))
        #wait for connections to close down cleanly before we exit
        cp = self.client_protocol
        sp = self.server_protocol
        for i in range(10):
            if cp.is_closed() and sp.is_closed():
                break
            if i == 0:
                log("waiting for network connections to close")
            elif i == 1:
                log.info("waiting for network connections to close")
            else:
                log("still waiting %i/10 - client.closed=%s, server.closed=%s",
                    i + 1, cp.is_closed(), sp.is_closed())
            sleep(0.1)
        if not cp.is_closed():
            log.warn("Warning: proxy instance client connection has not been closed yet:")
            log.warn(" %s", cp)
        if not sp.is_closed():
            log.warn("Warning: proxy instance server connection has not been closed yet:")
            log.warn(" %s", sp)

    def send_disconnect(self, proto, *reasons):
        log("send_disconnect(%s, %s)", proto, reasons)
        if proto.is_closed():
            return
        proto.send_disconnect(reasons)
        self.timeout_add(1000, self.force_disconnect, proto)

    def force_disconnect(self, proto):
        proto.close()

    def stop(self, skip_proto, *reasons):
        """ Stop this proxy instance: cancel timers, stop the encode
            thread and close both connections. """
        log("stop(%s, %s)", skip_proto, reasons)
        if not self.exit:
            log.info("stopping %s", self)
            for x in reasons:
                log.info(" %s", x)
            self.exit = True
        log.info("stopping %s", self)
        self.cancel_client_ping_timer()
        self.cancel_server_ping_timer()
        self.stop_encode_thread()
        self.close_connections(skip_proto, *reasons)
        self.stopped()

    def stopped(self):
        #hook for subclasses
        pass

    ################################################################################

    def get_proxy_info(self, proto):
        """ Return proxy server and latency information,
            merged into 'info-response' packets. """
        sinfo = {}
        sinfo.update(get_server_info())
        sinfo.update(get_thread_info(proto))
        linfo = {}
        if self.client_last_ping_latency:
            linfo["client"] = int(self.client_last_ping_latency)
        if self.server_last_ping_latency:
            linfo["server"] = int(self.server_last_ping_latency)
        return {
            "proxy": {
                "version": XPRA_VERSION,
                "": sinfo,
                "latency": linfo,
            },
            "window": self.get_window_info(),
        }

    def get_window_info(self):
        """ Return per-window video encoder information. """
        info = {}
        now = monotonic_time()
        for wid, encoder in self.video_encoders.items():
            einfo = encoder.get_info()
            einfo["idle_time"] = int(now - self.video_encoders_last_used_time.get(wid, 0))
            info[wid] = {
                "proxy": {
                    "": encoder.get_type(),
                    "encoder": einfo
                },
            }
        enclog("get_window_info()=%s", info)
        return info

    ################################################################################

    def send_hello(self, challenge_response=None, client_salt=None):
        hello = self.filter_client_caps(self.caps)
        if challenge_response:
            hello.update({
                "challenge_response": challenge_response,
                "challenge_client_salt": client_salt,
            })
        self.queue_server_packet(("hello", hello))

    def sanitize_session_options(self, options):
        """ Parse and whitelist session options,
            dropping anything unknown or unparseable. """
        d = {}

        def number(k, v):
            return parse_number(int, k, v)
        OPTION_WHITELIST = {
            "compression_level": number,
            "lz4": parse_bool,
            "lzo": parse_bool,
            "zlib": parse_bool,
            "rencode": parse_bool,
            "bencode": parse_bool,
            "yaml": parse_bool,
        }
        for k, v in options.items():
            parser = OPTION_WHITELIST.get(k)
            if parser:
                log("trying to add %s=%s using %s", k, v, parser)
                try:
                    d[k] = parser(k, v)
                except Exception as e:
                    log.warn("failed to parse value %s for %s using %s: %s", v, k, parser, e)
        return d

    def filter_client_caps(self, caps, remove=CLIENT_REMOVE_CAPS):
        """ Filter the client capabilities before forwarding to the server,
            adding our proxy video encodings if any. """
        fc = self.filter_caps(caps, remove)
        #the display string may override the username:
        username = self.disp_desc.get("username")
        if username:
            fc["username"] = username
        #update with options provided via config if any:
        fc.update(self.sanitize_session_options(self.session_options))
        #add video proxies if any:
        fc["encoding.proxy.video"] = len(self.video_encoding_defs) > 0
        if self.video_encoding_defs:
            fc["encoding.proxy.video.encodings"] = self.video_encoding_defs
        return fc

    def filter_server_caps(self, caps):
        self.server_protocol.enable_encoder_from_caps(caps)
        return self.filter_caps(caps, ("aliases", ))

    def filter_caps(self, caps, prefixes):
        """ Remove the caps matching any of the given prefixes
            and substitute the proxy's own network caps. """
        #removes caps that the proxy overrides / does not use:
        pcaps = {}
        removed = []
        for k in caps.keys():
            if any(e for e in prefixes if bytestostr(k).startswith(e)):
                removed.append(k)
            else:
                pcaps[k] = caps[k]
        log("filtered out %s matching %s", removed, prefixes)
        #replace the network caps with the proxy's own:
        pcaps.update(flatten_dict(get_network_caps()))
        #then add the proxy info:
        updict(pcaps, "proxy", get_server_info(), flatten_dicts=True)
        pcaps["proxy"] = True
        pcaps["proxy.hostname"] = socket.gethostname()
        return pcaps

    ################################################################################

    def queue_client_packet(self, packet):
        log("queueing client packet: %s", bytestostr(packet[0]))
        self.client_packets.put(packet)
        self.client_protocol.source_has_more()

    def get_client_packet(self):
        #server wants a packet
        p = self.client_packets.get()
        s = self.client_packets.qsize()
        log("sending to client: %s (queue size=%i)", p[0], s)
        return p, None, None, None, True, s > 0

    def process_client_packet(self, proto, packet):
        """ Handle (and mostly forward) a packet received from the client. """
        packet_type = bytestostr(packet[0])
        log("process_client_packet: %s", packet_type)
        if packet_type == Protocol.CONNECTION_LOST:
            self.stop(proto, "client connection lost")
            return
        if packet_type == "set_deflate":
            #echo it back to the client:
            self.client_packets.put(packet)
            self.client_protocol.source_has_more()
            return
        if packet_type == "hello":
            if not self.client_challenge_packet:
                log.warn("Warning: invalid hello packet from client")
                log.warn(" received after initial authentication (dropped)")
                return
            log("forwarding client hello")
            log(" for challenge packet %s", self.client_challenge_packet)
            #update caps with latest hello caps from client:
            self.caps = typedict(packet[1])
            #keep challenge data in the hello response:
            hello = self.filter_client_caps(self.caps, CLIENT_REMOVE_CAPS_CHALLENGE)
            self.queue_server_packet(("hello", hello))
            return
        if packet_type == "ping_echo" and self.client_ping_timer and \
                len(packet) >= 7 and packet[6] == strtobytes(self.uuid):
            #this is one of our ping packets:
            self.client_last_ping_echo = packet[1]
            self.client_last_ping_latency = 1000 * monotonic_time() - self.client_last_ping_echo
            log("ping-echo: client latency=%.1fms", self.client_last_ping_latency)
            return
        #the packet types below are forwarded:
        if packet_type == "disconnect":
            reason = bytestostr(packet[1])
            log("got disconnect from client: %s", reason)
            if self.exit:
                self.client_protocol.close()
            else:
                self.stop(None, "disconnect from client", reason)
        elif packet_type == "send-file":
            if packet[6]:
                packet[6] = Compressed("file-data", packet[6])
        elif packet_type == "send-file-chunk":
            if packet[3]:
                packet[3] = Compressed("file-chunk-data", packet[3])
        self.queue_server_packet(packet)

    def queue_server_packet(self, packet):
        log("queueing server packet: %s", bytestostr(packet[0]))
        self.server_packets.put(packet)
        self.server_protocol.source_has_more()

    def get_server_packet(self):
        #server wants a packet
        p = self.server_packets.get()
        s = self.server_packets.qsize()
        log("sending to server: %s (queue size=%i)", p[0], s)
        return p, None, None, None, True, s > 0

    def _packet_recompress(self, packet, index, name):
        """ Re-wrap the payload at 'index' so the network layer
            compresses it (or marks it as raw when compression is off). """
        if len(packet) > index:
            data = packet[index]
            if len(data) < 512:
                packet[index] = strtobytes(data)
                return
            #this is ugly and not generic!
            zlib = compression.use_zlib and self.caps.boolget("zlib", True)
            lz4 = compression.use_lz4 and self.caps.boolget("lz4", False)
            lzo = compression.use_lzo and self.caps.boolget("lzo", False)
            if zlib or lz4 or lzo:
                packet[index] = compressed_wrapper(name, data, zlib=zlib, lz4=lz4, lzo=lzo, can_inline=False)
            else:
                #prevent warnings about large uncompressed data
                packet[index] = Compressed("raw %s" % name, data, can_inline=True)

    ################################################################################
    # ping handling:

    def cancel_server_ping_timer(self):
        spt = self.server_ping_timer
        if spt:
            self.server_ping_timer = None
            self.source_remove(spt)

    def cancel_client_ping_timer(self):
        cpt = self.client_ping_timer
        if cpt:
            self.client_ping_timer = None
            self.source_remove(cpt)

    def schedule_server_ping(self):
        self.cancel_server_ping_timer()
        self.server_last_ping_echo = monotonic_time()
        self.server_ping_timer = self.timeout_add(PING_INTERVAL, self.send_server_ping)

    def schedule_client_ping(self):
        self.cancel_client_ping_timer()
        self.client_last_ping_echo = monotonic_time()
        self.client_ping_timer = self.timeout_add(PING_INTERVAL, self.send_client_ping)

    def send_server_ping(self):
        """ Periodic timer callback: ping the server and
            check the previous ping was echoed in time. """
        #if we've already sent one, check for the echo:
        if self.server_last_ping:
            delta = self.server_last_ping - self.server_last_ping_echo
            if delta > PING_WARNING:
                log.warn("Warning: late server ping, %i seconds", delta)
            if delta > PING_TIMEOUT:
                log.error("Error: server ping timeout, %i seconds", delta)
                self.stop(None, "proxy to server ping timeout")
                return False
        now = monotonic_time()
        self.server_last_ping = now
        self.queue_server_packet(("ping", int(now * 1000), int(time() * 1000), self.uuid))
        return True

    def send_client_ping(self):
        """ Periodic timer callback: ping the client and
            check the previous ping was echoed in time. """
        #if we've already sent one, check for the echo:
        if self.client_last_ping:
            delta = self.client_last_ping - self.client_last_ping_echo
            if delta > PING_WARNING:
                log.warn("Warning: late client ping, %i seconds", delta)
            if delta > PING_TIMEOUT:
                log.error("Error: client ping timeout, %i seconds", delta)
                self.stop(None, "proxy to client ping timeout")
                return False
        now = monotonic_time()
        self.client_last_ping = now
        self.queue_client_packet(("ping", int(now * 1000), int(time() * 1000), self.uuid))
        return True

    ################################################################################

    def process_server_packet(self, proto, packet):
        """ Handle (and mostly forward) a packet received from the server. """
        packet_type = bytestostr(packet[0])
        log("process_server_packet: %s", packet_type)
        if packet_type == Protocol.CONNECTION_LOST:
            self.stop(proto, "server connection lost")
            return
        if packet_type == "disconnect":
            reason = bytestostr(packet[1])
            log("got disconnect from server: %s", reason)
            if self.exit:
                self.server_protocol.close()
            else:
                self.stop(None, "disconnect from server", reason)
        elif packet_type == "hello":
            c = typedict(packet[1])
            if c.boolget("ping-echo-sourceid"):
                self.schedule_server_ping()
            maxw, maxh = c.intpair("max_desktop_size", (4096, 4096))
            caps = self.filter_server_caps(c)
            #add new encryption caps:
            if self.cipher:
                from xpra.net.crypto import crypto_backend_init, new_cipher_caps, DEFAULT_PADDING
                crypto_backend_init()
                padding_options = self.caps.strlistget("cipher.padding.options", [DEFAULT_PADDING])
                auth_caps = new_cipher_caps(self.client_protocol, self.cipher,
                                            self.encryption_key, padding_options)
                caps.update(auth_caps)
            #may need to bump packet size:
            proto.max_packet_size = max(16 * 1024 * 1024, maxw * maxh * 4 * 4)
            file_transfer = self.caps.boolget("file-transfer") and c.boolget("file-transfer")
            file_size_limit = max(self.caps.intget("file-size-limit"), c.intget("file-size-limit"))
            file_max_packet_size = int(file_transfer) * (1024 + file_size_limit * 1024 * 1024)
            self.client_protocol.max_packet_size = max(self.client_protocol.max_packet_size, file_max_packet_size)
            self.server_protocol.max_packet_size = max(self.server_protocol.max_packet_size, file_max_packet_size)
            packet = ("hello", caps)
        elif packet_type == "ping_echo" and self.server_ping_timer and \
                len(packet) >= 7 and packet[6] == strtobytes(self.uuid):
            #this is one of our ping packets:
            self.server_last_ping_echo = packet[1]
            self.server_last_ping_latency = 1000 * monotonic_time() - self.server_last_ping_echo
            log("ping-echo: server latency=%.1fms", self.server_last_ping_latency)
            return
        elif packet_type == "info-response":
            #adds proxy info:
            #note: this is only seen by the client application
            #"xpra info" is a new connection, which talks to the proxy server...
            info = packet[1]
            info.update(self.get_proxy_info(proto))
        elif packet_type == "lost-window":
            wid = packet[1]
            #mark it as lost so we can drop any current/pending frames
            self.lost_windows.add(wid)
            #queue it so it gets cleaned safely (for video encoders mostly):
            self.encode_queue.put(packet)
            #and fall through so tell the client immediately
        elif packet_type == "draw":
            #use encoder thread:
            self.encode_queue.put(packet)
            #which will queue the packet itself when done:
            return
        #we do want to reformat cursor packets...
        #as they will have been uncompressed by the network layer already:
        elif packet_type == "cursor":
            #packet = ["cursor", x, y, width, height, xhot, yhot, serial, pixels, name]
            #or:
            #packet = ["cursor", "png", x, y, width, height, xhot, yhot, serial, pixels, name]
            #or:
            #packet = ["cursor", ""]
            if len(packet) >= 8:
                #hard to distinguish png cursors from normal cursors...
                try:
                    int(packet[1])
                    self._packet_recompress(packet, 8, "cursor")
                except (TypeError, ValueError):
                    self._packet_recompress(packet, 9, "cursor")
        elif packet_type == "window-icon":
            self._packet_recompress(packet, 5, "icon")
        elif packet_type == "send-file":
            if packet[6]:
                packet[6] = Compressed("file-data", packet[6])
        elif packet_type == "send-file-chunk":
            if packet[3]:
                packet[3] = Compressed("file-chunk-data", packet[3])
        elif packet_type == "challenge":
            password = self.disp_desc.get("password", self.session_options.get("password"))
            log("password from %s / %s = %s", self.disp_desc, self.session_options, password)
            if not password:
                if not PASSTHROUGH_AUTH:
                    self.stop(None, "authentication requested by the server,",
                              "but no password is available for this session")
                #otherwise, just forward it to the client
                self.client_challenge_packet = packet
            else:
                from xpra.net.digest import get_salt, gendigest
                #client may have already responded to the challenge,
                #so we have to handle authentication from this end
                server_salt = bytestostr(packet[1])
                l = len(server_salt)
                digest = bytestostr(packet[3])
                salt_digest = "xor"
                if len(packet) >= 5:
                    salt_digest = bytestostr(packet[4])
                if salt_digest in ("xor", "des"):
                    if not LEGACY_SALT_DIGEST:
                        self.stop(None, "server uses legacy salt digest '%s'" % salt_digest)
                        return
                    log.warn("Warning: server using legacy support for '%s' salt digest", salt_digest)
                if salt_digest == "xor":
                    #with xor, we have to match the size
                    assert l >= 16, "server salt is too short: only %i bytes, minimum is 16" % l
                    assert l <= 256, "server salt is too long: %i bytes, maximum is 256" % l
                else:
                    #other digest, 32 random bytes is enough:
                    l = 32
                client_salt = get_salt(l)
                salt = gendigest(salt_digest, client_salt, server_salt)
                challenge_response = gendigest(digest, password, salt)
                if not challenge_response:
                    #fixed: the format string had two placeholders but only one argument
                    log("invalid digest module '%s'", digest)
                    self.stop(None, "server requested '%s' digest but it is not supported" % digest)
                    return
                log.info("sending %s challenge response", digest)
                self.send_hello(challenge_response, client_salt)
                return
        self.queue_client_packet(packet)

    ################################################################################
    # encode thread:

    def stop_encode_thread(self):
        #empty the encode queue:
        q = self.encode_queue
        if q:
            q.put_nowait(None)
            q = Queue()
            q.put(None)
            self.encode_queue = q

    def encode_loop(self):
        """ thread for slower encoding related work """
        while not self.exit:
            packet = self.encode_queue.get()
            if packet is None:
                return
            try:
                #fixed: compare as str so the locally queued "check-video-timeout"
                #packets (str) match, as well as bytes packet types from the network:
                packet_type = bytestostr(packet[0])
                if packet_type == "lost-window":
                    wid = packet[1]
                    self.lost_windows.remove(wid)
                    ve = self.video_encoders.get(wid)
                    if ve:
                        del self.video_encoders[wid]
                        del self.video_encoders_last_used_time[wid]
                        ve.clean()
                elif packet_type == "draw":
                    #modify the packet with the video encoder:
                    if self.process_draw(packet):
                        #then send it as normal:
                        self.queue_client_packet(packet)
                elif packet_type == "check-video-timeout":
                    #not a real packet, this is added by the timeout check:
                    wid = packet[1]
                    ve = self.video_encoders.get(wid)
                    #fixed: guard against a missing last-used timestamp
                    #(previously subtracting None raised a TypeError)
                    lut = self.video_encoders_last_used_time.get(wid)
                    if ve and lut is not None and monotonic_time() - lut > VIDEO_TIMEOUT:
                        enclog("timing out the video encoder context for window %s", wid)
                        #timeout is confirmed, we are in the encoding thread,
                        #so it is now safe to clean it up:
                        ve.clean()
                        del self.video_encoders[wid]
                        del self.video_encoders_last_used_time[wid]
                else:
                    enclog.warn("unexpected encode packet: %s", packet_type)
            except Exception:
                enclog.warn("error encoding packet", exc_info=True)

    def process_draw(self, packet):
        """ Re-encode a 'draw' packet with a local video encoder when possible,
            otherwise pass the pixels through.
            Returns True if the (modified) packet should be forwarded. """
        wid, x, y, width, height, encoding, pixels, _, rowstride, client_options = packet[1:11]
        #never modify mmap packets
        if encoding in (b"mmap", b"scroll"):
            return True
        client_options = typedict(client_options)
        #we have a proxy video packet:
        rgb_format = client_options.strget("rgb_format", "")
        enclog("proxy draw: encoding=%s, client_options=%s", encoding, client_options)

        def send_updated(encoding, compressed_data, updated_client_options):
            #update the packet with actual encoding data used:
            packet[6] = encoding
            packet[7] = compressed_data
            packet[10] = updated_client_options
            enclog("returning %s bytes from %s, options=%s",
                   len(compressed_data), len(pixels), updated_client_options)
            return wid not in self.lost_windows

        def passthrough(strip_alpha=True):
            enclog("proxy draw: %s passthrough (rowstride: %s vs %s, strip alpha=%s)",
                   rgb_format, rowstride, client_options.intget("rowstride", 0), strip_alpha)
            if strip_alpha:
                #passthrough as plain RGB:
                Xindex = rgb_format.upper().find("X")
                if Xindex >= 0 and len(rgb_format) == 4:
                    #force clear alpha (which may be garbage):
                    newdata = bytearray(pixels)
                    #fixed: use integer division (py3 '/' yields a float),
                    #and assign an int (bytearray items are ints on py3):
                    for i in range(len(pixels) // 4):
                        newdata[i * 4 + Xindex] = 255
                    packet[9] = client_options.intget("rowstride", 0)
                    cdata = bytes(newdata)
                else:
                    cdata = pixels
                new_client_options = {"rgb_format": rgb_format}
            else:
                #preserve
                cdata = pixels
                new_client_options = client_options
            wrapped = Compressed("%s pixels" % encoding, cdata)
            #rgb32 is always supported by all clients:
            return send_updated("rgb32", wrapped, new_client_options)

        proxy_video = client_options.boolget("proxy", False)
        if PASSTHROUGH_RGB and (encoding in ("rgb32", "rgb24") or proxy_video):
            #we are dealing with rgb data, so we can pass it through:
            return passthrough(proxy_video)
        if not self.video_encoder_types or not client_options or not proxy_video:
            #ensure we don't try to re-compress the pixel data in the network layer:
            #(re-add the "compressed" marker that gets lost when we re-assemble packets)
            packet[7] = Compressed("%s pixels" % encoding, packet[7])
            return True
        #video encoding: find existing encoder
        ve = self.video_encoders.get(wid)
        if ve:
            if ve in self.lost_windows:
                #we cannot clean the video encoder here, there may be more frames queue up
                #"lost-window" in encode_loop will take care of it safely
                return False
            #we must verify that the encoder is still valid
            #and scrap it if not (ie: when window is resized)
            if ve.get_width() != width or ve.get_height() != height:
                enclog("closing existing video encoder %s because dimensions have changed from %sx%s to %sx%s",
                       ve, ve.get_width(), ve.get_height(), width, height)
                ve.clean()
                ve = None
            elif ve.get_encoding() != encoding:
                #fixed: the encoder instance was missing from the log arguments
                enclog("closing existing video encoder %s because encoding has changed from %s to %s",
                       ve, ve.get_encoding(), encoding)
                ve.clean()
                ve = None
        #scaling and depth are proxy-encoder attributes:
        scaling = client_options.intlistget("scaling", (1, 1))
        depth = client_options.intget("depth", 24)
        rowstride = client_options.intget("rowstride", rowstride)
        quality = client_options.intget("quality", -1)
        speed = client_options.intget("speed", -1)
        timestamp = client_options.intget("timestamp")
        image = ImageWrapper(x, y, width, height, pixels, rgb_format, depth, rowstride,
                             planes=ImageWrapper.PACKED)
        if timestamp is not None:
            image.set_timestamp(timestamp)
        #the encoder options are passed through:
        encoder_options = client_options.dictget("options", {})
        if not ve:
            #make a new video encoder:
            spec = self._find_video_encoder(encoding, rgb_format)
            if spec is None:
                #no video encoder!
                enc_pillow = get_codec("enc_pillow")
                if not enc_pillow:
                    if first_time("no-video-no-PIL-%s" % rgb_format):
                        enclog.warn("Warning: no video encoder found for rgb format %s", rgb_format)
                        enclog.warn(" sending as plain RGB")
                    return passthrough(True)
                enclog("no video encoder available: sending as jpeg")
                coding, compressed_data, client_options = enc_pillow.encode("jpeg", image, quality, speed, False)[:3]
                return send_updated(coding, compressed_data, client_options)
            enclog("creating new video encoder %s for window %s", spec, wid)
            ve = spec.make_instance()
            #dst_formats is specified with first frame only:
            dst_formats = client_options.strlistget("dst_formats")
            if dst_formats is not None:
                #save it in case we timeout the video encoder,
                #so we can instantiate it again, even from a frame no>1
                self.video_encoders_dst_formats = dst_formats
            else:
                if not self.video_encoders_dst_formats:
                    raise Exception("BUG: dst_formats not specified for proxy and we don't have it either")
                dst_formats = self.video_encoders_dst_formats
            ve.init_context(width, height, rgb_format, dst_formats, encoding, quality, speed, scaling, {})
            self.video_encoders[wid] = ve
            self.video_encoders_last_used_time[wid] = monotonic_time()  #just to make sure this is always set
        #actual video compression:
        enclog("proxy compression using %s with quality=%s, speed=%s", ve, quality, speed)
        data, out_options = ve.compress_image(image, quality, speed, encoder_options)
        #pass through some options if we don't have them from the encoder
        #(maybe we should also use the "pts" from the real server?)
        for k in ("timestamp", "rgb_format", "depth", "csc"):
            if k not in out_options and k in client_options:
                out_options[k] = client_options[k]
        self.video_encoders_last_used_time[wid] = monotonic_time()
        return send_updated(ve.get_encoding(), Compressed(encoding, data), out_options)

    def timeout_video_encoders(self):
        """ Periodic timer callback: ask the encode thread to clean up
            video encoders that have been idle for too long. """
        #have to be careful as another thread may come in...
        #so we just ask the encode thread (which deals with encoders already)
        #to do what may need to be done if we find a timeout:
        now = monotonic_time()
        for wid in tuple(self.video_encoders_last_used_time.keys()):
            #fixed: check for a missing timestamp before doing arithmetic
            #(previously the None check came after the subtraction and could never fire)
            lut = self.video_encoders_last_used_time.get(wid)
            if lut is None:
                continue
            idle_time = int(now - lut)
            enclog("timeout_video_encoders() wid=%s, idle_time=%s", wid, idle_time)
            if idle_time and idle_time > VIDEO_TIMEOUT:
                self.encode_queue.put(["check-video-timeout", wid])
        return True  #run again

    def _find_video_encoder(self, video_encoding, rgb_format):
        """ Find a matching video encoder spec,
            trying 'video_encoding' first then all the other encodings. """
        #try the one specified first, then all the others:
        try_encodings = [video_encoding] + \
            [x for x in self.video_helper.get_encodings() if x != video_encoding]
        for encoding in try_encodings:
            colorspace_specs = self.video_helper.get_encoder_specs(encoding)
            especs = colorspace_specs.get(rgb_format)
            if not especs:
                continue
            for etype in self.video_encoder_types:
                for spec in especs:
                    if etype == spec.codec_type:
                        enclog("_find_video_encoder(%s, %s)=%s", encoding, rgb_format, spec)
                        return spec
        enclog("_find_video_encoder(%s, %s) not found", video_encoding, rgb_format)
        return None

    def video_helper_init(self):
        self.video_helper = getVideoHelper()
        #only use video encoders (no CSC supported in proxy)
        self.video_helper.set_modules(video_encoders=self.video_encoder_modules)
        self.video_helper.init()

    def video_init(self):
        """ Load the codecs and figure out which video encoders
            this proxy can offer (RGB input only, no CSC). """
        enclog("video_init() loading codecs")
        enclog("video_init() loading pillow encoder")
        load_codec("enc_pillow")
        enclog("video_init() will try video encoders: %s", csv(self.video_encoder_modules) or "none")
        self.video_helper_init()
        self.video_encoding_defs = {}
        self.video_encoders = {}
        self.video_encoders_dst_formats = []
        self.video_encoders_last_used_time = {}
        self.video_encoder_types = []
        #figure out which encoders we want to proxy for (if any):
        encoder_types = set()
        for encoding in self.video_helper.get_encodings():
            colorspace_specs = self.video_helper.get_encoder_specs(encoding)
            for colorspace, especs in colorspace_specs.items():
                if colorspace not in ("BGRX", "BGRA", "RGBX", "RGBA"):
                    #only deal with encoders that can handle plain RGB directly
                    continue
                for spec in especs:                     #ie: video_spec("x264")
                    spec_props = spec.to_dict()
                    del spec_props["codec_class"]       #not serializable!
                    #we want to win scoring so we get used ahead of other encoders:
                    spec_props["score_boost"] = 50
                    #limit to 3 video streams we proxy for (we really want 2,
                    # but because of races with garbage collection, we need to allow more)
                    spec_props["max_instances"] = 3
                    #store it in encoding defs:
                    self.video_encoding_defs.setdefault(encoding, {}).setdefault(colorspace, []).append(spec_props)
                    encoder_types.add(spec.codec_type)
        enclog("encoder types found: %s", tuple(encoder_types))
        #remove duplicates and use preferred order:
        order = list(PREFERRED_ENCODER_ORDER)
        for x in tuple(encoder_types):
            if x not in order:
                order.append(x)
        self.video_encoder_types = [x for x in order if x in encoder_types]
        enclog.info("proxy video encoders: %s", csv(self.video_encoder_types or ["none", ]))
        self.timeout_add(VIDEO_TIMEOUT * 1000, self.timeout_video_encoders)
class subprocess_caller(object):
    """
    This is the caller side, wrapping the subprocess.
    You can call send() to pass packets to it
    which will get converted to method calls on the receiving end.
    You can register for signals, in which case your callbacks will be called
    when those signals are forwarded back.
    (there is no validation of which signals are valid or not)
    """

    def __init__(self, description="wrapper"):
        self.process = None             #subprocess.Popen instance, set by start()
        self.protocol = None            #wire protocol over the process stdin/stdout
        self.command = None             #must be set before calling start()
        self.description = description
        self.send_queue = Queue()
        self.signal_callbacks = {}
        self.large_packets = []
        #hook up the default packet handlers:
        self.connect(Protocol.CONNECTION_LOST, self.connection_lost)
        self.connect(Protocol.GIBBERISH, self.gibberish)

    def connect(self, signal, cb, *args):
        """ gobject style signal registration """
        self.signal_callbacks.setdefault(signal, []).append((cb, list(args)))

    def subprocess_exit(self, *args):
        #beware: this may fire more than once!
        log("subprocess_exit%s command=%s", args, self.command)
        self._fire_callback("exit")

    def start(self):
        """ Launch the subprocess and start the packet protocol over its pipes. """
        self.process = self.exec_subprocess()
        self.protocol = self.make_protocol()
        self.protocol.start()

    def make_protocol(self):
        """ Create the Protocol instance using the process stdin / stdout. """
        conn = TwoFileConnection(self.process.stdin, self.process.stdout,
                                 abort_test=None, target=self.description,
                                 info=self.description, close_cb=self.subprocess_exit)
        conn.timeout = 0
        protocol = Protocol(gobject, conn, self.process_packet, get_packet_cb=self.get_packet)
        #we assume the other end has the same encoders (which is reasonable):
        try:
            protocol.enable_encoder("rencode")
        except Exception as e:
            log.warn("failed to enable rencode: %s", e)
            #fallback to bencoder:
            protocol.enable_encoder("bencode")
        #we assume this is local, so no compression:
        protocol.enable_compressor("none")
        protocol.large_packets = self.large_packets
        return protocol

    def exec_subprocess(self):
        """ Run self.command and register it with the child reaper. """
        kwargs = self.exec_kwargs()
        log("exec_subprocess() command=%s, kwargs=%s", self.command, kwargs)
        proc = subprocess.Popen(self.command,
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                stderr=sys.stderr.fileno(),
                                env=self.get_env(), **kwargs)
        getChildReaper().add_process(proc, self.description, self.command,
                                     True, True, callback=self.subprocess_exit)
        return proc

    def get_env(self):
        """ Environment for the subprocess: inherit ours, with a few tweaks. """
        env = os.environ.copy()
        env["XPRA_SKIP_UI"] = "1"
        env["XPRA_LOG_PREFIX"] = "%s " % self.description
        #on win32, the environment can end up containing unicode,
        #and subprocess chokes on it, so re-encode all the values:
        for k, v in env.items():
            try:
                env[k] = bytestostr(v.encode("utf8"))
            except Exception:
                env[k] = bytestostr(v)
        return env

    def exec_kwargs(self):
        """ Platform specific keyword arguments for subprocess.Popen. """
        if os.name == "posix":
            return {"close_fds": True}
        if sys.platform.startswith("win"):
            if not WIN32_SHOWWINDOW:
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                return {"startupinfo": startupinfo}
        return {}

    def cleanup(self):
        self.stop()

    def stop(self):
        self.stop_process()
        #call via idle_add to prevent deadlocks on win32!
        gobject.idle_add(self.stop_protocol)

    def stop_process(self):
        """ Ask the wrapped subprocess to terminate, if it is still running. """
        log("stop() sending stop request to %s", self.description)
        proc = self.process
        if proc and proc.poll() is None:
            try:
                proc.terminate()
                self.process = None
            except Exception as e:
                log.warn("failed to stop the wrapped subprocess %s: %s", proc, e)

    def stop_protocol(self):
        """ Close the wire protocol (and its connection). """
        p = self.protocol
        if p:
            self.protocol = None
            log("%s.stop() calling %s", self, p.close)
            try:
                p.close()
            except Exception as e:
                #the format string was missing a placeholder for the error value:
                log.warn("failed to close the subprocess connection %s: %s", p, e)

    def connection_lost(self, *args):
        log("connection_lost%s", args)
        self.stop()

    def gibberish(self, *args):
        log("gibberish%s", args)
        self.stop()

    def get_packet(self):
        """ Called by the protocol to fetch the next packet to send. """
        try:
            item = self.send_queue.get(False)
        except Exception:
            item = None
        return (item, None, None, self.send_queue.qsize() > 0)

    def send(self, *packet_data):
        self.send_queue.put(packet_data)
        p = self.protocol
        if p:
            p.source_has_more()

    def process_packet(self, proto, packet):
        if DEBUG_WRAPPER:
            log("process_packet(%s, %s)", proto, [str(x)[:32] for x in packet])
        signal_name = bytestostr(packet[0])
        self._fire_callback(signal_name, packet[1:])

    def _fire_callback(self, signal_name, extra_args=()):
        #use an immutable default instead of a shared mutable list:
        callbacks = self.signal_callbacks.get(signal_name)
        log("firing callback for %s: %s", signal_name, callbacks)
        if callbacks:
            for cb, args in callbacks:
                try:
                    all_args = list(args) + list(extra_args)
                    gobject.idle_add(cb, self, *all_args)
                except Exception:
                    log.error("error processing callback %s for %s packet",
                              cb, signal_name, exc_info=True)
class subprocess_caller(object):
    """
    This is the caller side, wrapping the subprocess.
    You can call send() to pass packets to it
    which will get converted to method calls on the receiving end.
    You can register for signals, in which case your callbacks will be called
    when those signals are forwarded back.
    (there is no validation of which signals are valid or not)
    """

    def __init__(self, description="wrapper"):
        self.process = None             #subprocess.Popen instance, set by start()
        self.protocol = None            #wire protocol over the process stdin/stdout
        self.command = None             #must be set before calling start()
        self.description = description
        self.send_queue = Queue()
        self.signal_callbacks = {}
        self.large_packets = []
        #hook up the default packet handlers:
        self.connect(Protocol.CONNECTION_LOST, self.connection_lost)
        self.connect(Protocol.GIBBERISH, self.gibberish)
        #expose the glib scheduling functions:
        glib = import_glib()
        self.idle_add = glib.idle_add
        self.timeout_add = glib.timeout_add
        self.source_remove = glib.source_remove

    def connect(self, signal, cb, *args):
        """ gobject style signal registration """
        self.signal_callbacks.setdefault(signal, []).append((cb, list(args)))

    def subprocess_exit(self, *args):
        #beware: this may fire more than once!
        log("subprocess_exit%s command=%s", args, self.command)
        self._fire_callback("exit")

    def start(self):
        """ Launch the subprocess and start the packet protocol.
            May only be called once. """
        self.start = self.fail_start
        self.process = self.exec_subprocess()
        self.protocol = self.make_protocol()
        self.protocol.start()

    def fail_start(self):
        raise Exception("this wrapper has already been started")

    def abort_test(self, action):
        """ Raises ConnectionClosedException if the subprocess has terminated. """
        p = self.process
        if p is None or p.poll():
            raise ConnectionClosedException("cannot %s: subprocess has terminated" % action)

    def make_protocol(self):
        """ Create the Protocol instance using the process stdin / stdout. """
        conn = TwoFileConnection(self.process.stdin, self.process.stdout,
                                 abort_test=self.abort_test, target=self.description,
                                 socktype=self.description, close_cb=self.subprocess_exit)
        conn.timeout = 0
        protocol = Protocol(self, conn, self.process_packet, get_packet_cb=self.get_packet)
        setup_fastencoder_nocompression(protocol)
        protocol.large_packets = self.large_packets
        return protocol

    def exec_subprocess(self):
        """ Run self.command and register it with the child reaper. """
        kwargs = exec_kwargs()
        env = self.get_env()
        log("exec_subprocess() command=%s, env=%s, kwargs=%s", self.command, env, kwargs)
        proc = subprocess.Popen(self.command,
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                stderr=sys.stderr.fileno(),
                                env=env, **kwargs)
        getChildReaper().add_process(proc, self.description, self.command,
                                     True, True, callback=self.subprocess_exit)
        return proc

    def get_env(self):
        """ Environment for the subprocess. """
        env = exec_env()
        env["XPRA_LOG_PREFIX"] = "%s " % self.description
        return env

    def cleanup(self):
        self.stop()

    def stop(self):
        self.stop_process()
        self.stop_protocol()

    def stop_process(self):
        """ Ask the wrapped subprocess to terminate, if it is still running. """
        log("%s.stop_process() sending stop request to %s", self, self.description)
        proc = self.process
        if proc and proc.poll() is None:
            try:
                proc.terminate()
                self.process = None
            except Exception as e:
                log.warn("failed to stop the wrapped subprocess %s: %s", proc, e)

    def stop_protocol(self):
        """ Close the wire protocol (and its connection). """
        p = self.protocol
        if p:
            self.protocol = None
            log("%s.stop_protocol() calling %s", self, p.close)
            try:
                p.close()
            except Exception as e:
                #the format string was missing a placeholder for the error value:
                log.warn("failed to close the subprocess connection %s: %s", p, e)

    def connection_lost(self, *args):
        log("connection_lost%s", args)
        self.stop()

    def gibberish(self, *args):
        log.warn("%s stopping on gibberish:", self.description)
        log.warn(" %s", repr_ellipsized(args[1], limit=80))
        self.stop()

    def get_packet(self):
        """ Called by the protocol to fetch the next packet to send. """
        try:
            item = self.send_queue.get(False)
        except Exception:
            item = None
        return (item, None, None, self.send_queue.qsize() > 0)

    def send(self, *packet_data):
        self.send_queue.put(packet_data)
        p = self.protocol
        if p:
            p.source_has_more()
        INJECT_FAULT(p)

    def process_packet(self, proto, packet):
        if DEBUG_WRAPPER:
            log("process_packet(%s, %s)", proto, [str(x)[:32] for x in packet])
        signal_name = bytestostr(packet[0])
        self._fire_callback(signal_name, packet[1:])
        INJECT_FAULT(proto)

    def _fire_callback(self, signal_name, extra_args=()):
        #use an immutable default instead of a shared mutable list:
        callbacks = self.signal_callbacks.get(signal_name)
        log("firing callback for '%s': %s", signal_name, callbacks)
        if callbacks:
            for cb, args in callbacks:
                try:
                    all_args = list(args) + list(extra_args)
                    self.idle_add(cb, self, *all_args)
                except Exception:
                    log.error("error processing callback %s for %s packet",
                              cb, signal_name, exc_info=True)
class subprocess_callee(object):
    """
    This is the callee side, wrapping the gobject we want to interact with.
    All the input received will be converted to method calls on the wrapped object.
    Subclasses should register the signal handlers they want to see exported
    back to the caller.
    The convenience connect_export(signal-name, *args) can be used to forward
    signals unmodified. You can also call send() to pass packets back to the caller.
    (there is no validation of which signals are valid or not)
    """

    def __init__(self, input_filename="-", output_filename="-", wrapped_object=None, method_whitelist=None):
        self.name = ""
        self.input_filename = input_filename
        self.output_filename = output_filename
        self.method_whitelist = method_whitelist
        self.large_packets = []
        #the gobject instance which is wrapped:
        self.wrapped_object = wrapped_object
        self.send_queue = Queue()
        self.protocol = None
        if HANDLE_SIGINT:
            #this breaks gobject3!
            signal.signal(signal.SIGINT, self.handle_signal)
            signal.signal(signal.SIGTERM, self.handle_signal)
        self.setup_mainloop()

    def setup_mainloop(self):
        """ Create the glib main loop and expose its scheduling functions. """
        glib = import_glib()
        self.mainloop = glib.MainLoop()
        self.idle_add = glib.idle_add
        self.timeout_add = glib.timeout_add
        self.source_remove = glib.source_remove

    def connect_export(self, signal_name, *user_data):
        """ gobject style signal registration for the wrapped object,
            the signals will automatically be forwarded to the wrapper process
            using send(signal_name, *signal_args, *user_data)
        """
        log("connect_export%s", [signal_name] + list(user_data))
        args = list(user_data) + [signal_name]
        self.wrapped_object.connect(signal_name, self.export, *args)

    def export(self, *args):
        #forward the signal to the caller over the wire:
        signal_name = args[-1]
        log("export(%s, ...)", signal_name)
        data = args[1:-1]
        self.send(signal_name, *list(data))

    def start(self):
        """ Start the protocol and run the main loop.
            Returns the process exit code. """
        self.protocol = self.make_protocol()
        self.protocol.start()
        try:
            self.run()
            return 0
        except KeyboardInterrupt as e:
            if str(e):
                log.warn("%s", e)
            return 0
        except Exception:
            log.error("error in main loop", exc_info=True)
            return 1
        finally:
            self.cleanup()
            if self.protocol:
                self.protocol.close()
                self.protocol = None
            #best effort: close the streams we may have wrapped
            if self.input_filename == "-":
                try:
                    self._input.close()
                except Exception:
                    pass
            if self.output_filename == "-":
                try:
                    self._output.close()
                except Exception:
                    pass

    def make_protocol(self):
        """ Create the Protocol instance reading / writing from the
            input / output files (stdin / stdout by default). """
        #figure out where we read from and write to:
        if self.input_filename == "-":
            #disable stdin buffering:
            self._input = os.fdopen(sys.stdin.fileno(), 'rb', 0)
            setbinarymode(self._input.fileno())
        else:
            self._input = open(self.input_filename, 'rb')
        if self.output_filename == "-":
            #disable stdout buffering:
            self._output = os.fdopen(sys.stdout.fileno(), 'wb', 0)
            setbinarymode(self._output.fileno())
        else:
            self._output = open(self.output_filename, 'wb')
        #stdin and stdout wrapper:
        conn = TwoFileConnection(self._output, self._input,
                                 abort_test=None, target=self.name,
                                 socktype=self.name, close_cb=self.net_stop)
        conn.timeout = 0
        protocol = Protocol(self, conn, self.process_packet, get_packet_cb=self.get_packet)
        setup_fastencoder_nocompression(protocol)
        protocol.large_packets = self.large_packets
        return protocol

    def run(self):
        self.mainloop.run()

    def net_stop(self):
        #this is called from the network thread,
        #we use idle add to ensure we clean things up from the main thread
        log("net_stop() will call stop from main thread")
        self.idle_add(self.stop)

    def cleanup(self):
        pass

    def stop(self):
        self.cleanup()
        p = self.protocol
        log("stop() protocol=%s", p)
        if p:
            self.protocol = None
            p.close()
        self.do_stop()

    def do_stop(self):
        log("stop() stopping mainloop %s", self.mainloop)
        self.mainloop.quit()

    def handle_signal(self, sig, frame):
        """ This is for OS signals SIGINT and SIGTERM """
        #next time, just stop:
        signal.signal(signal.SIGINT, self.signal_stop)
        signal.signal(signal.SIGTERM, self.signal_stop)
        signame = SIGNAMES.get(sig, sig)
        try:
            log("handle_signal(%s, %s) calling stop from main thread", signame, frame)
        except Exception:
            pass        #may fail if we were doing IO logging when the signal was received
        self.send("signal", signame)
        self.timeout_add(0, self.cleanup)
        #give time for the network layer to send the signal message:
        self.timeout_add(150, self.stop)

    def signal_stop(self, sig, frame):
        """ This time we really want to exit without waiting """
        signame = SIGNAMES.get(sig, sig)
        log("signal_stop(%s, %s) calling stop", signame, frame)
        self.stop()

    def send(self, *args):
        """ Queue a packet for the caller and wake up the protocol. """
        if HEXLIFY_PACKETS:
            #'args' is a tuple: concatenating a list to it raises a TypeError,
            #so build a tuple for the hexlified payload items:
            args = args[:1] + tuple(binascii.hexlify(str(x)[:32]) for x in args[1:])
        log("send: adding '%s' message (%s items already in queue)", args[0], self.send_queue.qsize())
        self.send_queue.put(args)
        p = self.protocol
        if p:
            p.source_has_more()
        INJECT_FAULT(p)

    def get_packet(self):
        """ Called by the protocol to fetch the next packet to send. """
        try:
            item = self.send_queue.get(False)
        except Exception:
            item = None
        return (item, None, None, self.send_queue.qsize() > 0)

    def process_packet(self, proto, packet):
        """ Dispatch an incoming packet:
            either a control message, or a method call on the wrapped object. """
        command = bytestostr(packet[0])
        if command == Protocol.CONNECTION_LOST:
            log("connection-lost: %s, calling stop", packet[1:])
            self.net_stop()
            return
        if command == Protocol.GIBBERISH:
            log.warn("gibberish received:")
            log.warn(" %s", repr_ellipsized(packet[1], limit=80))
            log.warn(" stopping")
            self.net_stop()
            return
        if command == "stop":
            log("received stop message")
            self.net_stop()
            return
        if command == "exit":
            log("received exit message")
            sys.exit(0)         #never returns
        #make it easier to hookup signals to methods:
        attr = command.replace("-", "_")
        if self.method_whitelist is not None and attr not in self.method_whitelist:
            log.warn("invalid command: %s (not in whitelist: %s)", attr, self.method_whitelist)
            return
        wo = self.wrapped_object
        if not wo:
            log("wrapped object is no more, ignoring method call '%s'", attr)
            return
        method = getattr(wo, attr, None)
        if not method:
            log.warn("unknown command: '%s'", attr)
            log.warn(" packet: '%s'", repr_ellipsized(str(packet)))
            return
        if DEBUG_WRAPPER:
            log("calling %s.%s%s", wo, attr, str(tuple(packet[1:]))[:128])
        #call from the main thread:
        self.idle_add(method, *packet[1:])
        INJECT_FAULT(proto)
class QueueScheduler(object):
    """
    Emulates glib's idle_add / timeout_add / source_remove API
    using a Queue as the main loop and threading Timers for timeouts.
    run() must be called from the thread acting as the "main thread".
    """

    def __init__(self):
        self.main_queue = Queue()
        self.exit = False
        self.timer_id = AtomicInteger()
        #maps timer id -> Timer instance, or False for idle_add entries:
        self.timers = {}
        self.timer_lock = RLock()

    def source_remove(self, tid):
        """ Cancel the timer (or idle call) with the given id. """
        log("source_remove(%i)", tid)
        with self.timer_lock:
            try:
                timer = self.timers[tid]
                if timer is not None:
                    del self.timers[tid]
                if timer:
                    timer.cancel()
            except KeyError:
                pass

    def idle_add(self, fn, *args, **kwargs):
        """ Schedule fn to run from the main loop,
            repeatedly for as long as it returns True. """
        tid = self.timer_id.increase()
        self.main_queue.put((self.idle_repeat_call, (tid, fn, args, kwargs), {}))
        #add an entry, but use the value False
        #to stop us from trying to call cancel():
        #(hold the lock so source_remove cannot race with this update)
        with self.timer_lock:
            self.timers[tid] = False
        return tid

    def idle_repeat_call(self, tid, fn, args, kwargs):
        if tid not in self.timers:
            return False        #cancelled
        return fn(*args, **kwargs)

    def timeout_add(self, timeout, fn, *args, **kwargs):
        """ Schedule fn to run after timeout milliseconds,
            repeatedly for as long as it returns True. """
        tid = self.timer_id.increase()
        self.do_timeout_add(tid, timeout, fn, *args, **kwargs)
        return tid

    def do_timeout_add(self, tid, timeout, fn, *args, **kwargs):
        #emulate glib's timeout_add using Timers
        args = (tid, timeout, fn, args, kwargs)
        t = Timer(timeout / 1000.0, self.queue_timeout_function, args)
        #record the timer under the lock so source_remove can cancel it safely:
        with self.timer_lock:
            self.timers[tid] = t
        t.start()

    def queue_timeout_function(self, tid, timeout, fn, fn_args, fn_kwargs):
        if tid not in self.timers:
            return              #cancelled
        #add to run queue:
        mqargs = [tid, timeout, fn, fn_args, fn_kwargs]
        self.main_queue.put((self.timeout_repeat_call, mqargs, {}))

    def timeout_repeat_call(self, tid, timeout, fn, fn_args, fn_kwargs):
        #executes the function then re-schedules it (if it returns True)
        if tid not in self.timers:
            return False        #cancelled
        v = fn(*fn_args, **fn_kwargs)
        if bool(v):
            #create a new timer with the same tid:
            with self.timer_lock:
                if tid in self.timers:
                    self.do_timeout_add(tid, timeout, fn, *fn_args, **fn_kwargs)
        else:
            #one-shot timer: drop the bookkeeping entry under the lock:
            with self.timer_lock:
                try:
                    del self.timers[tid]
                except KeyError:
                    pass
        #we do the scheduling via timers, so always return False here
        #so that the main queue won't re-schedule this function call itself:
        return False

    def run(self):
        """ The "main loop": process idle_add / timeout_add callbacks
            until stop() is called (or a None exit marker is queued). """
        log("run() queue has %s items already in it", self.main_queue.qsize())
        while not self.exit:
            log("run() size=%s", self.main_queue.qsize())
            v = self.main_queue.get()
            if v is None:
                log("run() None exit marker")
                break
            fn, args, kwargs = v
            log("run() %s%s%s", fn, args, kwargs)
            try:
                r = fn(*args, **kwargs)
                if bool(r):
                    #re-run it
                    self.main_queue.put(v)
            except Exception:
                log.error("error during main loop callback %s", fn, exc_info=True)
        self.exit = True

    def stop(self):
        self.exit = True
        self.stop_main_queue()

    def stop_main_queue(self):
        self.main_queue.put(None)
        #empty the main queue:
        q = Queue()
        q.put(None)
        self.main_queue = q