def close(self):
    """Close the protocol instance.

    Schedules the CONNECTION_LOST callback, closes the underlying
    connection (logging transfer statistics once), stops the queue
    threads and schedules final cleanup. Idempotent.
    """
    debug("close() closed=%s", self._closed)
    if self._closed:
        return
    self._closed = True
    #notify the packet handler asynchronously that the connection is gone:
    self.scheduler.idle_add(self._process_packet_cb, self, [Protocol.CONNECTION_LOST])
    if self._conn:
        try:
            self._conn.close()
            if self._log_stats is None and self._conn.input_bytecount == 0 and self._conn.output_bytecount == 0:
                # no data sent or received, skip logging of stats:
                self._log_stats = False
            if self._log_stats:
                log.info(
                    "connection closed after %s packets received (%s bytes) and %s packets sent (%s bytes)",
                    std_unit(self.input_packetcount),
                    std_unit_dec(self._conn.input_bytecount),
                    std_unit(self.output_packetcount),
                    std_unit_dec(self._conn.output_bytecount),
                )
        except Exception:
            #narrowed from a bare "except:" so that SystemExit and
            #KeyboardInterrupt are no longer swallowed here:
            log.error("error closing %s", self._conn, exc_info=True)
        self._conn = None
    self.terminate_queue_threads()
    self.scheduler.idle_add(self.clean)
def close(self):
    """Close the Protocol.

    Notifies the packet handler of the connection loss, closes the
    underlying connection (logging transfer statistics once), stops
    the queue threads and schedules final cleanup. Idempotent.
    """
    log("Protocol.close() closed=%s, connection=%s", self._closed, self._conn)
    if self._closed:
        return
    self._closed = True
    self.idle_add(self._process_packet_cb, self, [Protocol.CONNECTION_LOST])
    c = self._conn
    if c:
        try:
            log("Protocol.close() calling %s", c.close)
            c.close()
            if self._log_stats is None and self._conn.input_bytecount==0 and self._conn.output_bytecount==0:
                #no data sent or received, skip logging of stats:
                self._log_stats = False
            if self._log_stats:
                from xpra.simple_stats import std_unit, std_unit_dec
                log.info("connection closed after %s packets received (%s bytes) and %s packets sent (%s bytes)",
                         std_unit(self.input_packetcount), std_unit_dec(self._conn.input_bytecount),
                         std_unit(self.output_packetcount), std_unit_dec(self._conn.output_bytecount)
                         )
        except Exception:
            #narrowed from a bare "except:" so that SystemExit and
            #KeyboardInterrupt are no longer swallowed here:
            log.error("error closing %s", self._conn, exc_info=True)
        self._conn = None
    self.terminate_queue_threads()
    self.idle_add(self.clean)
    log("Protocol.close() done")
def _process_bandwidth_limit(self, proto, packet):
    """Handle a "bandwidth-limit" request packet from a client.

    The requested value is clamped against the server-wide limit
    (``self.bandwidth_limit``) and the MIN/MAX constants, then stored
    on the client's source object and logged.
    """
    log("_process_bandwidth_limit(%s, %s)", proto, packet)
    ss = self.get_server_source(proto)
    if not ss:
        #no source for this connection (ie: already disconnected):
        return
    bandwidth_limit = packet[1]
    #NOTE(review): `long` only exists on Python 2 - confirm this module
    #still targets Python 2, otherwise this isinstance call raises NameError:
    if not isinstance(bandwidth_limit, (int, long)):
        raise TypeError("bandwidth-limit must be an integer, not %s" % type(bandwidth_limit))
    #requests above the server-wide cap, and non-positive values,
    #fall back to the server-wide limit (0 meaning "no limit"):
    if (self.bandwidth_limit and bandwidth_limit >= self.bandwidth_limit) or bandwidth_limit <= 0:
        bandwidth_limit = self.bandwidth_limit or 0
    if ss.bandwidth_limit == bandwidth_limit:
        #unchanged
        log("bandwidth limit unchanged: %s", std_unit(bandwidth_limit))
        return
    if bandwidth_limit < MIN_BANDWIDTH_LIMIT:
        log.warn("Warning: bandwidth limit requested is too low (%s)", std_unit(bandwidth_limit))
        bandwidth_limit = MIN_BANDWIDTH_LIMIT
    if bandwidth_limit >= MAX_BANDWIDTH_LIMIT:
        log("bandwidth limit over maximum, using no-limit instead")
        bandwidth_limit = 0
    ss.bandwidth_limit = bandwidth_limit
    #we can't assume to have a full ClientConnection object:
    client_id = getattr(ss, "counter", "")
    if bandwidth_limit == 0:
        bandwidthlog.info("bandwidth-limit restrictions removed for client %s", client_id)
    else:
        bandwidthlog.info("bandwidth-limit changed to %sbps for client %s", std_unit(bandwidth_limit), client_id)
def close(self):
    """Close the Protocol.

    Notifies the packet handler of the connection loss, closes the
    underlying connection (logging transfer statistics once), stops
    the queue threads and schedules final cleanup. Idempotent.
    """
    log("Protocol.close() closed=%s, connection=%s", self._closed, self._conn)
    if self._closed:
        return
    self._closed = True
    self.idle_add(self._process_packet_cb, self, [Protocol.CONNECTION_LOST])
    c = self._conn
    if c:
        try:
            log("Protocol.close() calling %s", c.close)
            c.close()
            if self._log_stats is None and self._conn.input_bytecount == 0 and self._conn.output_bytecount == 0:
                #no data sent or received, skip logging of stats:
                self._log_stats = False
            if self._log_stats:
                from xpra.simple_stats import std_unit, std_unit_dec
                log.info(
                    "connection closed after %s packets received (%s bytes) and %s packets sent (%s bytes)",
                    std_unit(self.input_packetcount), std_unit_dec(self._conn.input_bytecount),
                    std_unit(self.output_packetcount), std_unit_dec(self._conn.output_bytecount))
        except Exception:
            #narrowed from a bare "except:" so that SystemExit and
            #KeyboardInterrupt are no longer swallowed here:
            log.error("error closing %s", self._conn, exc_info=True)
        self._conn = None
    self.terminate_queue_threads()
    self.idle_add(self.clean)
    log("Protocol.close() done")
def do_control_file_command(self, command_type, client_uuids, filename, source_flag_name, send_file_args):
    """Load a local file and send it to the matching clients.

    :param command_type: description used in log/result messages
    :param client_uuids: selector passed to _control_get_sources()
    :param filename: path of the file to send ("~" is expanded)
    :param source_flag_name: attribute checked on each source to see
        whether the client supports this feature
    :param send_file_args: extra positional arguments for ss.send_file()
    :raises ControlError: if no client matches, the file is missing or
        unreadable, or it exceeds the server's file size limit
    :returns: a human readable status string
    """
    #find the clients:
    sources = self._control_get_sources(client_uuids)
    if not sources:
        raise ControlError("no clients found matching: %s" % client_uuids)
    def checksize(file_size):
        #enforce the server-wide limit (per-client limits are checked below):
        if file_size > self.file_transfer.file_size_limit:
            raise ControlError(
                "file '%s' is too large: %sB (limit is %sB)"
                % (filename, std_unit(file_size), std_unit(self.file_transfer.file_size_limit)))
    #find the file and load it:
    actual_filename = os.path.abspath(os.path.expanduser(filename))
    try:
        stat = os.stat(actual_filename)
        log("os.stat(%s)=%s", actual_filename, stat)
    except os.error:
        log("os.stat(%s)", actual_filename, exc_info=True)
    else:
        #check the size early, before reading all the data into memory:
        checksize(stat.st_size)
    if not os.path.exists(actual_filename):
        raise ControlError("file '%s' does not exist" % filename)
    data = load_binary_file(actual_filename)
    if data is None:
        raise ControlError("failed to load '%s'" % actual_filename)
    #verify size:
    file_size = len(data)
    checksize(file_size)
    #send it to each client:
    for ss in sources:
        #ie: ServerSource.file_transfer (found in FileTransferAttributes)
        if not getattr(ss, source_flag_name, False):
            #skip the warning if the client is not interactive
            #(for now just check for 'top' client):
            if ss.client_type == "top":
                l = log
            else:
                l = log.warn
            l("Warning: cannot %s '%s' to %s client", command_type, filename, ss.client_type)
            l(" client %s does not support this feature", ss.uuid)
        elif file_size > ss.file_size_limit:
            #this client has a lower per-client limit:
            log.warn("Warning: cannot %s '%s'", command_type, filename)
            log.warn(" client %s file size limit is %sB (file is %sB)",
                     ss, std_unit(ss.file_size_limit), std_unit(file_size))
        else:
            ss.send_file(filename, "", data, file_size, *send_file_args)
    return "%s of '%s' to %s initiated" % (command_type, filename, client_uuids)
def parse_server_capabilities(self):
    """Parse the mmap related server capabilities.

    Verifies the token the server wrote back into the shared memory
    area; on mismatch, mmap is disabled and the client quits with
    EXIT_MMAP_TOKEN_FAILURE. On success the local mmap backing file
    is deleted (the server holds its own handle by then).
    """
    c = self.server_capabilities
    #mmap must be supported locally, enabled, and confirmed by the server:
    self.mmap_enabled = self.supports_mmap and self.mmap_enabled and c.boolget("mmap_enabled")
    if self.mmap_enabled:
        from xpra.net.mmap_pipe import read_mmap_token, DEFAULT_TOKEN_INDEX, DEFAULT_TOKEN_BYTES
        def iget(attrname, default_value=0):
            #look up both the flat "mmap_*" and the namespaced "mmap.*" forms:
            return c.intget("mmap_%s" % attrname) or c.intget("mmap.%s" % attrname) or default_value
        mmap_token = iget("token")
        mmap_token_index = iget("token_index", DEFAULT_TOKEN_INDEX)
        mmap_token_bytes = iget("token_bytes", DEFAULT_TOKEN_BYTES)
        token = read_mmap_token(self.mmap, mmap_token_index, mmap_token_bytes)
        if token != mmap_token:
            log.error("Error: mmap token verification failed!")
            log.error(" expected '%#x'", token)
            log.error(" found '%#x'", mmap_token)
            self.mmap_enabled = False
            self.quit(EXIT_MMAP_TOKEN_FAILURE)
            return
        log.info("enabled fast mmap transfers using %sB shared memory area",
                 std_unit(self.mmap_size, unit=1024))
        #the server will have a handle on the mmap file by now, safe to delete:
        self.clean_mmap()
    return True
def close(self):
    """Close: schedule the CONNECTION_LOST callback, close the
    underlying connection (logging traffic statistics), stop the
    I/O threads and schedule final cleanup. Idempotent.
    """
    if self._closed:
        return
    self._closed = True
    scheduler.idle_add(self._process_packet_cb, self, [Protocol.CONNECTION_LOST])
    if self._conn:
        try:
            self._conn.close()
            log.info("connection closed after %s packets received (%s bytes) and %s packets sent (%s bytes)",
                     std_unit(self.input_packetcount), std_unit_dec(self._conn.input_bytecount),
                     std_unit(self.output_packetcount), std_unit_dec(self._conn.output_bytecount)
                     )
        except Exception:
            #narrowed from a bare "except:" so that SystemExit and
            #KeyboardInterrupt are no longer swallowed here:
            log.error("error closing %s", self._conn, exc_info=True)
        self._conn = None
    self.terminate_io_threads()
    scheduler.idle_add(self.clean)
def parse_client_caps(self, c):
    """Parse the client's mmap capabilities and set up the shared memory area.

    Opens the mmap file supplied by the client (or the server-configured
    path), verifies the client's token, and writes our own token back so
    the client can verify us. On any failure the mmap area is discarded
    and ``self.mmap_size`` reset to 0.

    :param c: the client capabilities (a typedict-like object)
    """
    self.mmap_client_namespace = c.boolget("mmap.namespace", False)
    #newer clients use a "mmap." prefix, older ones "mmap_":
    sep = ["_", "."][self.mmap_client_namespace]
    def mmapattr(k):
        return "mmap%s%s" % (sep, k)
    mmap_filename = c.strget(mmapattr("file"))
    if not mmap_filename:
        #client did not request mmap:
        return
    mmap_size = c.intget(mmapattr("size"), 0)
    log("client supplied mmap_file=%s", mmap_filename)
    mmap_token = c.intget(mmapattr("token"))
    log("mmap supported=%s, token=%s", self.supports_mmap, mmap_token)
    if self.mmap_filename:
        log("using global server specified mmap file path: '%s'", self.mmap_filename)
        mmap_filename = self.mmap_filename
    if not self.supports_mmap:
        #fixed: the format string was missing a placeholder for mmap_filename:
        log("client enabled mmap (%s) but mmap mode is not supported", mmap_filename)
    elif WIN32 and mmap_filename.startswith("/"):
        log("mmap_file '%s' is a unix path", mmap_filename)
    elif not os.path.exists(mmap_filename):
        log("mmap_file '%s' cannot be found!", mmap_filename)
    else:
        from xpra.net.mmap_pipe import init_server_mmap, read_mmap_token, write_mmap_token, DEFAULT_TOKEN_INDEX, DEFAULT_TOKEN_BYTES
        self.mmap, self.mmap_size = init_server_mmap(mmap_filename, mmap_size)
        log("found client mmap area: %s, %i bytes - min mmap size=%i",
            self.mmap, self.mmap_size, self.min_mmap_size)
        if self.mmap_size>0:
            index = c.intget(mmapattr("token_index"), DEFAULT_TOKEN_INDEX)
            count = c.intget(mmapattr("token_bytes"), DEFAULT_TOKEN_BYTES)
            v = read_mmap_token(self.mmap, index, count)
            log("mmap_token=%#x, verification=%#x", mmap_token, v)
            if v!=mmap_token:
                log.warn("Warning: mmap token verification failed, not using mmap area!")
                log.warn(" expected '%#x', found '%#x'", mmap_token, v)
                self.mmap.close()
                self.mmap = None
                self.mmap_size = 0
            elif self.mmap_size<self.min_mmap_size:
                log.warn("Warning: client supplied mmap area is too small, discarding it")
                log.warn(" we need at least %iMB and this area is %iMB",
                         self.min_mmap_size//1024//1024, self.mmap_size//1024//1024)
                self.mmap.close()
                self.mmap = None
                self.mmap_size = 0
            else:
                from xpra.os_util import get_int_uuid
                self.mmap_client_token = get_int_uuid()
                self.mmap_client_token_bytes = DEFAULT_TOKEN_BYTES
                if c.intget("mmap_token_index"):
                    #we can write the token anywhere we want and tell the client,
                    #so write it right at the end:
                    self.mmap_client_token_index = self.mmap_size-self.mmap_client_token_bytes
                else:
                    #use the expected default for older versions:
                    self.mmap_client_token_index = DEFAULT_TOKEN_INDEX
                write_mmap_token(self.mmap,
                                 self.mmap_client_token,
                                 self.mmap_client_token_index,
                                 self.mmap_client_token_bytes)
    if self.mmap_size>0:
        log.info(" mmap is enabled using %sB area in %s",
                 std_unit(self.mmap_size, unit=1024), mmap_filename)
def _process_bandwidth_limit(self, proto, packet):
    """Apply the client's requested bandwidth limit, capped by the server-wide limit."""
    source = self._server_sources.get(proto)
    if not source:
        #no source for this connection (ie: already disconnected):
        return
    requested = packet[1]
    cap = self.bandwidth_limit
    limit = min(cap, requested) if cap else requested
    source.bandwidth_limit = limit
    bandwidthlog.info("bandwidth-limit changed to %sbps for client %i",
                      std_unit(limit), source.counter)
def close(self):
    """Close: schedule the CONNECTION_LOST callback, close the
    underlying connection (logging traffic statistics), stop the
    I/O threads and schedule final cleanup. Idempotent.
    """
    if self._closed:
        return
    self._closed = True
    scheduler.idle_add(self._process_packet_cb, self, [Protocol.CONNECTION_LOST])
    if self._conn:
        try:
            self._conn.close()
            log.info(
                "connection closed after %s packets received (%s bytes) and %s packets sent (%s bytes)",
                std_unit(self.input_packetcount), std_unit_dec(self._conn.input_bytecount),
                std_unit(self.output_packetcount), std_unit_dec(self._conn.output_bytecount))
        except Exception:
            #narrowed from a bare "except:" so that SystemExit and
            #KeyboardInterrupt are no longer swallowed here:
            log.error("error closing %s", self._conn, exc_info=True)
        self._conn = None
    self.terminate_io_threads()
    scheduler.idle_add(self.clean)
def _audio_info(self, ci, mode="speaker"):
    """Return a one-line human readable description of the audio state for `mode`."""
    raw = self.dictget(ci, "sound", mode)
    if not raw:
        return "%s off" % mode
    info = typedict(raw)
    #prefer the friendly description, then the codec name, then the state:
    description = (info.strget("codec_description")
                   or info.strget("codec")
                   or info.strget("state", "unknown"))
    parts = ["%s: %s" % (mode, description)]
    bitrate = info.intget("bitrate")
    if bitrate:
        parts.append("%sbps" % std_unit(bitrate))
    return " ".join(parts)
def send(self, packet):
    """Queue a packet for sending, starting the write thread if needed."""
    if self._closed:
        log("connection is closed already, not sending packet")
        return
    if log.is_debug_enabled():
        size = len(packet)
        if size <= 16:
            log("send(%i bytes: %s)", size, hexstr(packet))
        else:
            #only show a prefix for large packets:
            from xpra.simple_stats import std_unit
            log("send(%sBytes: %s..)", std_unit(size), hexstr(packet[:16]))
    if self._write_thread is None:
        self.start_write_thread()
    self._write_queue.put(packet)
def close(self):
    """Close the protocol instance.

    Schedules the CONNECTION_LOST callback, closes the underlying
    connection (logging transfer statistics once), stops the queue
    threads and schedules final cleanup. Idempotent.
    """
    debug("close() closed=%s", self._closed)
    if self._closed:
        return
    self._closed = True
    self.scheduler.idle_add(self._process_packet_cb, self, [Protocol.CONNECTION_LOST])
    if self._conn:
        try:
            self._conn.close()
            if self._log_stats is None and self._conn.input_bytecount==0 and self._conn.output_bytecount==0:
                #no data sent or received, skip logging of stats:
                self._log_stats = False
            if self._log_stats:
                log.info("connection closed after %s packets received (%s bytes) and %s packets sent (%s bytes)",
                         std_unit(self.input_packetcount), std_unit_dec(self._conn.input_bytecount),
                         std_unit(self.output_packetcount), std_unit_dec(self._conn.output_bytecount)
                         )
        except Exception:
            #narrowed from a bare "except:" so that SystemExit and
            #KeyboardInterrupt are no longer swallowed here:
            log.error("error closing %s", self._conn, exc_info=True)
        self._conn = None
    self.terminate_queue_threads()
    self.scheduler.idle_add(self.clean)
def main():
    """Print the link speed of every network interface found."""
    from xpra.platform import program_context
    with program_context("Network-Speed", "Network Speed Query Tool"):
        from xpra.net.net_util import get_interfaces
        from xpra.simple_stats import std_unit
        for iface in get_interfaces():
            speed = get_interface_info(0, iface).get("speed", 0)
            try:
                value = int(speed)
            except ValueError:
                logger().error("Error: parsing speed value '%s'", speed, exc_info=True)
            else:
                print("%s : %s" % (iface, "%sbps" % std_unit(value)))
def transfer_progress_update(self, send=True, transfer_id=0, elapsed=0, position=0, total=0, error=None):
    """Update the progress bar of a tracked file transfer.

    On error or completion, the stop button is hidden and the entry
    is scheduled for removal after REMOVE_ENTRY_DELAY seconds.
    Transfers we are not tracking are silently ignored.
    """
    buttons = self.progress_bars.get(transfer_id)
    if not buttons:
        #we're not tracking this transfer: no progress bar
        return
    pb, stop_btn = buttons
    log("transfer_progress_update%s pb=%s", (send, transfer_id, elapsed, position, total, error), pb)
    if error:
        self.progress_bars[transfer_id] = (pb, None)
        stop_btn.hide()
        pb.set_text("Error: %s, file transfer aborted" % error)
        GLib.timeout_add(REMOVE_ENTRY_DELAY*1000, self.remove_entry, transfer_id)
        return
    if pb:
        #guard against zero-length transfers
        #(previously raised ZeroDivisionError when total==0):
        pb.set_fraction(position/total if total else 0)
        pb.set_text("%sB of %s" % (std_unit(position), std_unit(total)))
        pb.set_show_text(True)
    if position==total:
        #transfer complete:
        self.progress_bars[transfer_id] = (pb, None)
        stop_btn.hide()
        pb.set_text("Complete: %sB" % std_unit(total))
        pb.set_show_text(True)
        GLib.timeout_add(REMOVE_ENTRY_DELAY*1000, self.remove_entry, transfer_id)
def _process_bandwidth_limit(self, proto, packet):
    """Apply a client's bandwidth-limit request, clamped to the server-wide limit."""
    ss = self._server_sources.get(proto)
    if not ss:
        #source already gone (ie: disconnected):
        return
    bandwidth_limit = packet[1]
    #requests above the server-wide cap, and non-positive values,
    #fall back to the server-wide limit (0 means "no limit"):
    over_cap = self.bandwidth_limit and bandwidth_limit > self.bandwidth_limit
    if over_cap or bandwidth_limit <= 0:
        bandwidth_limit = self.bandwidth_limit or 0
    if ss.bandwidth_limit == bandwidth_limit:
        #unchanged
        return
    ss.bandwidth_limit = bandwidth_limit
    if bandwidth_limit:
        bandwidthlog.info("bandwidth-limit changed to %sbps for client %i",
                          std_unit(bandwidth_limit), ss.counter)
    else:
        bandwidthlog.info("bandwidth-limit restrictions removed for client %i", ss.counter)
def _process_ack_file_chunk(self, packet):
    """Handle the acknowledgement of a file chunk we sent.

    The other end received our send-file or send-file-chunk,
    so send some more file data, or finish / abort the transfer.
    """
    filelog("ack-file-chunk: %s", packet[1:])
    chunk_id, state, error_message, chunk = packet[1:5]
    #entries are stored under str keys, so normalize before ANY dict access
    #(previously the cancellation branch could KeyError on a bytes key):
    chunk_id = bytestostr(chunk_id)
    if not state:
        filelog.error("Error: remote end is cancelling the file transfer:")
        filelog.error(" %s", error_message)
        del self.send_chunks_in_progress[chunk_id]
        return
    chunk_state = self.send_chunks_in_progress.get(chunk_id)
    if not chunk_state:
        filelog.error("Error: cannot find the file transfer id '%s'", nonl(chunk_id))
        return
    if chunk_state[-1] != chunk:
        #fixed: log the stored chunk number, not the whole state list
        #(a list cannot be formatted with "%i"):
        filelog.error("Error: chunk number mismatch (%i vs %i)", chunk_state[-1], chunk)
        del self.send_chunks_in_progress[chunk_id]
        return
    start_time, data, chunk_size, timer, chunk = chunk_state
    if not data:
        #all sent!
        elapsed = monotonic_time() - start_time
        filelog("%i chunks of %i bytes sent in %ims (%sB/s)",
                chunk, chunk_size, elapsed * 1000, std_unit(chunk * chunk_size / elapsed))
        del self.send_chunks_in_progress[chunk_id]
        return
    assert chunk_size > 0
    #carve out another chunk:
    cdata = self.compressed_wrapper("file-data", data[:chunk_size])
    data = data[chunk_size:]
    chunk += 1
    if timer:
        self.source_remove(timer)
    timer = self.timeout_add(CHUNK_TIMEOUT, self._check_chunk_sending, chunk_id, chunk)
    self.send_chunks_in_progress[chunk_id] = [start_time, data, chunk_size, timer, chunk]
    self.send("send-file-chunk", chunk_id, chunk, cdata, bool(data))
def _process_request_file(self, proto, packet):
    """Handle a "request-file" packet: send the requested local file to the client.

    Validates that the path exists and is within the size limits
    (notifying the client on failure) before sending the file data.
    """
    ss = self.get_server_source(proto)
    if not ss:
        printlog.warn("Warning: invalid client source for send-data-response packet")
        return
    try:
        argf = packet[1].decode("utf-8")
    except UnicodeDecodeError:
        #fall back to a lossy conversion:
        argf = bytestostr(packet[1])
    openit = packet[2]
    filename = os.path.abspath(osexpand(argf))
    if not os.path.exists(filename):
        filelog.warn("Warning: the file requested does not exist:")
        filelog.warn(" %s", filename)
        ss.may_notify(XPRA_FILETRANSFER_NOTIFICATION_ID,
                      "File not found",
                      "The file requested does not exist:\n%s" % filename,
                      icon_name="file")
        return
    try:
        stat = os.stat(filename)
        filelog("os.stat(%s)=%s", filename, stat)
    except os.error:
        #stat failed: we still try to load the file below
        filelog("os.stat(%s)", filename, exc_info=True)
    else:
        #check both the server-wide and the per-client size limits:
        file_size = stat.st_size
        if file_size > self.file_transfer.file_size_limit or file_size > ss.file_size_limit:
            ss.may_notify(XPRA_FILETRANSFER_NOTIFICATION_ID,
                          "File too large",
                          "The file requested is too large to send:\n%s\nis %s" % (argf, std_unit(file_size)),
                          icon_name="file")
            return
    data = load_binary_file(filename)
    ss.send_file(filename, "", data, len(data), openit=openit,
                 options={"request-file": (argf, openit)})
def file_size_warning(self, action, location, basefilename, filesize, limit):
    """Show a dialog warning that a file is too large to transfer.

    Replaces any previously shown warning dialog.
    :param action: the verb for the message (eg: "download")
    :param location: where the limit comes from (eg: "server")
    :param limit: the size limit, in MB
    """
    if self.file_size_dialog:
        #close previous warning
        self.file_size_dialog.destroy()
        self.file_size_dialog = None
    parent = None
    msgs = (
        "Warning: cannot %s the file '%s'" % (action, basefilename),
        "this file is too large: %sB" % std_unit(filesize, unit=1024),
        "the %s file size limit is %iMB" % (location, limit),
    )
    self.file_size_dialog = gtk.MessageDialog(parent, DIALOG_DESTROY_WITH_PARENT,
                                              MESSAGE_INFO, BUTTONS_CLOSE,
                                              "\n".join(msgs))
    try:
        image = gtk.image_new_from_stock(gtk.STOCK_DIALOG_WARNING, 64)
        self.file_size_dialog.set_image(image)
    except Exception as e:
        #purely cosmetic, carry on without the icon:
        log.warn("failed to set dialog image: %s", e)
    self.file_size_dialog.connect("response", self.close_file_size_warning)
    self.file_size_dialog.show()
def validate_size(size):
    """Check that `size` is an acceptable mmap area size (64MB to 4GB)."""
    minimum = 64 * 1024 * 1024
    maximum = 4 * 1024 * 1024 * 1024
    assert size >= minimum, "mmap size is too small: %sB (minimum is 64MB)" % std_unit(size)
    assert size <= maximum, "mmap is too big: %sB (maximum is 4GB)" % std_unit(size)
def init_client_mmap(mmap_group=None, socket_filename=None, size=128*1024*1024, filename=None):
    """
    Initializes an mmap area, writes the token in it and returns:
        (success flag, delete flag, mmap_area, mmap_size, temp_file, mmap_filename)
    The caller must keep hold of temp_file to ensure it does not get deleted!
    This is used by the client.
    """
    def rerr():
        #the failure placeholder result:
        return False, False, None, 0, None, None
    log("init_mmap%s", (mmap_group, socket_filename, size, filename))
    mmap_filename = filename
    mmap_temp_file = None
    delete = True
    try:
        import mmap
        unit = max(4096, mmap.PAGESIZE)
        #add 8 bytes for the mmap area control header zone:
        mmap_size = roundup(size + 8, unit)
        if WIN32:
            #on win32 the mmap is backed by a named tag, not a real file:
            if not filename:
                from xpra.net.crypto import get_hex_uuid
                filename = "xpra-%s" % get_hex_uuid()
            mmap_filename = filename
            mmap_area = mmap.mmap(0, mmap_size, filename)
            #not a real file:
            delete = False
            mmap_temp_file = None
        else:
            assert POSIX
            if filename:
                if os.path.exists(filename):
                    #re-use an existing file (ie: an ivshmem device):
                    #NOTE(review): O_EXCL without O_CREAT has no effect here - confirm intent
                    fd = os.open(filename, os.O_EXCL | os.O_RDWR)
                    mmap_size = os.path.getsize(mmap_filename)
                    #mmap_size = 4*1024*1024 #size restriction needed with ivshmem
                    delete = False
                    log.info("Using existing mmap file '%s': %sMB", mmap_filename, mmap_size//1024//1024)
                else:
                    #create the file at the path given:
                    import errno
                    flags = os.O_CREAT | os.O_EXCL | os.O_RDWR
                    try:
                        fd = os.open(filename, flags)
                        mmap_temp_file = None   #os.fdopen(fd, 'w')
                        mmap_filename = filename
                    except OSError as e:
                        if e.errno == errno.EEXIST:
                            #lost the race: someone created it since our exists() check
                            log.error("Error: the mmap file '%s' already exists", filename)
                            return rerr()
                        raise
            else:
                #no path given: create a temporary file in the mmap directory
                import tempfile
                from xpra.platform.paths import get_mmap_dir
                mmap_dir = get_mmap_dir()
                subs = os.environ.copy()
                subs.update({
                    "UID" : os.getuid(),
                    "GID" : os.getgid(),
                    "PID" : os.getpid(),
                    })
                mmap_dir = shellsub(mmap_dir, subs)
                if mmap_dir and not os.path.exists(mmap_dir):
                    os.mkdir(mmap_dir, 0o700)
                if not mmap_dir or not os.path.exists(mmap_dir):
                    raise Exception("mmap directory %s does not exist!" % mmap_dir)
                #create the mmap file, the mkstemp that is called via NamedTemporaryFile ensures
                #that the file is readable and writable only by the creating user ID
                try:
                    temp = tempfile.NamedTemporaryFile(prefix="xpra.", suffix=".mmap", dir=mmap_dir)
                except OSError as e:
                    log.error("Error: cannot create mmap file:")
                    log.error(" %s", e)
                    return rerr()
                #keep a reference to it so it does not disappear!
                mmap_temp_file = temp
                mmap_filename = temp.name
                fd = temp.file.fileno()
            #set the group permissions and gid if the mmap-group option is specified
            if mmap_group and type(socket_filename)==str and os.path.exists(socket_filename):
                from stat import S_IRUSR,S_IWUSR,S_IRGRP,S_IWGRP
                s = os.stat(socket_filename)
                os.fchown(fd, -1, s.st_gid)
                os.fchmod(fd, S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
            assert mmap_size>=1024*1024, "mmap size is too small: %sB (minimum is 1MB)" % std_unit(mmap_size)
            assert mmap_size<=1024*1024*1024, "mmap is too big: %sB (maximum is 1GB)" % std_unit(mmap_size)
            log("using mmap file %s, fd=%s, size=%s", mmap_filename, fd, mmap_size)
            #grow the file to the required size by writing the last byte:
            os.lseek(fd, mmap_size-1, os.SEEK_SET)
            assert os.write(fd, b'\x00')
            os.lseek(fd, 0, os.SEEK_SET)
            mmap_area = mmap.mmap(fd, length=mmap_size)
        return True, delete, mmap_area, mmap_size, mmap_temp_file, mmap_filename
    except Exception as e:
        log("failed to setup mmap: %s", e, exc_info=True)
        log.error("Error: mmap setup failed:")
        log.error(" %s", e)
        clean_mmap(mmap_filename)
        return rerr()
def _process_send_file(self, packet):
    """Handle a "send-file" packet: the remote end is sending us a file.

    Validates the metadata and asks accept_data() for permission, then
    either registers a chunked transfer (replying with "ack-file-chunk")
    or writes the complete file data to a safe download location.
    """
    #the remote end is sending us a file
    start = monotonic()
    basefilename, mimetype, printit, openit, filesize, file_data, options = packet[1:8]
    send_id = ""
    if len(packet)>=9:
        send_id = net_utf8(packet[8])
    #basefilename should be utf8:
    basefilename = net_utf8(basefilename)
    mimetype = net_utf8(mimetype)
    if filesize<=0:
        filelog.error("Error: invalid file size: %s", filesize)
        filelog.error(" file transfer aborted for %r", basefilename)
        return
    args = (send_id, "file", basefilename, printit, openit)
    r = self.accept_data(*args)
    filelog("%s%s=%s", self.accept_data, args, r)
    if r is None:
        filelog.warn("Warning: %s rejected for file '%s'",
                     ("transfer", "printing")[bool(printit)],
                     basefilename)
        return
    #accept_data can override the flags:
    printit, openit = r
    options = typedict(options)
    if printit:
        l = printlog
        assert self.printing
    else:
        l = filelog
        assert self.file_transfer
    l("receiving file: %s",
      [basefilename, mimetype, printit, openit, filesize, "%s bytes" % len(file_data), options])
    if filesize>self.file_size_limit:
        l.error("Error: file '%s' is too large:", basefilename)
        l.error(" %sB, the file size limit is %sB",
                std_unit(filesize), std_unit(self.file_size_limit))
        return
    chunk_id = options.strget("file-chunk-id")
    try:
        filename, fd = safe_open_download_file(basefilename, mimetype)
    except OSError as e:
        filelog("cannot save file %s / %s", basefilename, mimetype, exc_info=True)
        filelog.error("Error: failed to save downloaded file")
        filelog.error(" %s", e)
        if chunk_id:
            self.send("ack-file-chunk", chunk_id, False, "failed to create file: %s" % e, 0)
        return
    self.file_descriptors.add(fd)
    if chunk_id:
        #chunked transfer: register the state and wait for "send-file-chunk" packets
        l = len(self.receive_chunks_in_progress)
        if l>=MAX_CONCURRENT_FILES:
            self.send("ack-file-chunk", chunk_id, False,
                      "too many file transfers in progress: %i" % l, 0)
            os.close(fd)
            return
        digest = hashlib.sha256()
        chunk = 0
        timer = self.timeout_add(CHUNK_TIMEOUT, self._check_chunk_receiving, chunk_id, chunk)
        #NOTE(review): positional state record, the "0, False" entries look like
        #a bytes-written counter and a cancelled flag - confirm field meanings:
        chunk_state = [
            monotonic(),
            fd, filename, mimetype,
            printit, openit, filesize,
            options, digest, 0, False, send_id,
            timer, chunk,
            ]
        self.receive_chunks_in_progress[chunk_id] = chunk_state
        self.send("ack-file-chunk", chunk_id, True, "", chunk)
        return
    #not chunked, full file:
    assert file_data, "no data, got %s" % (file_data,)
    if len(file_data)!=filesize:
        l.error("Error: invalid data size for file '%s'", basefilename)
        l.error(" received %i bytes, expected %i bytes", len(file_data), filesize)
        return
    #check digest if present:
    def check_digest(algo="sha256", libfn=hashlib.sha256):
        digest = options.get(algo)
        if digest:
            h = libfn()
            h.update(file_data)
            l("%s digest: %s - expected: %s", algo, h.hexdigest(), digest)
            if digest!=h.hexdigest():
                self.digest_mismatch(filename, digest, h.hexdigest(), algo)
    check_digest("sha256", hashlib.sha256)
    check_digest("sha1", hashlib.sha1)
    check_digest("md5", hashlib.md5)
    try:
        os.write(fd, file_data)
    finally:
        os.close(fd)
    self.transfer_progress_update(False, send_id, monotonic()-start, filesize, filesize, None)
    self.process_downloaded_file(filename, mimetype, printit, openit, filesize, options)
def main():
    """Print a diagnostic report of the network configuration:
    interfaces, gateways, protocol capabilities, SSL and crypto info.
    Pass -v/--verbose to enable network debug logging.
    """
    from xpra.util import print_nested_dict, csv
    from xpra.platform import program_context
    from xpra.platform.netdev_query import get_interface_speed
    from xpra.log import enable_color, add_debug_category, enable_debug_for
    with program_context("Network-Info", "Network Info"):
        enable_color()
        verbose = "-v" in sys.argv or "--verbose" in sys.argv
        if verbose:
            enable_debug_for("network")
            add_debug_category("network")
            log.enable_debug()
        print("Network interfaces found:")
        for iface in get_interfaces():
            if if_nametoindex:
                s = "* %s (index=%s)" % (iface.ljust(20), if_nametoindex(iface))
            else:
                s = "* %s" % iface
            speed = get_interface_speed(0, iface)
            if speed > 0:
                from xpra.simple_stats import std_unit
                s += " (speed=%sbps)" % std_unit(speed)
            print(s)
        def pver(v):
            #pretty-print a version value (tuple/list/bytes/str)
            #NOTE(review): pver appears unused in this function - confirm
            if type(v) in (tuple, list):
                s = ""
                for i in range(len(v)):
                    if i > 0:
                        #dot seperated numbers
                        if type(v[i - 1]) == int:
                            s += "."
                        else:
                            s += ", "
                    s += str(v[i])
                return s
            if type(v) == bytes:
                from xpra.os_util import bytestostr
                v = bytestostr(v)
            if type(v) == str and v.startswith("v"):
                return v[1:]
            return str(v)
        print("Gateways found:")
        for gt, idefs in get_gateways().items():
            print("* %s" % gt)      #ie: "INET"
            for i, idef in enumerate(idefs):
                #NOTE(review): gateway entries that are not a list/tuple are
                #silently skipped (nothing printed unless csv() raises) - confirm:
                try:
                    if isinstance(idef, (list, tuple)):
                        print(" [%i] %s" % (i, csv(idef)))
                        continue
                except:
                    print(" [%i] %s" % (i, idef))
        print("")
        print("Protocol Capabilities:")
        netcaps = get_network_caps()
        netif = {"": has_netifaces}
        if netifaces_version:
            netif["version"] = netifaces_version
        netcaps["netifaces"] = netif
        print_nested_dict(netcaps)
        print("")
        print("Network Config:")
        print_nested_dict(get_net_config())
        net_sys = get_net_sys_config()
        if net_sys:
            print("")
            print("Network System Config:")
            print_nested_dict(net_sys)
        print("")
        print("SSL:")
        print_nested_dict(get_ssl_info())
        try:
            from xpra.net.crypto import crypto_backend_init, get_crypto_caps
            crypto_backend_init()
            ccaps = get_crypto_caps()
            if ccaps:
                print("")
                print("Crypto Capabilities:")
                print_nested_dict(ccaps)
        except Exception as e:
            print("No Crypto:")
            print(" %s" % e)
def sizeerr(size):
    """Abort: the file exceeds our size limit."""
    message = "the file is too large: %sB (the file size limit is %sB)" % (
        std_unit(size), std_unit(self.file_size_limit))
    self.warn_and_quit(EXIT_FILE_TOO_BIG, message)
def _process_ack_file_chunk(self, packet):
    """Handle the acknowledgement of a file chunk we sent.

    The other end received our send-file or send-file-chunk,
    so send some more file data, or finish / abort the transfer.
    """
    filelog("ack-file-chunk: %s", packet[1:])
    chunk_id, state, error_message, chunk = packet[1:5]
    if not state:
        filelog.error("Error: remote end is cancelling the file transfer:")
        filelog.error(" %s", error_message)
        del self.send_chunks_in_progress[chunk_id]
        return
    chunk_state = self.send_chunks_in_progress.get(chunk_id)
    if not chunk_state:
        filelog.error("Error: cannot find the file transfer id '%s'", nonl(chunk_id))
        return
    if chunk_state[-1]!=chunk:
        #fixed: log the stored chunk number, not the whole state list
        #(a list cannot be formatted with "%i"):
        filelog.error("Error: chunk number mismatch (%i vs %i)", chunk_state[-1], chunk)
        del self.send_chunks_in_progress[chunk_id]
        return
    start_time, data, chunk_size, timer, chunk = chunk_state
    if not data:
        #all sent!
        elapsed = time.time()-start_time
        filelog("%i chunks of %i bytes sent in %ims (%sB/s)",
                chunk, chunk_size, elapsed*1000, std_unit(chunk*chunk_size/elapsed))
        del self.send_chunks_in_progress[chunk_id]
        return
    assert chunk_size>0
    #carve out another chunk:
    cdata = self.compressed_wrapper("file-data", data[:chunk_size])
    data = data[chunk_size:]
    chunk += 1
    if timer:
        self.source_remove(timer)
    timer = self.timeout_add(CHUNK_TIMEOUT, self._check_chunk_sending, chunk_id, chunk)
    self.send_chunks_in_progress[chunk_id] = [start_time, data, chunk_size, timer, chunk]
    self.send("send-file-chunk", chunk_id, chunk, cdata, bool(data))
def file_size_warning(self, action, location, basefilename, filesize, limit):
    """Log a warning that a file cannot be transferred because of its size."""
    warn = filelog.warn
    warn("Warning: cannot %s the file '%s'", action, basefilename)
    warn(" this file is too large: %sB", std_unit(filesize, unit=1024))
    warn(" the %s file size limit is %iMB", location, limit)
def checksize(file_size):
    """Raise ControlError if file_size exceeds the server's file size limit."""
    limit = self.file_transfer.file_size_limit
    if file_size > limit:
        raise ControlError("file '%s' is too large: %sB (limit is %sB)" % (
            filename, std_unit(file_size), std_unit(limit)))