class Connection(object):
    """The RPyC *connection* (AKA *protocol*).

    :param root: the :class:`~rpyc.core.service.Service` object to expose
    :param channel: the :class:`~rpyc.core.channel.Channel` over which messages are passed
    :param config: the connection's configuration dict (overriding parameters
                   from the :data:`default configuration <DEFAULT_CONFIG>`)
    """

    def __init__(self, root, channel, config={}):
        self._closed = True
        self._config = DEFAULT_CONFIG.copy()
        self._config.update(config)
        if self._config["connid"] is None:
            self._config["connid"] = "conn%d" % (next(_connection_id_generator),)
        self._HANDLERS = self._request_handlers()
        self._channel = channel
        self._seqcounter = itertools.count()
        self._recvlock = Lock()
        self._sendlock = Lock()
        self._recv_event = Condition()
        self._request_callbacks = {}
        self._local_objects = RefCountingColl()
        self._last_traceback = None
        self._proxy_cache = WeakValueDict()
        self._netref_classes_cache = {}
        self._remote_root = None
        self._send_queue = []
        self._local_root = root
        self._closed = False

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, t, v, tb):
        self.close()

    def __repr__(self):
        a, b = object.__repr__(self).split(" object ")
        return "%s %r object %s" % (a, self._config["connid"], b)

    def _cleanup(self, _anyway=True):  # IO
        if self._closed and not _anyway:
            return
        self._closed = True
        self._channel.close()
        self._local_root.on_disconnect(self)
        self._request_callbacks.clear()
        self._local_objects.clear()
        self._proxy_cache.clear()
        self._netref_classes_cache.clear()
        self._last_traceback = None
        self._remote_root = None
        self._local_root = None
        # self._seqcounter = None
        # self._config.clear()
        del self._HANDLERS

    def close(self, _catchall=True):  # IO
        """closes the connection, releasing all held resources"""
        if self._closed:
            return
        self._closed = True
        try:
            self._async_request(consts.HANDLE_CLOSE)
        except EOFError:
            pass
        except Exception:
            if not _catchall:
                raise
        finally:
            self._cleanup(_anyway=True)

    @property
    def closed(self):  # IO
        """Indicates whether the connection has been closed or not"""
        return self._closed

    def fileno(self):  # IO
        """Returns the connection's underlying file descriptor"""
        return self._channel.fileno()

    def ping(self, data=None, timeout=3):  # IO
        """Asserts that the other party is functioning properly, by making
        sure the *data* is echoed back before the *timeout* expires

        :param data: the data to send (leave ``None`` for the default buffer)
        :param timeout: the maximal time to wait for echo

        :raises: :class:`PingError` if the echoed data does not match
        :raises: :class:`EOFError` if the remote host closes the connection
        """
        if data is None:
            data = "abcdefghijklmnopqrstuvwxyz" * 20
        res = self.async_request(consts.HANDLE_PING, data, timeout=timeout)
        if res.value != data:
            raise PingError("echo mismatches sent data")

    def _get_seq_id(self):  # IO
        return next(self._seqcounter)

    def _send(self, msg, seq, args):  # IO
        data = brine.dump((msg, seq, args))
        # GC might run while sending data
        # if so, a BaseNetref.__del__ might be called
        # BaseNetref.__del__ must call asyncreq,
        # which will cause a deadlock
        # Solution:
        # Add the current request to a queue and let the thread that currently
        # holds the sendlock send it when it's done with its current job.
        # NOTE: Atomic list operations should be thread safe,
        # please call me out if they are not on all implementations!
        self._send_queue.append(data)
        # It is crucial to check the queue each time AFTER releasing the lock:
        while self._send_queue:
            if not self._sendlock.acquire(False):
                # Another thread holds the lock. It will send the data after
                # it's done with its current job. We can safely return.
                return
            try:
                # Can happen if another consumer was scheduled in between
                # `while` and `acquire`:
                if not self._send_queue:
                    # Must `continue` to ensure that `send_queue` is checked
                    # after releasing the lock! (in case another producer is
                    # scheduled before `release`)
                    continue
                data = self._send_queue.pop(0)
                self._channel.send(data)
            finally:
                self._sendlock.release()

    def _box(self, obj):  # boxing
        """store a local object in such a way that it could be recreated on
        the remote party either by-value or by-reference"""
        if brine.dumpable(obj):
            return consts.LABEL_VALUE, obj
        if type(obj) is tuple:
            return consts.LABEL_TUPLE, tuple(self._box(item) for item in obj)
        elif isinstance(obj, netref.BaseNetref) and obj.____conn__ is self:
            return consts.LABEL_LOCAL_REF, obj.____id_pack__
        else:
            id_pack = get_id_pack(obj)
            self._local_objects.add(id_pack, obj)
            return consts.LABEL_REMOTE_REF, id_pack

    def _unbox(self, package):  # boxing
        """recreate a local object representation of the remote object: if the
        object is passed by value, just return it; if the object is passed by
        reference, create a netref to it"""
        label, value = package
        if label == consts.LABEL_VALUE:
            return value
        if label == consts.LABEL_TUPLE:
            return tuple(self._unbox(item) for item in value)
        if label == consts.LABEL_LOCAL_REF:
            return self._local_objects[value]
        if label == consts.LABEL_REMOTE_REF:
            id_pack = (str(value[0]), value[1], value[2])  # so value is an id_pack
            if id_pack in self._proxy_cache:
                proxy = self._proxy_cache[id_pack]
                proxy.____refcount__ += 1  # if cached, the remote incremented the refcount, so sync refcount
            else:
                proxy = self._netref_factory(id_pack)
                self._proxy_cache[id_pack] = proxy
            return proxy
        raise ValueError("invalid label %r" % (label,))

    def _netref_factory(self, id_pack):  # boxing
        """create a netref class for the remote id_pack; used when the class
        id fails to directly match a cached class"""
        cls = None
        if id_pack[2] == 0 and id_pack in self._netref_classes_cache:
            cls = self._netref_classes_cache[id_pack]
        elif id_pack[0] in netref.builtin_classes_cache:
            cls = netref.builtin_classes_cache[id_pack[0]]
        if cls is None:
            # in the future, it could see if a sys.module cache/lookup hits first
            cls_methods = self.sync_request(consts.HANDLE_INSPECT, id_pack)
            cls = netref.class_factory(id_pack, cls_methods)
            if id_pack[2] == 0:
                # only use cached netrefs for classes
                # ... instance caching after gc of a proxy will take some mental gymnastics
                self._netref_classes_cache[id_pack] = cls
        return cls(self, id_pack)

    def _dispatch_request(self, seq, raw_args):  # dispatch
        try:
            handler, args = raw_args
            args = self._unbox(args)
            res = self._HANDLERS[handler](self, *args)
        except:  # TODO: revisit how to catch/handle locally; this should simplify when py2 is dropped
            # need to catch old-style exceptions too
            t, v, tb = sys.exc_info()
            self._last_traceback = tb
            logger = self._config["logger"]
            if logger and t is not StopIteration:
                logger.debug("Exception caught", exc_info=True)
            if t is SystemExit and self._config["propagate_SystemExit_locally"]:
                raise
            if t is KeyboardInterrupt and self._config["propagate_KeyboardInterrupt_locally"]:
                raise
            self._send(consts.MSG_EXCEPTION, seq, self._box_exc(t, v, tb))
        else:
            self._send(consts.MSG_REPLY, seq, self._box(res))

    def _box_exc(self, typ, val, tb):  # dispatch?
        return vinegar.dump(typ, val, tb,
                            include_local_traceback=self._config["include_local_traceback"],
                            include_local_version=self._config["include_local_version"])

    def _unbox_exc(self, raw):  # dispatch?
        return vinegar.load(raw,
                            import_custom_exceptions=self._config["import_custom_exceptions"],
                            instantiate_custom_exceptions=self._config["instantiate_custom_exceptions"],
                            instantiate_oldstyle_exceptions=self._config["instantiate_oldstyle_exceptions"])

    def _seq_request_callback(self, msg, seq, is_exc, obj):
        _callback = self._request_callbacks.pop(seq, None)
        if _callback is not None:
            _callback(is_exc, obj)
        elif self._config["logger"] is not None:
            debug_msg = 'Received {} seq {} and a related request callback did not exist'
            self._config["logger"].debug(debug_msg.format(msg, seq))

    def _dispatch(self, data):  # serving---dispatch?
        msg, seq, args = brine.load(data)
        if msg == consts.MSG_REQUEST:
            self._dispatch_request(seq, args)
        elif msg == consts.MSG_REPLY:
            obj = self._unbox(args)
            self._seq_request_callback(msg, seq, False, obj)
        elif msg == consts.MSG_EXCEPTION:
            obj = self._unbox_exc(args)
            self._seq_request_callback(msg, seq, True, obj)
        else:
            raise ValueError("invalid message type: %r" % (msg,))

    def serve(self, timeout=1, wait_for_lock=True):  # serving
        """Serves a single request or reply that arrives within the given
        time frame (default is 1 sec). Note that the dispatching of a request
        might trigger multiple (nested) requests, thus this function may be
        reentrant.

        :returns: ``True`` if a request or reply were received, ``False``
                  otherwise.
        """
        timeout = Timeout(timeout)
        with self._recv_event:
            if not self._recvlock.acquire(False):
                return wait_for_lock and self._recv_event.wait(timeout.timeleft())
        try:
            data = self._channel.poll(timeout) and self._channel.recv()
            if not data:
                return False
        except EOFError:
            self.close()
            raise
        finally:
            self._recvlock.release()
            with self._recv_event:
                self._recv_event.notify_all()
        self._dispatch(data)
        return True

    def poll(self, timeout=0):  # serving
        """Serves a single transaction, should one arrive in the given
        interval. Note that handling a request/reply may trigger nested
        requests, which are all part of a single transaction.

        :returns: ``True`` if a transaction was served, ``False`` otherwise"""
        return self.serve(timeout, False)

    def serve_all(self):  # serving
        """Serves all requests and replies for as long as the connection is alive."""
        try:
            while not self.closed:
                self.serve(None)
        except (socket.error, select_error, IOError):
            if not self.closed:
                raise
        except EOFError:
            pass
        finally:
            self.close()

    def serve_threaded(self, thread_count=10):  # serving
        """Serves all requests and replies for as long as the connection is alive.

        CAVEAT: using non-immutable types that require a netref to be
        constructed to serve a request, or invoking anything else that
        performs a sync_request, may time out because the sync_request reply
        is received by another thread serving the connection. A more
        conventional approach, where each client thread opens a new
        connection, would allow `ThreadedServer` to naturally avoid such
        multiplexing issues and is the preferred approach for threading
        procedures that invoke sync_request. See issue #345
        """
        def _thread_target():
            try:
                while True:
                    self.serve(None)
            except (socket.error, select_error, IOError):
                if not self.closed:
                    raise
            except EOFError:
                pass

        try:
            threads = [spawn(_thread_target) for _ in range(thread_count)]
            for thread in threads:
                thread.join()
        finally:
            self.close()

    def poll_all(self, timeout=0):  # serving
        """Serves all requests and replies that arrive within the given interval.

        :returns: ``True`` if at least a single transaction was served, ``False`` otherwise
        """
        at_least_once = False
        timeout = Timeout(timeout)
        try:
            while True:
                if self.poll(timeout):
                    at_least_once = True
                if timeout.expired():
                    break
        except EOFError:
            pass
        return at_least_once

    def sync_request(self, handler, *args):  # serving
        """Sends a synchronous request (waits for the reply to arrive)

        :raises: any exception that the request may generate

        :returns: the result of the request
        """
        timeout = self._config["sync_request_timeout"]
        return self.async_request(handler, *args, timeout=timeout).value

    def _async_request(self, handler, args=(), callback=(lambda a, b: None)):  # serving
        seq = self._get_seq_id()
        self._request_callbacks[seq] = callback
        try:
            self._send(consts.MSG_REQUEST, seq, (handler, self._box(args)))
        except Exception:
            # TODO: review test_remote_exception; logging exceptions shows an attempt to write on a closed stream.
            # Depending on the case, the MSG_REQUEST may or may not have been sent completely,
            # so pop the callback and raise to keep response integrity consistent.
            self._request_callbacks.pop(seq, None)
            raise

    def async_request(self, handler, *args, **kwargs):  # serving
        """Send an asynchronous request (does not wait for it to finish)

        :returns: an :class:`rpyc.core.async_.AsyncResult` object, which will
                  eventually hold the result (or exception)
        """
        timeout = kwargs.pop("timeout", None)
        if kwargs:
            raise TypeError("got unexpected keyword argument(s) %s" % (list(kwargs.keys()),))
        res = AsyncResult(self)
        self._async_request(handler, args, res)
        if timeout is not None:
            res.set_expiry(timeout)
        return res

    @property
    def root(self):  # serving
        """Fetches the root object (service) of the other party"""
        if self._remote_root is None:
            self._remote_root = self.sync_request(consts.HANDLE_GETROOT)
        return self._remote_root

    def _check_attr(self, obj, name, perm):  # attribute access
        config = self._config
        if not config[perm]:
            raise AttributeError("cannot access %r" % (name,))
        prefix = config["allow_exposed_attrs"] and config["exposed_prefix"]
        plain = config["allow_all_attrs"]
        plain |= config["allow_exposed_attrs"] and name.startswith(prefix)
        plain |= config["allow_safe_attrs"] and name in config["safe_attrs"]
        plain |= config["allow_public_attrs"] and not name.startswith("_")
        has_exposed = prefix and hasattr(obj, prefix + name)
        if plain and (not has_exposed or hasattr(obj, name)):
            return name
        if has_exposed:
            return prefix + name
        if plain:
            return name  # chance for better traceback
        raise AttributeError("cannot access %r" % (name,))

    def _access_attr(self, obj, name, args, overrider, param, default):  # attribute access
        if is_py_3k:
            if type(name) is bytes:
                name = str(name, "utf8")
            elif type(name) is not str:
                raise TypeError("name must be a string")
        else:
            if type(name) not in (str, unicode):  # noqa
                raise TypeError("name must be a string")
            name = str(name)  # IronPython issue #10 + py3k issue
        accessor = getattr(type(obj), overrider, None)
        if accessor is None:
            accessor = default
            name = self._check_attr(obj, name, param)
        return accessor(obj, name, *args)

    @classmethod
    def _request_handlers(cls):  # request handlers
        return {
            consts.HANDLE_PING: cls._handle_ping,
            consts.HANDLE_CLOSE: cls._handle_close,
            consts.HANDLE_GETROOT: cls._handle_getroot,
            consts.HANDLE_GETATTR: cls._handle_getattr,
            consts.HANDLE_DELATTR: cls._handle_delattr,
            consts.HANDLE_SETATTR: cls._handle_setattr,
            consts.HANDLE_CALL: cls._handle_call,
            consts.HANDLE_CALLATTR: cls._handle_callattr,
            consts.HANDLE_REPR: cls._handle_repr,
            consts.HANDLE_STR: cls._handle_str,
            consts.HANDLE_CMP: cls._handle_cmp,
            consts.HANDLE_HASH: cls._handle_hash,
            consts.HANDLE_INSTANCECHECK: cls._handle_instancecheck,
            consts.HANDLE_DIR: cls._handle_dir,
            consts.HANDLE_PICKLE: cls._handle_pickle,
            consts.HANDLE_DEL: cls._handle_del,
            consts.HANDLE_INSPECT: cls._handle_inspect,
            consts.HANDLE_BUFFITER: cls._handle_buffiter,
            consts.HANDLE_OLDSLICING: cls._handle_oldslicing,
            consts.HANDLE_CTXEXIT: cls._handle_ctxexit,
        }

    def _handle_ping(self, data):  # request handler
        return data

    def _handle_close(self):  # request handler
        self._cleanup()

    def _handle_getroot(self):  # request handler
        return self._local_root

    def _handle_del(self, obj, count=1):  # request handler
        self._local_objects.decref(get_id_pack(obj), count)

    def _handle_repr(self, obj):  # request handler
        return repr(obj)

    def _handle_str(self, obj):  # request handler
        return str(obj)

    def _handle_cmp(self, obj, other, op='__cmp__'):  # request handler
        # cmp() might enter recursive resonance... so use the underlying type
        # instead of cmp(obj, other)
        try:
            return self._access_attr(type(obj), op, (), "_rpyc_getattr", "allow_getattr", getattr)(obj, other)
        except Exception:
            raise

    def _handle_hash(self, obj):  # request handler
        return hash(obj)

    def _handle_call(self, obj, args, kwargs=()):  # request handler
        return obj(*args, **dict(kwargs))

    def _handle_dir(self, obj):  # request handler
        return tuple(dir(obj))

    def _handle_inspect(self, id_pack):  # request handler
        if hasattr(self._local_objects[id_pack], '____conn__'):
            # When RPyC is chained (RPyC over RPyC), id_pack is cached in local objects as a netref.
            # Since __mro__ is not a safe attribute, the request is forwarded using the proxy connection;
            # see issue #346 or tests.test_rpyc_over_rpyc.Test_rpyc_over_rpyc
            conn = self._local_objects[id_pack].____conn__
            return conn.sync_request(consts.HANDLE_INSPECT, id_pack)
        else:
            return tuple(get_methods(netref.LOCAL_ATTRS, self._local_objects[id_pack]))

    def _handle_getattr(self, obj, name):  # request handler
        return self._access_attr(obj, name, (), "_rpyc_getattr", "allow_getattr", getattr)

    def _handle_delattr(self, obj, name):  # request handler
        return self._access_attr(obj, name, (), "_rpyc_delattr", "allow_delattr", delattr)

    def _handle_setattr(self, obj, name, value):  # request handler
        return self._access_attr(obj, name, (value,), "_rpyc_setattr", "allow_setattr", setattr)

    def _handle_callattr(self, obj, name, args, kwargs=()):  # request handler
        obj = self._handle_getattr(obj, name)
        return self._handle_call(obj, args, kwargs)

    def _handle_ctxexit(self, obj, exc):  # request handler
        if exc:
            try:
                raise exc
            except Exception:
                exc, typ, tb = sys.exc_info()
        else:
            typ = tb = None
        return self._handle_getattr(obj, "__exit__")(exc, typ, tb)

    def _handle_instancecheck(self, obj, other_id_pack):
        # TODOs:
        #   + refactor cache instancecheck/inspect/class_factory
        #   + improve cache docs
        if hasattr(obj, '____conn__'):  # keep unwrapping!
            # When RPyC is chained (RPyC over RPyC), id_pack is cached in local objects as a netref.
            # Since __mro__ is not a safe attribute, the request is forwarded using the proxy connection;
            # relates to issue #346 or tests.test_netref_hierachy.Test_Netref_Hierarchy.test_StandardError
            conn = obj.____conn__
            return conn.sync_request(consts.HANDLE_INSTANCECHECK, other_id_pack)

        # Create a name pack which would be familiar here and see if there is a hit
        other_id_pack2 = (other_id_pack[0], other_id_pack[1], 0)
        if other_id_pack[0] in netref.builtin_classes_cache:
            cls = netref.builtin_classes_cache[other_id_pack[0]]
            other = cls(self, other_id_pack)
        elif other_id_pack2 in self._netref_classes_cache:
            cls = self._netref_classes_cache[other_id_pack2]
            other = cls(self, other_id_pack2)
        else:  # might just have missed cache, FIX ME
            return False
        return isinstance(other, obj)

    def _handle_pickle(self, obj, proto):  # request handler
        if not self._config["allow_pickle"]:
            raise ValueError("pickling is disabled")
        return bytes(pickle.dumps(obj, proto))

    def _handle_buffiter(self, obj, count):  # request handler
        return tuple(itertools.islice(obj, count))

    def _handle_oldslicing(self, obj, attempt, fallback, start, stop, args):  # request handler
        try:
            # first try __xxxitem__
            getitem = self._handle_getattr(obj, attempt)
            return getitem(slice(start, stop), *args)
        except Exception:
            # fallback to __xxxslice__. see issue #41
            if stop is None:
                stop = maxint
            getslice = self._handle_getattr(obj, fallback)
            return getslice(start, stop, *args)
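# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original sources): the lock-handoff
# send queue used by Connection._send above, reduced to a standalone helper.
# Any thread may append to the queue; whichever thread currently holds the
# lock drains it, so a thread that fails a non-blocking acquire may return
# at once -- the holder is guaranteed to re-check the queue before giving
# the lock up. `SendQueue` and `transmit` are hypothetical names.
# ---------------------------------------------------------------------------
import threading


class SendQueue(object):
    def __init__(self, transmit):
        self._queue = []               # list.append/pop are atomic in CPython
        self._lock = threading.Lock()
        self._transmit = transmit      # callable performing the real I/O

    def send(self, data):
        self._queue.append(data)
        # re-check the queue every time AFTER releasing the lock, exactly as
        # Connection._send does above:
        while self._queue:
            if not self._lock.acquire(False):
                return                 # the current holder will drain it
            try:
                if not self._queue:    # drained between `while` and `acquire`
                    continue           # the loop re-checks after `release`
                self._transmit(self._queue.pop(0))
            finally:
                self._lock.release()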
class Connection(object):
    """The RPyC *connection* (AKA *protocol*).

    :param root: the :class:`~rpyc.core.service.Service` object to expose
    :param channel: the :class:`~rpyc.core.channel.Channel` over which messages are passed
    :param config: the connection's configuration dict (overriding parameters
                   from the :data:`default configuration <DEFAULT_CONFIG>`)
    """

    def __init__(self, root, channel, config={}):
        self._closed = True
        self._config = DEFAULT_CONFIG.copy()
        self._config.update(config)
        if self._config["connid"] is None:
            self._config["connid"] = "conn%d" % (next(_connection_id_generator),)
        self._HANDLERS = self._request_handlers()
        self._channel = channel
        self._seqcounter = itertools.count()
        self._recvlock = Lock()
        self._sendlock = Lock()
        self._sync_replies = {}
        self._sync_lock = RLock()
        self._sync_event = Event()
        self._async_callbacks = {}
        self._local_objects = RefCountingColl()
        self._last_traceback = None
        self._proxy_cache = WeakValueDict()
        self._netref_classes_cache = {}
        self._remote_root = None
        self._send_queue = []
        self._local_root = root
        self._closed = False

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, t, v, tb):
        self.close()

    def __repr__(self):
        a, b = object.__repr__(self).split(" object ")
        return "%s %r object %s" % (a, self._config["connid"], b)

    #
    # IO
    #
    def _cleanup(self, _anyway=True):
        if self._closed and not _anyway:
            return
        self._closed = True
        self._channel.close()
        self._local_root.on_disconnect(self)
        self._sync_replies.clear()
        self._async_callbacks.clear()
        self._local_objects.clear()
        self._proxy_cache.clear()
        self._netref_classes_cache.clear()
        self._last_traceback = None
        self._remote_root = None
        self._local_root = None
        # self._seqcounter = None
        # self._config.clear()
        del self._HANDLERS

    def close(self, _catchall=True):
        """closes the connection, releasing all held resources"""
        if self._closed:
            return
        self._closed = True
        try:
            self._async_request(consts.HANDLE_CLOSE)
        except EOFError:
            pass
        except Exception:
            if not _catchall:
                raise
        finally:
            self._cleanup(_anyway=True)

    @property
    def closed(self):
        """Indicates whether the connection has been closed or not"""
        return self._closed

    def fileno(self):
        """Returns the connection's underlying file descriptor"""
        return self._channel.fileno()

    def ping(self, data=None, timeout=3):
        """Asserts that the other party is functioning properly, by making
        sure the *data* is echoed back before the *timeout* expires

        :param data: the data to send (leave ``None`` for the default buffer)
        :param timeout: the maximal time to wait for echo

        :raises: :class:`PingError` if the echoed data does not match
        """
        if data is None:
            data = "abcdefghijklmnopqrstuvwxyz" * 20
        res = self.async_request(consts.HANDLE_PING, data, timeout=timeout)
        if res.value != data:
            raise PingError("echo mismatches sent data")

    def _get_seq_id(self):
        return next(self._seqcounter)

    def _send(self, msg, seq, args):
        data = brine.dump((msg, seq, args))
        # GC might run while sending data
        # if so, a BaseNetref.__del__ might be called
        # BaseNetref.__del__ must call asyncreq,
        # which will cause a deadlock
        # Solution:
        # Add the current request to a queue and let the thread that currently
        # holds the sendlock send it when it's done with its current job.
        # NOTE: Atomic list operations should be thread safe,
        # please call me out if they are not on all implementations!
        self._send_queue.append(data)
        # It is crucial to check the queue each time AFTER releasing the lock:
        while self._send_queue:
            if not self._sendlock.acquire(False):
                # Another thread holds the lock. It will send the data after
                # it's done with its current job. We can safely return.
                return
            try:
                # Can happen if another consumer was scheduled in between
                # `while` and `acquire`:
                if not self._send_queue:
                    # Must `continue` to ensure that `send_queue` is checked
                    # after releasing the lock! (in case another producer is
                    # scheduled before `release`)
                    continue
                data = self._send_queue.pop(0)
                self._channel.send(data)
            finally:
                self._sendlock.release()

    def _send_request(self, seq, handler, args):
        self._send(consts.MSG_REQUEST, seq, (handler, self._box(args)))

    def _send_reply(self, seq, obj):
        self._send(consts.MSG_REPLY, seq, self._box(obj))

    def _send_exception(self, seq, exctype, excval, exctb):
        exc = vinegar.dump(exctype, excval, exctb,
                           include_local_traceback=self._config["include_local_traceback"])
        self._send(consts.MSG_EXCEPTION, seq, exc)

    #
    # boxing
    #
    def _box(self, obj):
        """store a local object in such a way that it could be recreated on
        the remote party either by-value or by-reference"""
        if brine.dumpable(obj):
            return consts.LABEL_VALUE, obj
        if type(obj) is tuple:
            return consts.LABEL_TUPLE, tuple(self._box(item) for item in obj)
        elif isinstance(obj, netref.BaseNetref) and obj.____conn__() is self:
            return consts.LABEL_LOCAL_REF, obj.____oid__
        else:
            self._local_objects.add(obj)
            try:
                cls = obj.__class__
            except Exception:
                # see issue #16
                cls = type(obj)
            if not isinstance(cls, type):
                cls = type(obj)
            return consts.LABEL_REMOTE_REF, (id(obj), cls.__name__, cls.__module__)

    def _unbox(self, package):
        """recreate a local object representation of the remote object: if the
        object is passed by value, just return it; if the object is passed by
        reference, create a netref to it"""
        label, value = package
        if label == consts.LABEL_VALUE:
            return value
        if label == consts.LABEL_TUPLE:
            return tuple(self._unbox(item) for item in value)
        if label == consts.LABEL_LOCAL_REF:
            return self._local_objects[value]
        if label == consts.LABEL_REMOTE_REF:
            oid, clsname, modname = value
            if oid in self._proxy_cache:
                proxy = self._proxy_cache[oid]
                # the other side increased the refcount on boxing; if we're
                # returning a cached proxy instead of a new object, we must
                # increase our refcount to match
                proxy.____refcount__ += 1
                return proxy
            proxy = self._netref_factory(oid, clsname, modname)
            self._proxy_cache[oid] = proxy
            return proxy
        raise ValueError("invalid label %r" % (label,))

    def _netref_factory(self, oid, clsname, modname):
        typeinfo = (clsname, modname)
        if typeinfo in self._netref_classes_cache:
            cls = self._netref_classes_cache[typeinfo]
        elif typeinfo in netref.builtin_classes_cache:
            cls = netref.builtin_classes_cache[typeinfo]
        else:
            info = self.sync_request(consts.HANDLE_INSPECT, oid)
            cls = netref.class_factory(clsname, modname, info)
            self._netref_classes_cache[typeinfo] = cls
        return cls(weakref.ref(self), oid)

    #
    # dispatching
    #
    def _dispatch_request(self, seq, raw_args):
        try:
            handler, args = raw_args
            args = self._unbox(args)
            res = self._HANDLERS[handler](self, *args)
        except:
            # need to catch old-style exceptions too
            t, v, tb = sys.exc_info()
            self._last_traceback = tb
            logger = self._config["logger"]
            if logger and t is not StopIteration:
                logger.debug("Exception caught", exc_info=True)
            if t is SystemExit and self._config["propagate_SystemExit_locally"]:
                raise
            if t is KeyboardInterrupt and self._config["propagate_KeyboardInterrupt_locally"]:
                raise
            self._send_exception(seq, t, v, tb)
        else:
            self._send_reply(seq, res)

    def _dispatch_reply(self, seq, raw):
        obj = self._unbox(raw)
        if seq in self._async_callbacks:
            self._async_callbacks.pop(seq)(False, obj)
        else:
            self._sync_replies[seq] = (False, obj)

    def _unbox_exception(self, raw):
        return vinegar.load(raw,
                            import_custom_exceptions=self._config["import_custom_exceptions"],
                            instantiate_custom_exceptions=self._config["instantiate_custom_exceptions"],
                            instantiate_oldstyle_exceptions=self._config["instantiate_oldstyle_exceptions"])

    def _dispatch_exception(self, seq, raw):
        obj = self._unbox_exception(raw)
        if seq in self._async_callbacks:
            self._async_callbacks.pop(seq)(True, obj)
        else:
            self._sync_replies[seq] = (True, obj)

    #
    # serving
    #
    def _recv(self, timeout, wait_for_lock):
        if not self._recvlock.acquire(wait_for_lock):
            return None
        try:
            if self._channel.poll(timeout):
                data = self._channel.recv()
            else:
                data = None
        except EOFError:
            self.close()
            raise
        finally:
            self._recvlock.release()
        return data

    def _dispatch(self, data):
        msg, seq, args = brine.load(data)
        if msg == consts.MSG_REQUEST:
            self._dispatch_request(seq, args)
        elif msg == consts.MSG_REPLY:
            self._dispatch_reply(seq, args)
        elif msg == consts.MSG_EXCEPTION:
            self._dispatch_exception(seq, args)
        else:
            raise ValueError("invalid message type: %r" % (msg,))

    def sync_recv_and_dispatch(self, timeout, wait_for_lock):
        # lock, or wait for the pumping thread's signal
        if self._sync_lock.acquire(False):
            try:
                self._sync_event.clear()
                data = self._recv(timeout, wait_for_lock=False)
                if not data:
                    return False
                self._dispatch(data)
                return True
            finally:
                self._sync_lock.release()
                self._sync_event.set()
        else:
            self._sync_event.wait()

    def poll(self, timeout=0):
        """Serves a single transaction, should one arrive in the given
        interval. Note that handling a request/reply may trigger nested
        requests, which are all part of a single transaction.

        :returns: ``True`` if a transaction was served, ``False`` otherwise"""
        return self.sync_recv_and_dispatch(timeout, wait_for_lock=False)

    def serve(self, timeout=1):
        """Serves a single request or reply that arrives within the given
        time frame (default is 1 sec). Note that the dispatching of a request
        might trigger multiple (nested) requests, thus this function may be
        reentrant.

        :returns: ``True`` if a request or reply were received, ``False``
                  otherwise.
        """
        return self.sync_recv_and_dispatch(timeout, wait_for_lock=True)

    def serve_all(self):
        """Serves all requests and replies for as long as the connection is alive."""
        try:
            while True:
                self.serve(None)
        except (socket.error, select_error, IOError):
            if not self.closed:
                raise
        except EOFError:
            pass
        finally:
            self.close()

    def serve_threaded(self, thread_count=10):
        """Serves all requests and replies for as long as the connection is alive."""
        def _thread_target():
            try:
                while True:
                    self.serve(None)
            except (socket.error, select_error, IOError):
                if not self.closed:
                    raise
            except EOFError:
                pass

        threads = []
        try:
            for _ in range(thread_count):
                thread = Thread(target=_thread_target)
                thread.daemon = True
                thread.start()
                threads.append(thread)
            for thread in threads:
                thread.join()
        finally:
            self.close()

    def poll_all(self, timeout=0):
        """Serves all requests and replies that arrive within the given interval.

        :returns: ``True`` if at least a single transaction was served, ``False`` otherwise
        """
        at_least_once = False
        t0 = time.time()
        duration = timeout
        try:
            while True:
                if self.poll(duration):
                    at_least_once = True
                if timeout is not None:
                    duration = t0 + timeout - time.time()
                    if duration < 0:
                        break
        except EOFError:
            pass
        return at_least_once

    #
    # requests
    #
    def sync_request(self, handler, *args):
        """Sends a synchronous request (waits for the reply to arrive)

        :raises: any exception that the request may generate

        :returns: the result of the request
        """
        seq = self._get_seq_id()
        self._send_request(seq, handler, args)
        timeout = self._config["sync_request_timeout"]
        while seq not in self._sync_replies:
            self.sync_recv_and_dispatch(timeout, True)
        isexc, obj = self._sync_replies.pop(seq)
        if isexc:
            raise obj
        else:
            return obj

    def _async_request(self, handler, args=(), callback=(lambda a, b: None)):
        seq = self._get_seq_id()
        self._async_callbacks[seq] = callback
        try:
            self._send_request(seq, handler, args)
        except:
            if seq in self._async_callbacks:
                del self._async_callbacks[seq]
            raise

    def async_request(self, handler, *args, **kwargs):
        """Send an asynchronous request (does not wait for it to finish)

        :returns: an :class:`rpyc.core.async.AsyncResult` object, which will
                  eventually hold the result (or exception)
        """
        timeout = kwargs.pop("timeout", None)
        if kwargs:
            raise TypeError("got unexpected keyword argument(s) %s" % (list(kwargs.keys()),))
        res = AsyncResult(weakref.proxy(self))
        self._async_request(handler, args, res)
        if timeout is not None:
            res.set_expiry(timeout)
        return res

    @property
    def root(self):
        """Fetches the root object (service) of the other party"""
        if self._remote_root is None:
            self._remote_root = self.sync_request(consts.HANDLE_GETROOT)
        return self._remote_root

    #
    # attribute access
    #
    def _check_attr(self, obj, name, perm):
        config = self._config
        if not config[perm]:
            raise AttributeError("cannot access %r" % (name,))
        if config["allow_exposed_attrs"]:
            prefix = config["exposed_prefix"]
            name2 = name if name.startswith(prefix) else prefix + name
            if hasattr(obj, name2):
                return name2
        if (self._config["allow_all_attrs"] or
                self._config["allow_safe_attrs"] and name in self._config["safe_attrs"] or
                self._config["allow_public_attrs"] and not name.startswith("_")):
            return name
        raise AttributeError("cannot access %r" % (name,))

    def _access_attr(self, obj, name, args, overrider, param, default):
        if is_py3k:
            if type(name) is bytes:
                name = str(name, "utf8")
            elif type(name) is not str:
                raise TypeError("name must be a string")
        else:
            if type(name) not in (str, unicode):  # noqa
                raise TypeError("name must be a string")
            name = str(name)  # IronPython issue #10 + py3k issue
        accessor = getattr(type(obj), overrider, None)
        if accessor is None:
            accessor = default
            name = self._check_attr(obj, name, param)
        return accessor(obj, name, *args)

    #
    # request handlers
    #
    @classmethod
    def _request_handlers(cls):
        return {
            consts.HANDLE_PING: cls._handle_ping,
            consts.HANDLE_CLOSE: cls._handle_close,
            consts.HANDLE_GETROOT: cls._handle_getroot,
            consts.HANDLE_GETATTR: cls._handle_getattr,
            consts.HANDLE_DELATTR: cls._handle_delattr,
            consts.HANDLE_SETATTR: cls._handle_setattr,
            consts.HANDLE_CALL: cls._handle_call,
            consts.HANDLE_CALLATTR: cls._handle_callattr,
            consts.HANDLE_REPR: cls._handle_repr,
            consts.HANDLE_STR: cls._handle_str,
            consts.HANDLE_CMP: cls._handle_cmp,
            consts.HANDLE_HASH: cls._handle_hash,
            consts.HANDLE_DIR: cls._handle_dir,
            consts.HANDLE_PICKLE: cls._handle_pickle,
            consts.HANDLE_DEL: cls._handle_del,
            consts.HANDLE_INSPECT: cls._handle_inspect,
            consts.HANDLE_BUFFITER: cls._handle_buffiter,
            consts.HANDLE_OLDSLICING: cls._handle_oldslicing,
            consts.HANDLE_CTXEXIT: cls._handle_ctxexit,
        }

    def _handle_ping(self, data):
        return data

    def _handle_close(self):
        self._cleanup()

    def _handle_getroot(self):
        return self._local_root

    def _handle_del(self, obj, count=1):
        self._local_objects.decref(id(obj), count)

    def _handle_repr(self, obj):
        return repr(obj)

    def _handle_str(self, obj):
        return str(obj)

    def _handle_cmp(self, obj, other):
        # cmp() might enter recursive resonance... yet another workaround:
        # return cmp(obj, other)
        try:
            return type(obj).__cmp__(obj, other)
        except (AttributeError, TypeError):
            return NotImplemented

    def _handle_hash(self, obj):
        return hash(obj)

    def _handle_call(self, obj, args, kwargs=()):
        return obj(*args, **dict(kwargs))

    def _handle_dir(self, obj):
        return tuple(dir(obj))

    def _handle_inspect(self, oid):
        return tuple(netref.inspect_methods(self._local_objects[oid]))

    def _handle_getattr(self, obj, name):
        return self._access_attr(obj, name, (), "_rpyc_getattr", "allow_getattr", getattr)

    def _handle_delattr(self, obj, name):
        return self._access_attr(obj, name, (), "_rpyc_delattr", "allow_delattr", delattr)

    def _handle_setattr(self, obj, name, value):
        return self._access_attr(obj, name, (value,), "_rpyc_setattr", "allow_setattr", setattr)

    def _handle_callattr(self, obj, name, args, kwargs=()):
        obj = self._handle_getattr(obj, name)
        return self._handle_call(obj, args, kwargs)

    def _handle_ctxexit(self, obj, exc):
        if exc:
            try:
                raise exc
            except:
                exc, typ, tb = sys.exc_info()
        else:
            typ = tb = None
        return self._handle_getattr(obj, "__exit__")(exc, typ, tb)

    def _handle_pickle(self, obj, proto):
        if not self._config["allow_pickle"]:
            raise ValueError("pickling is disabled")
        return pickle.dumps(obj, proto)

    def _handle_buffiter(self, obj, count):
        return tuple(itertools.islice(obj, count))

    def _handle_oldslicing(self, obj, attempt, fallback, start, stop, args):
        try:
            # first try __xxxitem__
            getitem = self._handle_getattr(obj, attempt)
            return getitem(slice(start, stop), *args)
        except Exception:
            # fallback to __xxxslice__. see issue #41
            if stop is None:
                stop = maxint
            getslice = self._handle_getattr(obj, fallback)
            return getslice(start, stop, *args)
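# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original sources): the boxing scheme
# shared by the _box variants above, reduced to plain data. Primitives that
# brine can serialize travel by value, tuples are boxed element-wise, and
# anything else is pinned locally and replaced by an (oid, classname, module)
# triple that the peer turns into a netref. `box` and `local_objects` are
# hypothetical stand-ins for the real consts/netref machinery.
# ---------------------------------------------------------------------------
LABEL_VALUE, LABEL_TUPLE, LABEL_REMOTE_REF = 1, 2, 3


def box(obj, local_objects):
    if isinstance(obj, (type(None), bool, int, float, str, bytes)):
        return LABEL_VALUE, obj                      # passed by value
    if type(obj) is tuple:
        return LABEL_TUPLE, tuple(box(item, local_objects) for item in obj)
    local_objects[id(obj)] = obj                     # keep alive for the peer
    return LABEL_REMOTE_REF, (id(obj), type(obj).__name__, type(obj).__module__)


# e.g. box((1, "a", open), {}) sends 1 and "a" by value but replaces the
# builtin `open` with an id triple, mirroring LABEL_REMOTE_REF above.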
class Connection(object):
    """The RPyC *connection* (AKA *protocol*).

    :param service: the :class:`Service <rpyc.core.service.Service>` to expose
    :param channel: the :class:`Channel <rpyc.core.channel.Channel>` over which messages are passed
    :param config: the connection's configuration dict (overriding parameters
                   from the :data:`default configuration <DEFAULT_CONFIG>`)
    :param _lazy: whether to defer service initialization. By default
                  (``False``) the service is initialized when the connection
                  is created; if set to ``True``, you will need to call
                  :func:`_init_service` manually later
    """

    SYNC_REQUEST_TIMEOUT = 30

    def __init__(self, service, channel, config={}, _lazy=False):
        self._closed = True
        self._config = DEFAULT_CONFIG.copy()
        self._config.update(config)
        if self._config["connid"] is None:
            self._config["connid"] = "conn%d" % (next(_connection_id_generator),)
        self._channel = channel
        self._seqcounter = itertools.count()
        self._recvlock = Lock()
        self._sendlock = Lock()
        self._sync_replies = {}
        self._sync_lock = RLock()
        self._sync_event = Event()
        self._async_callbacks = {}
        self._local_objects = RefCountingColl()
        self._last_traceback = None
        self._proxy_cache = WeakValueDict()
        self._netref_classes_cache = {}
        self._remote_root = None
        self._local_root = service(weakref.proxy(self))
        if not _lazy:
            self._init_service()
        self._closed = False

    def _init_service(self):
        self._local_root.on_connect()

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, t, v, tb):
        self.close()

    def __repr__(self):
        a, b = object.__repr__(self).split(" object ")
        return "%s %r object %s" % (a, self._config["connid"], b)

    #
    # IO
    #
    def _cleanup(self, _anyway=True):
        if self._closed and not _anyway:
            return
        self._closed = True
        self._channel.close()
        self._local_root.on_disconnect()
        self._sync_replies.clear()
        self._async_callbacks.clear()
        self._local_objects.clear()
        self._proxy_cache.clear()
        self._netref_classes_cache.clear()
        self._last_traceback = None
        self._remote_root = None
        self._local_root = None
        # self._seqcounter = None
        # self._config.clear()

    def close(self, _catchall=True):
        """closes the connection, releasing all held resources"""
        if self._closed:
            return
        self._closed = True
        try:
            self._async_request(consts.HANDLE_CLOSE)
        except EOFError:
            pass
        except Exception:
            if not _catchall:
                raise
        finally:
            self._cleanup(_anyway=True)

    @property
    def closed(self):
        """Indicates whether the connection has been closed or not"""
        return self._closed

    def fileno(self):
        """Returns the connection's underlying file descriptor"""
        return self._channel.fileno()

    def ping(self, data=None, timeout=3):
        """Asserts that the other party is functioning properly, by making
        sure the *data* is echoed back before the *timeout* expires

        :param data: the data to send (leave ``None`` for the default buffer)
        :param timeout: the maximal time to wait for echo

        :raises: :class:`PingError` if the echoed data does not match
        """
        if data is None:
            data = "abcdefghijklmnopqrstuvwxyz" * 20
        res = self.async_request(consts.HANDLE_PING, data, timeout=timeout)
        if res.value != data:
            raise PingError("echo mismatches sent data")

    def _get_seq_id(self):
        return next(self._seqcounter)

    def _send(self, msg, seq, args):
        data = brine.dump((msg, seq, args))
        # GC might run while sending data
        # if so, a BaseNetref.__del__ might be called
        # BaseNetref.__del__ must call asyncreq,
        # which will cause a deadlock
        is_gc_enabled = gc.isenabled()
        gc.disable()
        self._sendlock.acquire()
        try:
            self._channel.send(data)
        finally:
            self._sendlock.release()
            if is_gc_enabled:
                gc.enable()

    def _send_request(self, seq, handler, args):
        self._send(consts.MSG_REQUEST, seq, (handler, self._box(args)))

    def _send_reply(self, seq, obj):
        self._send(consts.MSG_REPLY, seq, self._box(obj))

    def _send_exception(self, seq, exctype, excval, exctb):
        exc = vinegar.dump(exctype, excval, exctb,
                           include_local_traceback=self._config["include_local_traceback"])
        self._send(consts.MSG_EXCEPTION, seq, exc)

    #
    # boxing
    #
    def _box(self, obj):
        """store a local object in such a way that it could be recreated on
        the remote party either by-value or by-reference"""
        if brine.dumpable(obj):
            return consts.LABEL_VALUE, obj
        if type(obj) is tuple:
            return consts.LABEL_TUPLE, tuple(self._box(item) for item in obj)
        elif isinstance(obj, netref.BaseNetref) and obj.____conn__() is self:
            return consts.LABEL_LOCAL_REF, obj.____oid__
        else:
            self._local_objects.add(obj)
            try:
                cls = obj.__class__
            except Exception:
                # see issue #16
                cls = type(obj)
            if not isinstance(cls, type):
                cls = type(obj)
            return consts.LABEL_REMOTE_REF, (id(obj), cls.__name__, cls.__module__)

    def _unbox(self, package):
        """recreate a local object representation of the remote object: if the
        object is passed by value, just return it; if the object is passed by
        reference, create a netref to it"""
        label, value = package
        if label == consts.LABEL_VALUE:
            return value
        if label == consts.LABEL_TUPLE:
            return tuple(self._unbox(item) for item in value)
        if label == consts.LABEL_LOCAL_REF:
            return self._local_objects[value]
        if label == consts.LABEL_REMOTE_REF:
            oid, clsname, modname = value
            if oid in self._proxy_cache:
                return self._proxy_cache[oid]
            proxy = self._netref_factory(oid, clsname, modname)
            self._proxy_cache[oid] = proxy
            return proxy
        raise ValueError("invalid label %r" % (label,))

    def _netref_factory(self, oid, clsname, modname):
        typeinfo = (clsname, modname)
        if typeinfo in self._netref_classes_cache:
            cls = self._netref_classes_cache[typeinfo]
        elif typeinfo in netref.builtin_classes_cache:
            cls = netref.builtin_classes_cache[typeinfo]
        else:
            info = self.sync_request(consts.HANDLE_INSPECT, oid)
            cls = netref.class_factory(clsname, modname, info)
            self._netref_classes_cache[typeinfo] = cls
        return cls(weakref.ref(self), oid)

    #
    # dispatching
    #
    def _dispatch_request(self, seq, raw_args):
        logger = self._config["logger"]
        try:
            handler, args = raw_args
            if logger:
                logger.debug("dispatching: %r (%r)", handler, seq)
            args = self._unbox(args)
            res = self._HANDLERS[handler](self, *args)
        except:
            # need to catch old-style exceptions too
            t, v, tb = sys.exc_info()
            self._last_traceback = tb
            if logger and t is not StopIteration:
                logger.debug("Exception caught", exc_info=True)
            if t is SystemExit and self._config["propagate_SystemExit_locally"]:
                raise
            if t is KeyboardInterrupt and self._config["propagate_KeyboardInterrupt_locally"]:
                raise
            self._send_exception(seq, t, v, tb)
        else:
            self._send_reply(seq, res)

    def _dispatch_reply(self, seq, raw):
        obj = self._unbox(raw)
        if seq in self._async_callbacks:
            self._async_callbacks.pop(seq)(False, obj)
        else:
            self._sync_replies[seq] = (False, obj)

    def _dispatch_exception(self, seq, raw):
        obj = vinegar.load(raw,
                           import_custom_exceptions=self._config["import_custom_exceptions"],
                           instantiate_custom_exceptions=self._config["instantiate_custom_exceptions"],
                           instantiate_oldstyle_exceptions=self._config["instantiate_oldstyle_exceptions"])
        if seq in self._async_callbacks:
            self._async_callbacks.pop(seq)(True, obj)
        else:
            self._sync_replies[seq] = (True, obj)

    #
    # serving
    #
    def _recv(self, timeout, wait_for_lock):
        if not self._recvlock.acquire(wait_for_lock):
            return None
        try:
            if self._channel.poll(timeout):
                data = self._channel.recv()
            else:
                data = None
        except EOFError:
            self.close()
            raise
        finally:
            self._recvlock.release()
        return data

    def _dispatch(self, data):
        msg, seq, args = brine.load(data)
        if msg == consts.MSG_REQUEST:
            self._dispatch_request(seq, args)
        elif msg == consts.MSG_REPLY:
            self._dispatch_reply(seq, args)
        elif msg == consts.MSG_EXCEPTION:
            self._dispatch_exception(seq, args)
        else:
            raise ValueError("invalid message type: %r" % (msg,))

    def poll(self, timeout=0):
        """Serves a single transaction, should one arrive in the given
        interval. Note that handling a request/reply may trigger nested
        requests, which are all part of a single transaction.

        :returns: ``True`` if a transaction was served, ``False`` otherwise"""
        data = self._recv(timeout, wait_for_lock=False)
        if not data:
            return False
        self._dispatch(data)
        return True

    def serve(self, timeout=1):
        """Serves a single request or reply that arrives within the given
        time frame (default is 1 sec). Note that the dispatching of a request
        might trigger multiple (nested) requests, thus this function may be
        reentrant.

        :returns: ``True`` if a request or reply were received, ``False``
                  otherwise.
        """
        data = self._recv(timeout, wait_for_lock=True)
        if not data:
            return False
        self._dispatch(data)
        return True

    def serve_all(self):
        """Serves all requests and replies for as long as the connection is alive."""
        try:
            while True:
                self.serve(0.1)
        except (socket.error, select_error, IOError):
            if not self.closed:
                raise
        except EOFError:
            pass
        finally:
            self.close()

    def poll_all(self, timeout=0):
        """Serves all requests and replies that arrive within the given interval.

        :returns: ``True`` if at least a single transaction was served, ``False`` otherwise
        """
        at_least_once = False
        t0 = time.time()
        duration = timeout
        try:
            while True:
                if self.poll(duration):
                    at_least_once = True
                if timeout is not None:
                    duration = t0 + timeout - time.time()
                    if duration < 0:
                        break
        except EOFError:
            pass
        return at_least_once

    #
    # requests
    #
    def sync_request(self, handler, *args):
        """Sends a synchronous request (waits for the reply to arrive)

        :raises: any exception that the request may generate

        :returns: the result of the request
        """
        seq = self._get_seq_id()
        self._send_request(seq, handler, args)

        start_time = time.time()
        while seq not in self._sync_replies:
            remaining_time = self.SYNC_REQUEST_TIMEOUT - (time.time() - start_time)
            if remaining_time < 0:
                raise socket.timeout
            # lock, or wait for the pumping thread's signal
            if self._sync_lock.acquire(False):
                self._sync_event.clear()
                try:
                    self.serve(remaining_time)
                finally:
                    self._sync_lock.release()
                    self._sync_event.set()
            else:
                self._sync_event.wait(remaining_time)

        isexc, obj = self._sync_replies.pop(seq)
        if isexc:
            raise obj
        else:
            return obj

    def _async_request(self, handler, args=(), callback=(lambda a, b: None)):
        seq = self._get_seq_id()
        self._async_callbacks[seq] = callback
        try:
            self._send_request(seq, handler, args)
        except:
            if seq in self._async_callbacks:
                del self._async_callbacks[seq]
            raise

    def async_request(self, handler, *args, **kwargs):
        """Send an asynchronous request (does not wait for it to finish)

        :returns: an :class:`rpyc.core.async.AsyncResult` object, which will
                  eventually hold the result (or exception)
        """
        timeout = kwargs.pop("timeout", None)
        if kwargs:
            raise TypeError("got unexpected keyword argument(s) %s" % (list(kwargs.keys()),))
        res = AsyncResult(weakref.proxy(self))
        self._async_request(handler, args, res)
        if timeout is not None:
            res.set_expiry(timeout)
        return res

    @property
    def root(self):
        """Fetches the root object (service) of the other party"""
        if self._remote_root is None:
            self._remote_root = self.sync_request(consts.HANDLE_GETROOT)
        return self._remote_root

    #
    # attribute access
    #
    def _check_attr(self, obj, name):
        if self._config["allow_exposed_attrs"]:
            if name.startswith(self._config["exposed_prefix"]):
                name2 = name
            else:
                name2 = self._config["exposed_prefix"] + name
            if hasattr(obj, name2):
                return name2
        if self._config["allow_all_attrs"]:
            return name
        if self._config["allow_safe_attrs"] and name in self._config["safe_attrs"]:
            return name
        if self._config["allow_public_attrs"] and not name.startswith("_"):
            return name
        return False

    def _access_attr(self, oid, name, args, overrider, param, default):
        if is_py3k:
            if type(name) is bytes:
                name = str(name, "utf8")
            elif type(name) is not str:
                raise TypeError("name must be a string")
        else:
            if type(name) not in (str, unicode):  # noqa
                raise TypeError("name must be a string")
            name = str(name)  # IronPython issue #10 + py3k issue
        obj = self._local_objects[oid]
        accessor = getattr(type(obj), overrider, None)
        if accessor is None:
            name2 = self._check_attr(obj, name)
            if not self._config[param] or not name2:
                raise AttributeError("cannot access %r" % (name,))
            accessor = default
            name = name2
        return accessor(obj, name, *args)

    #
    # request handlers
    #
    def _handle_ping(self, data):
        return data

    def _handle_close(self):
        self._cleanup()

    def _handle_getroot(self):
        return self._local_root

    def _handle_del(self, oid):
        self._local_objects.decref(oid)

    def _handle_repr(self, oid):
        return repr(self._local_objects[oid])

    def _handle_str(self, oid):
        return str(self._local_objects[oid])

    def _handle_cmp(self, oid, other):
        # cmp() might enter recursive resonance... yet another workaround:
        # return cmp(self._local_objects[oid], other)
        obj = self._local_objects[oid]
        try:
            return type(obj).__cmp__(obj, other)
        except (AttributeError, TypeError):
            return NotImplemented

    def _handle_hash(self, oid):
        return hash(self._local_objects[oid])

    def _handle_call(self, oid, args, kwargs=()):
        return self._local_objects[oid](*args, **dict(kwargs))

    def _handle_dir(self, oid):
        return tuple(dir(self._local_objects[oid]))

    def _handle_inspect(self, oid):
        return tuple(netref.inspect_methods(self._local_objects[oid]))

    def _handle_getattr(self, oid, name):
        return self._access_attr(oid, name, (), "_rpyc_getattr", "allow_getattr", getattr)

    def _handle_delattr(self, oid, name):
        return self._access_attr(oid, name, (), "_rpyc_delattr", "allow_delattr", delattr)

    def _handle_setattr(self, oid, name, value):
        return self._access_attr(oid, name, (value,), "_rpyc_setattr", "allow_setattr", setattr)

    def _handle_callattr(self, oid, name, args, kwargs):
        return self._handle_getattr(oid, name)(*args, **dict(kwargs))

    def _handle_pickle(self, oid, proto):
        if not self._config["allow_pickle"]:
            raise ValueError("pickling is disabled")
        return pickle.dumps(self._local_objects[oid], proto)

    def _handle_buffiter(self, oid, count):
        items = []
        obj = self._local_objects[oid]
        i = 0
        try:
            while i < count:
                items.append(next(obj))
                i += 1
        except StopIteration:
            pass
        return tuple(items)

    def _handle_oldslicing(self, oid, attempt, fallback, start, stop, args):
        try:
            # first try __xxxitem__
            getitem = self._handle_getattr(oid, attempt)
            return getitem(slice(start, stop), *args)
        except Exception:
            # fallback to __xxxslice__. see issue #41
            if stop is None:
                stop = maxint
            getslice = self._handle_getattr(oid, fallback)
            return getslice(start, stop, *args)

    # collect handlers
    _HANDLERS = {}
    for name, obj in dict(locals()).items():
        if name.startswith("_handle_"):
            name2 = "HANDLE_" + name[8:].upper()
            if hasattr(consts, name2):
                _HANDLERS[getattr(consts, name2)] = obj
            else:
                raise NameError("no constant defined for %r" % (name,))
    del name, name2, obj
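# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original sources): the sync_lock /
# sync_event dance used by sync_request above. Exactly one thread pumps the
# channel at a time; the others block on an Event and re-check whether their
# own reply has arrived each time the pumping thread signals. `replies` and
# `pump_once` are hypothetical stand-ins for _sync_replies and serve().
# ---------------------------------------------------------------------------
def wait_for_reply(seq, replies, sync_lock, sync_event, pump_once):
    while seq not in replies:
        if sync_lock.acquire(False):   # become the pumping thread
            sync_event.clear()
            try:
                pump_once()            # serve a single incoming message
            finally:
                sync_lock.release()
                sync_event.set()       # wake waiters to re-check `replies`
        else:
            sync_event.wait()          # someone else is pumping
    return replies.pop(seq)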
class Connection(object): """The RPyC *connection* (AKA *protocol*). :param service: the :class:`Service <rpyc.core.service.Service>` to expose :param channel: the :class:`Channel <rpyc.core.channel.Channel>` over which messages are passed :param config: the connection's configuration dict (overriding parameters from the :data:`default configuration <DEFAULT_CONFIG>`) :param _lazy: whether or not to initialize the service with the creation of the connection. Default is True. If set to False, you will need to call :func:`_init_service` manually later """ def __init__(self, service, channel, config = {}, _lazy = False): self._closed = True self._config = DEFAULT_CONFIG.copy() self._config.update(config) if self._config["connid"] is None: self._config["connid"] = "conn%d" % (next(_connection_id_generator),) self._channel = channel self._seqcounter = itertools.count() self._recvlock = Lock() self._sendlock = Lock() self._sync_replies = {} self._async_callbacks = {} self._local_objects = RefCountingColl() self._last_traceback = None self._proxy_cache = WeakValueDict() self._netref_classes_cache = {} self._remote_root = None self._local_root = service(weakref.proxy(self)) if not _lazy: self._init_service() self._closed = False def _init_service(self): self._local_root.on_connect() def __del__(self): self.close() def __enter__(self): return self def __exit__(self, t, v, tb): self.close() def __repr__(self): a, b = object.__repr__(self).split(" object ") return "%s %r object %s" % (a, self._config["connid"], b) # # IO # def _cleanup(self, _anyway = True): if self._closed and not _anyway: return self._closed = True self._channel.close() self._local_root.on_disconnect() self._sync_replies.clear() self._async_callbacks.clear() self._local_objects.clear() self._proxy_cache.clear() self._netref_classes_cache.clear() self._last_traceback = None self._remote_root = None self._local_root = None #self._seqcounter = None #self._config.clear() def close(self, _catchall = True): """closes the connection, releasing all held resources""" if self._closed: return self._closed = True try: try: self._async_request(consts.HANDLE_CLOSE) except EOFError: pass except Exception: if not _catchall: raise finally: self._cleanup(_anyway = True) @property def closed(self): """Indicates whether the connection has been closed or not""" return self._closed def fileno(self): """Returns the connectin's underlying file descriptor""" return self._channel.fileno() def ping(self, data = None, timeout = 3): """ Asserts that the other party is functioning properly, by making sure the *data* is echoed back before the *timeout* expires :param data: the data to send (leave ``None`` for the default buffer) :param timeout: the maximal time to wait for echo :raises: :class:`PingError` if the echoed data does not match """ if data is None: data = "abcdefghijklmnopqrstuvwxyz" * 20 res = self.async_request(consts.HANDLE_PING, data, timeout = timeout) if res.value != data: raise PingError("echo mismatches sent data") def _send(self, msg, seq, args): data = brine.dump((msg, seq, args)) self._sendlock.acquire() try: self._channel.send(data) finally: self._sendlock.release() def _send_request(self, handler, args): seq = next(self._seqcounter) self._send(consts.MSG_REQUEST, seq, (handler, self._box(args))) return seq def _send_reply(self, seq, obj): self._send(consts.MSG_REPLY, seq, self._box(obj)) def _send_exception(self, seq, exctype, excval, exctb): exc = vinegar.dump(exctype, excval, exctb, include_local_traceback = 
self._config["include_local_traceback"]) self._send(consts.MSG_EXCEPTION, seq, exc) # # boxing # def _box(self, obj): """store a local object in such a way that it could be recreated on the remote party either by-value or by-reference""" if brine.dumpable(obj): return consts.LABEL_VALUE, obj if type(obj) is tuple: return consts.LABEL_TUPLE, tuple(self._box(item) for item in obj) elif isinstance(obj, netref.BaseNetref) and obj.____conn__() is self: return consts.LABEL_LOCAL_REF, obj.____oid__ elif isinstance(obj, AsyncResult): obj.async_set_expiry(3) # HACK: wait at most 3 seconds until the async result arrives return self._box(obj.async_value) else: self._local_objects.add(obj) try: cls = obj.__class__ except Exception: # see issue #16 cls = type(obj) return consts.LABEL_REMOTE_REF, (id(obj), cls.__name__, cls.__module__) def _unbox(self, package): """recreate a local object representation of the remote object: if the object is passed by value, just return it; if the object is passed by reference, create a netref to it""" label, value = package if label == consts.LABEL_VALUE: return value if label == consts.LABEL_TUPLE: return tuple(self._unbox(item) for item in value) if label == consts.LABEL_LOCAL_REF: return self._local_objects[value] if label == consts.LABEL_REMOTE_REF: oid, clsname, modname = value if oid in self._proxy_cache: return self._proxy_cache[oid] proxy = self._netref_factory(oid, clsname, modname) self._proxy_cache[oid] = proxy return proxy raise ValueError("invalid label %r" % (label,)) def _netref_factory(self, oid, clsname, modname): typeinfo = (clsname, modname) if typeinfo in self._netref_classes_cache: cls = self._netref_classes_cache[typeinfo] elif typeinfo in netref.builtin_classes_cache: cls = netref.builtin_classes_cache[typeinfo] else: info = self.sync_request(consts.HANDLE_INSPECT, oid) cls = netref.class_factory(clsname, modname, info) self._netref_classes_cache[typeinfo] = cls inst = cls(weakref.ref(self), oid) return inst # # dispatching # def _dispatch_request(self, seq, raw_args): try: handler, args = raw_args args = self._unbox(args) res = self._HANDLERS[handler](self, *args) except KeyboardInterrupt: raise except: # need to catch old style exceptions too t, v, tb = sys.exc_info() self._last_traceback = tb if self._config["logger"]: self._config["logger"].debug("Exception caught", exc_info=True) if t is SystemExit and self._config["propagate_SystemExit_locally"]: raise self._send_exception(seq, t, v, tb) else: self._send_reply(seq, res) def _dispatch_reply(self, seq, raw): obj = self._unbox(raw) if seq in self._async_callbacks: self._async_callbacks.pop(seq)(False, obj) else: self._sync_replies[seq] = (False, obj) def _dispatch_exception(self, seq, raw): obj = vinegar.load(raw, import_custom_exceptions = self._config["import_custom_exceptions"], instantiate_custom_exceptions = self._config["instantiate_custom_exceptions"], instantiate_oldstyle_exceptions = self._config["instantiate_oldstyle_exceptions"]) if seq in self._async_callbacks: self._async_callbacks.pop(seq)(True, obj) else: self._sync_replies[seq] = (True, obj) # # serving # def _recv(self, timeout, wait_for_lock): if not self._recvlock.acquire(wait_for_lock): return None try: try: if self._channel.poll(timeout): data = self._channel.recv() else: data = None except EOFError: self.close() raise finally: self._recvlock.release() return data def _dispatch(self, data): msg, seq, args = brine.load(data) if msg == consts.MSG_REQUEST: try: # note: we're acquiring a shared lock here since we want network 
# # dispatching # def _dispatch_request(self, seq, raw_args): try: handler, args = raw_args args = self._unbox(args) res = self._HANDLERS[handler](self, *args) except KeyboardInterrupt: raise except: # need to catch old style exceptions too t, v, tb = sys.exc_info() self._last_traceback = tb if self._config["logger"]: self._config["logger"].debug("Exception caught", exc_info=True) if t is SystemExit and self._config["propagate_SystemExit_locally"]: raise self._send_exception(seq, t, v, tb) else: self._send_reply(seq, res) def _dispatch_reply(self, seq, raw): obj = self._unbox(raw) if seq in self._async_callbacks: self._async_callbacks.pop(seq)(False, obj) else: self._sync_replies[seq] = (False, obj) def _dispatch_exception(self, seq, raw): obj = vinegar.load(raw, import_custom_exceptions = self._config["import_custom_exceptions"], instantiate_custom_exceptions = self._config["instantiate_custom_exceptions"], instantiate_oldstyle_exceptions = self._config["instantiate_oldstyle_exceptions"]) if seq in self._async_callbacks: self._async_callbacks.pop(seq)(True, obj) else: self._sync_replies[seq] = (True, obj) # # serving # def _recv(self, timeout, wait_for_lock): if not self._recvlock.acquire(wait_for_lock): return None try: try: if self._channel.poll(timeout): data = self._channel.recv() else: data = None except EOFError: self.close() raise finally: self._recvlock.release() return data def _dispatch(self, data): msg, seq, args = brine.load(data) if msg == consts.MSG_REQUEST: self._dispatch_request(seq, args) elif msg == consts.MSG_REPLY: self._dispatch_reply(seq, args) elif msg == consts.MSG_EXCEPTION: self._dispatch_exception(seq, args) else: raise ValueError("invalid message type: %r" % (msg,)) def poll(self, timeout = 0): """Serves a single transaction, should one arrive in the given interval. Note that handling a request/reply may trigger nested requests, which are all part of a single transaction. :returns: ``True`` if a transaction was served, ``False`` otherwise""" data = self._recv(timeout, wait_for_lock = False) if not data: return False self._dispatch(data) return True def serve(self, timeout = 1): """Serves a single request or reply that arrives within the given time frame (default is 1 sec). Note that the dispatching of a request might trigger multiple (nested) requests, thus this function may be reentrant. :returns: ``True`` if a request or reply was received, ``False`` otherwise. """ data = self._recv(timeout, wait_for_lock = True) if not data: return False self._dispatch(data) return True def serve_all(self): """Serves all requests and replies for as long as the connection is alive.""" try: try: while True: self.serve(0.1) except (socket.error, select_error, IOError): if not self.closed: raise except EOFError: pass except Exception as e: print("Exception during serve_all:", e) finally: self.close()
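# --- illustrative sketch (not part of the original module) -----------------
# Hypothetical usage of the serving API above; `conn` stands for an already
# established Connection and is assumed, not constructed here. A dedicated
# daemon thread blocks in serve_all() to answer the peer's requests, while
# pump() shows the cooperative alternative built on the non-blocking poll().
import threading

def start_background_server(conn):
    thread = threading.Thread(target=conn.serve_all, daemon=True)
    thread.start()
    return thread

def pump(conn):
    # drain everything that is currently pending without blocking
    while conn.poll(timeout=0):
        pass
# ---------------------------------------------------------------------------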
def poll_all(self, timeout = 0): """Serves all requests and replies that arrive within the given interval. :returns: ``True`` if at least a single transaction was served, ``False`` otherwise """ at_least_once = False t0 = time.time() duration = timeout try: while True: if self.poll(duration): at_least_once = True if timeout is not None: duration = t0 + timeout - time.time() if duration < 0: break except EOFError: pass return at_least_once # # requests # def sync_request(self, handler, *args): """Sends a synchronous request (waits for the reply to arrive) :raises: any exception that the request may generate :returns: the result of the request """ seq = self._send_request(handler, args) while seq not in self._sync_replies: self.serve(0.1) isexc, obj = self._sync_replies.pop(seq) if isexc: raise obj else: return obj def _async_request(self, handler, args = (), callback = (lambda a, b: None)): seq = self._send_request(handler, args) self._async_callbacks[seq] = callback def async_request(self, handler, *args, **kwargs): """Send an asynchronous request (does not wait for it to finish) :returns: an :class:`rpyc.core.async.AsyncResult` object, which will eventually hold the result (or exception) """ timeout = kwargs.pop("timeout", None) if kwargs: raise TypeError("got unexpected keyword argument(s) %s" % (list(kwargs.keys()),)) res = AsyncResult(weakref.proxy(self)) self._async_request(handler, args, res.async_assign) if timeout is not None: res.set_expiry(timeout) return res @property def root(self): """Fetches the root object (service) of the other party""" if self._remote_root is None: self._remote_root = self.sync_request(consts.HANDLE_GETROOT) return self._remote_root # # attribute access # def _check_attr(self, obj, name): # (a standalone sketch of this policy follows this class) if self._config["allow_exposed_attrs"]: if name.startswith(self._config["exposed_prefix"]): name2 = name else: name2 = self._config["exposed_prefix"] + name if hasattr(obj, name2): return name2 if self._config["allow_all_attrs"]: return name if self._config["allow_safe_attrs"] and name in self._config["safe_attrs"]: return name if self._config["allow_public_attrs"] and not name.startswith("_"): return name return False def _access_attr(self, oid, name, args, overrider, param, default): if is_py3k: if type(name) is bytes: name = str(name, "utf8") elif type(name) is not str: raise TypeError("name must be a string") else: if type(name) not in (str, unicode): raise TypeError("name must be a string") name = str(name) # IronPython issue #10 + py3k issue obj = self._local_objects[oid] accessor = getattr(type(obj), overrider, None) if accessor is None: name2 = self._check_attr(obj, name) if not self._config[param] or not name2: raise AttributeError("cannot access %r" % (name,)) accessor = default name = name2 return accessor(obj, name, *args) # # request handlers # def _handle_ping(self, data): return data def _handle_close(self): self._cleanup() def _handle_getroot(self): return self._local_root def _handle_del(self, oid): self._local_objects.decref(oid) def _handle_repr(self, oid): return repr(self._local_objects[oid]) def _handle_str(self, oid): return str(self._local_objects[oid])
def _handle_cmp(self, oid, other): # cmp() might enter recursive resonance... yet another workaround #return cmp(self._local_objects[oid], other) obj = self._local_objects[oid] try: return type(obj).__cmp__(obj, other) except (AttributeError, TypeError): return NotImplemented def _handle_hash(self, oid): return hash(self._local_objects[oid]) def _handle_call(self, oid, args, kwargs=()): return self._local_objects[oid](*args, **dict(kwargs)) def _handle_dir(self, oid): return tuple(dir(self._local_objects[oid])) def _handle_inspect(self, oid): return tuple(netref.inspect_methods(self._local_objects[oid])) def _handle_getattr(self, oid, name): return self._access_attr(oid, name, (), "_rpyc_getattr", "allow_getattr", getattr) def _handle_delattr(self, oid, name): return self._access_attr(oid, name, (), "_rpyc_delattr", "allow_delattr", delattr) def _handle_setattr(self, oid, name, value): return self._access_attr(oid, name, (value,), "_rpyc_setattr", "allow_setattr", setattr) def _handle_callattr(self, oid, name, args, kwargs): return self._handle_getattr(oid, name)(*args, **dict(kwargs)) def _handle_pickle(self, oid, proto): if not self._config["allow_pickle"]: raise ValueError("pickling is disabled") return pickle.dumps(self._local_objects[oid], proto) def _handle_buffiter(self, oid, count): items = [] obj = self._local_objects[oid] i = 0 try: while i < count: items.append(next(obj)) i += 1 except StopIteration: pass return tuple(items) def _handle_oldslicing(self, oid, attempt, fallback, start, stop, args): try: # first try __xxxitem__ getitem = self._handle_getattr(oid, attempt) return getitem(slice(start, stop), *args) except Exception: # fallback to __xxxslice__. see issue #41 if stop is None: stop = maxint getslice = self._handle_getattr(oid, fallback) return getslice(start, stop, *args) def _handle_iter(self, oid): return iter(self._local_objects[oid]) # collect handlers _HANDLERS = {} for name, obj in dict(locals()).items(): if name.startswith("_handle_"): name2 = "HANDLE_" + name[8:].upper() if hasattr(consts, name2): _HANDLERS[getattr(consts, name2)] = obj else: raise NameError("no constant defined for %r" % (name,)) del name, name2, obj
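# --- illustrative sketch (not part of the original module) -----------------
# A standalone model of the attribute policy enforced by _check_attr above:
# exposed_-prefixed attributes win when allow_exposed_attrs is set, then the
# allow_all/safe/public flags are consulted in turn. The default values and
# the tiny SAFE_ATTRS set are simplified assumptions, not rpyc's real config.
SAFE_ATTRS = {"__len__", "__iter__", "__contains__"}

def check_attr(obj, name, config):
    if config.get("allow_exposed_attrs", True):
        name2 = name if name.startswith("exposed_") else "exposed_" + name
        if hasattr(obj, name2):
            return name2
    if config.get("allow_all_attrs", False):
        return name
    if config.get("allow_safe_attrs", True) and name in SAFE_ATTRS:
        return name
    if config.get("allow_public_attrs", False) and not name.startswith("_"):
        return name
    return False

class _DemoService(object):
    def exposed_ping(self):
        return "pong"

assert check_attr(_DemoService(), "ping", {}) == "exposed_ping"
assert check_attr(_DemoService(), "_secret", {}) is False
# ---------------------------------------------------------------------------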
class Connection(object): """The RPyC *connection* (AKA *protocol*). :param root: the :class:`~rpyc.core.service.Service` object to expose :param channel: the :class:`~rpyc.core.channel.Channel` over which messages are passed :param config: the connection's configuration dict (overriding parameters from the :data:`default configuration <DEFAULT_CONFIG>`) """ def __init__(self, root, channel, config={}): self._closed = True self._config = DEFAULT_CONFIG.copy() self._config.update(config) if self._config["connid"] is None: self._config["connid"] = "conn%d" % (next(_connection_id_generator),) self._HANDLERS = self._request_handlers() self._channel = channel self._seqcounter = itertools.count() self._recvlock = Lock() self._sendlock = Lock() self._recv_event = Condition() self._request_callbacks = {} self._local_objects = RefCountingColl() self._last_traceback = None self._proxy_cache = WeakValueDict() self._netref_classes_cache = {} self._remote_root = None self._send_queue = [] self._local_root = root self._closed = False def __del__(self): self.close() def __enter__(self): return self def __exit__(self, t, v, tb): self.close() def __repr__(self): a, b = object.__repr__(self).split(" object ") return "%s %r object %s" % (a, self._config["connid"], b) # # IO # def _cleanup(self, _anyway = True): if self._closed and not _anyway: return self._closed = True self._channel.close() self._local_root.on_disconnect(self) self._request_callbacks.clear() self._local_objects.clear() self._proxy_cache.clear() self._netref_classes_cache.clear() self._last_traceback = None self._remote_root = None self._local_root = None #self._seqcounter = None #self._config.clear() del self._HANDLERS def close(self, _catchall = True): """closes the connection, releasing all held resources""" if self._closed: return self._closed = True try: self._async_request(consts.HANDLE_CLOSE) except EOFError: pass except Exception: if not _catchall: raise finally: self._cleanup(_anyway = True) @property def closed(self): """Indicates whether the connection has been closed or not""" return self._closed def fileno(self): """Returns the connection's underlying file descriptor""" return self._channel.fileno() def ping(self, data = None, timeout = 3): """ Asserts that the other party is functioning properly, by making sure the *data* is echoed back before the *timeout* expires :param data: the data to send (leave ``None`` for the default buffer) :param timeout: the maximal time to wait for echo :raises: :class:`PingError` if the echoed data does not match :raises: :class:`EOFError` if the remote host closes the connection """ if data is None: data = "abcdefghijklmnopqrstuvwxyz" * 20 res = self.async_request(consts.HANDLE_PING, data, timeout = timeout) if res.value != data: raise PingError("echo mismatches sent data") def _get_seq_id(self): return next(self._seqcounter) def _send(self, msg, seq, args): data = brine.dump((msg, seq, args)) # GC might run while sending data # if so, a BaseNetref.__del__ might be called # BaseNetref.__del__ must call asyncreq, # which will cause a deadlock # Solution: # Add the current request to a queue and let the thread that currently # holds the sendlock send it when it's done with its current job. # NOTE: Atomic list operations should be thread safe, # please call me out if they are not on all implementations! # (a standalone sketch of this queue pattern follows this class) self._send_queue.append(data) # It is crucial to check the queue each time AFTER releasing the lock: while self._send_queue: if not self._sendlock.acquire(False): # Another thread holds the lock.
It will send the data after # it's done with its current job. We can safely return. return try: # Can happen if another consumer was scheduled in between # `while` and `acquire`: if not self._send_queue: # Must `continue` to ensure that `send_queue` is checked # after releasing the lock! (in case another producer is # scheduled before `release`) continue data = self._send_queue.pop(0) self._channel.send(data) finally: self._sendlock.release() # # boxing # def _box(self, obj): """store a local object in such a way that it could be recreated on the remote party either by-value or by-reference""" if brine.dumpable(obj): return consts.LABEL_VALUE, obj if type(obj) is tuple: return consts.LABEL_TUPLE, tuple(self._box(item) for item in obj) elif isinstance(obj, netref.BaseNetref) and obj.____conn__ is self: return consts.LABEL_LOCAL_REF, obj.____oid__ else: self._local_objects.add(obj) try: cls = obj.__class__ except Exception: # see issue #16 cls = type(obj) if not isinstance(cls, type): cls = type(obj) return consts.LABEL_REMOTE_REF, (id(obj), cls.__name__, cls.__module__) def _unbox(self, package): """recreate a local object representation of the remote object: if the object is passed by value, just return it; if the object is passed by reference, create a netref to it""" label, value = package if label == consts.LABEL_VALUE: return value if label == consts.LABEL_TUPLE: return tuple(self._unbox(item) for item in value) if label == consts.LABEL_LOCAL_REF: return self._local_objects[value] if label == consts.LABEL_REMOTE_REF: oid, clsname, modname = value if oid in self._proxy_cache: proxy = self._proxy_cache[oid] proxy.____refcount__ += 1 # other side increased refcount on boxing, # if I'm returning from cache instead of new object, # must increase refcount to match return proxy proxy = self._netref_factory(oid, clsname, modname) self._proxy_cache[oid] = proxy return proxy raise ValueError("invalid label %r" % (label,)) def _netref_factory(self, oid, clsname, modname): typeinfo = (clsname, modname) if typeinfo in self._netref_classes_cache: cls = self._netref_classes_cache[typeinfo] elif typeinfo in netref.builtin_classes_cache: cls = netref.builtin_classes_cache[typeinfo] else: info = self.sync_request(consts.HANDLE_INSPECT, oid) cls = netref.class_factory(clsname, modname, info) self._netref_classes_cache[typeinfo] = cls return cls(self, oid) # # dispatching # def _dispatch_request(self, seq, raw_args): try: handler, args = raw_args args = self._unbox(args) res = self._HANDLERS[handler](self, *args) except: # need to catch old style exceptions too t, v, tb = sys.exc_info() self._last_traceback = tb logger = self._config["logger"] if logger and t is not StopIteration: logger.debug("Exception caught", exc_info=True) if t is SystemExit and self._config["propagate_SystemExit_locally"]: raise if t is KeyboardInterrupt and self._config["propagate_KeyboardInterrupt_locally"]: raise self._send(consts.MSG_EXCEPTION, seq, self._box_exc(t, v, tb)) else: self._send(consts.MSG_REPLY, seq, self._box(res)) def _box_exc(self, typ, val, tb): return vinegar.dump(typ, val, tb, include_local_traceback= self._config["include_local_traceback"]) def _unbox_exc(self, raw): return vinegar.load(raw, import_custom_exceptions = self._config["import_custom_exceptions"], instantiate_custom_exceptions = self._config["instantiate_custom_exceptions"], instantiate_oldstyle_exceptions = self._config["instantiate_oldstyle_exceptions"]) # # serving # def _dispatch(self, data): msg, seq, args = brine.load(data) if msg == 
consts.MSG_REQUEST: self._dispatch_request(seq, args) elif msg == consts.MSG_REPLY: obj = self._unbox(args) self._request_callbacks.pop(seq)(False, obj) elif msg == consts.MSG_EXCEPTION: obj = self._unbox_exc(args) self._request_callbacks.pop(seq)(True, obj) else: raise ValueError("invalid message type: %r" % (msg,)) def serve(self, timeout=1, wait_for_lock=True): """Serves a single request or reply that arrives within the given time frame (default is 1 sec). Note that the dispatching of a request might trigger multiple (nested) requests, thus this function may be reentrant. :returns: ``True`` if a request or reply was received, ``False`` otherwise. """ timeout = Timeout(timeout) with self._recv_event: if not self._recvlock.acquire(False): return (wait_for_lock and self._recv_event.wait(timeout.timeleft())) try: data = self._channel.poll(timeout) and self._channel.recv() if not data: return False except EOFError: self.close() raise finally: self._recvlock.release() with self._recv_event: self._recv_event.notify_all() self._dispatch(data) return True def poll(self, timeout = 0): """Serves a single transaction, should one arrive in the given interval. Note that handling a request/reply may trigger nested requests, which are all part of a single transaction. :returns: ``True`` if a transaction was served, ``False`` otherwise""" return self.serve(timeout, False) def serve_all(self): """Serves all requests and replies for as long as the connection is alive.""" try: while not self.closed: self.serve(None) except (socket.error, select_error, IOError): if not self.closed: raise except EOFError: pass finally: self.close() def serve_threaded(self, thread_count=10): """Serves all requests and replies for as long as the connection is alive, using *thread_count* worker threads.""" def _thread_target(): try: while True: self.serve(None) except (socket.error, select_error, IOError): if not self.closed: raise except EOFError: pass try: threads = [spawn(_thread_target) for _ in range(thread_count)] for thread in threads: thread.join() finally: self.close()
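# --- illustrative sketch (not part of the original module) -----------------
# A simplified model of the handoff inside serve() above: exactly one thread
# owns the receive lock and reads from the channel; competing threads wait on
# a Condition and are woken via notify_all() once the reader is done, so they
# can look for their own reply instead of contending on the socket. `recv`
# and `dispatch` are assumed callables standing in for the channel and for
# Connection._dispatch.
import threading

_recvlock = threading.Lock()
_recv_event = threading.Condition()

def serve_once(recv, dispatch, timeout=None):
    with _recv_event:
        if not _recvlock.acquire(False):
            # another thread is reading; just wait until it finishes
            return _recv_event.wait(timeout)
    try:
        data = recv()
    finally:
        _recvlock.release()
        with _recv_event:
            _recv_event.notify_all()
    dispatch(data)
    return True
# ---------------------------------------------------------------------------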
def poll_all(self, timeout=0): """Serves all requests and replies that arrive within the given interval. :returns: ``True`` if at least a single transaction was served, ``False`` otherwise """ at_least_once = False timeout = Timeout(timeout) try: while True: if self.poll(timeout): at_least_once = True if timeout.expired(): break except EOFError: pass return at_least_once # # requests # def sync_request(self, handler, *args): """Sends a synchronous request (waits for the reply to arrive) :raises: any exception that the request may generate :returns: the result of the request """ timeout = self._config["sync_request_timeout"] return self.async_request(handler, *args, timeout=timeout).value def _async_request(self, handler, args = (), callback = (lambda a, b: None)): seq = self._get_seq_id() self._request_callbacks[seq] = callback try: self._send(consts.MSG_REQUEST, seq, (handler, self._box(args))) except: self._request_callbacks.pop(seq, None) raise def async_request(self, handler, *args, **kwargs): """Send an asynchronous request (does not wait for it to finish) :returns: an :class:`rpyc.core.async_.AsyncResult` object, which will eventually hold the result (or exception) """ timeout = kwargs.pop("timeout", None) if kwargs: raise TypeError("got unexpected keyword argument(s) %s" % (list(kwargs.keys()),)) res = AsyncResult(self) self._async_request(handler, args, res) if timeout is not None: res.set_expiry(timeout) return res @property def root(self): """Fetches the root object (service) of the other party""" if self._remote_root is None: self._remote_root = self.sync_request(consts.HANDLE_GETROOT) return self._remote_root # # attribute access # def _check_attr(self, obj, name, perm): config = self._config if not config[perm]: raise AttributeError("cannot access %r" % (name,)) prefix = config["allow_exposed_attrs"] and config["exposed_prefix"] plain = (config["allow_all_attrs"] or config["allow_exposed_attrs"] and name.startswith(prefix) or config["allow_safe_attrs"] and name in config["safe_attrs"] or config["allow_public_attrs"] and not name.startswith("_")) has_exposed = prefix and hasattr(obj, prefix+name) if plain and (not has_exposed or hasattr(obj, name)): return name if has_exposed: return prefix+name if plain: return name # chance for better traceback raise AttributeError("cannot access %r" % (name,)) def _access_attr(self, obj, name, args, overrider, param, default): if is_py3k: if type(name) is bytes: name = str(name, "utf8") elif type(name) is not str: raise TypeError("name must be a string") else: if type(name) not in (str, unicode): raise TypeError("name must be a string") name = str(name) # IronPython issue #10 + py3k issue accessor = getattr(type(obj), overrider, None) if accessor is None: accessor = default name = self._check_attr(obj, name, param) return accessor(obj, name, *args) # # request handlers # @classmethod def _request_handlers(cls): return { consts.HANDLE_PING: cls._handle_ping, consts.HANDLE_CLOSE: cls._handle_close, consts.HANDLE_GETROOT: cls._handle_getroot, consts.HANDLE_GETATTR: cls._handle_getattr, consts.HANDLE_DELATTR: cls._handle_delattr, consts.HANDLE_SETATTR: cls._handle_setattr, consts.HANDLE_CALL: cls._handle_call, consts.HANDLE_CALLATTR: cls._handle_callattr, consts.HANDLE_REPR: cls._handle_repr, consts.HANDLE_STR: cls._handle_str, consts.HANDLE_CMP: cls._handle_cmp, consts.HANDLE_HASH: cls._handle_hash, consts.HANDLE_DIR: cls._handle_dir, consts.HANDLE_PICKLE: cls._handle_pickle, consts.HANDLE_DEL: cls._handle_del, consts.HANDLE_INSPECT: cls._handle_inspect, consts.HANDLE_BUFFITER: cls._handle_buffiter, consts.HANDLE_OLDSLICING:
cls._handle_oldslicing, consts.HANDLE_CTXEXIT: cls._handle_ctxexit, } def _handle_ping(self, data): return data def _handle_close(self): self._cleanup() def _handle_getroot(self): return self._local_root def _handle_del(self, obj, count=1): self._local_objects.decref(id(obj), count) def _handle_repr(self, obj): return repr(obj) def _handle_str(self, obj): return str(obj) def _handle_cmp(self, obj, other, op='__cmp__'): # cmp() might enter recursive resonance... yet another workaround #return cmp(obj, other) try: return getattr(type(obj), op)(obj, other) except (AttributeError, TypeError): return NotImplemented def _handle_hash(self, obj): return hash(obj) def _handle_call(self, obj, args, kwargs=()): return obj(*args, **dict(kwargs)) def _handle_dir(self, obj): return tuple(dir(obj)) def _handle_inspect(self, oid): return tuple(netref.inspect_methods(self._local_objects[oid])) def _handle_getattr(self, obj, name): return self._access_attr(obj, name, (), "_rpyc_getattr", "allow_getattr", getattr) def _handle_delattr(self, obj, name): return self._access_attr(obj, name, (), "_rpyc_delattr", "allow_delattr", delattr) def _handle_setattr(self, obj, name, value): return self._access_attr(obj, name, (value,), "_rpyc_setattr", "allow_setattr", setattr) def _handle_callattr(self, obj, name, args, kwargs=()): obj = self._handle_getattr(obj, name) return self._handle_call(obj, args, kwargs) def _handle_ctxexit(self, obj, exc): if exc: try: raise exc except: typ, exc, tb = sys.exc_info() else: typ = tb = None return self._handle_getattr(obj, "__exit__")(typ, exc, tb) def _handle_pickle(self, obj, proto): if not self._config["allow_pickle"]: raise ValueError("pickling is disabled") return bytes(pickle.dumps(obj, proto)) def _handle_buffiter(self, obj, count): return tuple(itertools.islice(obj, count)) def _handle_oldslicing(self, obj, attempt, fallback, start, stop, args): try: # first try __xxxitem__ getitem = self._handle_getattr(obj, attempt) return getitem(slice(start, stop), *args) except Exception: # fallback to __xxxslice__. see issue #41 if stop is None: stop = maxint getslice = self._handle_getattr(obj, fallback) return getslice(start, stop, *args)
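# --- illustrative sketch (not part of the original module) -----------------
# A standalone model of the queue-draining trick in _send above: a message
# produced while another thread holds the send lock (e.g. from a GC-triggered
# BaseNetref.__del__) is appended instead of blocking, and whichever thread
# currently holds the lock drains the queue. Re-checking the queue after
# every release ensures nothing is stranded. `channel_send` is an assumed
# callable standing in for Channel.send.
import threading

_send_queue = []
_sendlock = threading.Lock()

def send(channel_send, data):
    _send_queue.append(data)            # list.append is atomic on CPython
    while _send_queue:
        if not _sendlock.acquire(False):
            return                      # the lock holder will drain the queue
        try:
            if not _send_queue:         # emptied between the check and acquire
                continue                # loop: re-check after releasing below
            channel_send(_send_queue.pop(0))
        finally:
            _sendlock.release()
# ---------------------------------------------------------------------------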
class Connection(object): """The RPyC connection (also known as the RPyC protocol). * service: the service to expose * channel: the channel over which messages are passed * config: this connection's config dict (overriding parameters from the default config dict) * _lazy: if True, the service is not initialized when the connection is created, and you will need to call _init_service manually later; default is False (the service is initialized immediately) """ def __init__(self, service, channel, config = {}, _lazy = False): self._closed = True self._config = DEFAULT_CONFIG.copy() self._config.update(config) if self._config["connid"] is None: self._config["connid"] = "conn%d" % (_connection_id_generator.next(),) self._channel = channel self._seqcounter = itertools.count() self._recvlock = Lock() self._sendlock = Lock() self._sync_replies = {} self._async_callbacks = {} self._local_objects = RefCountingColl() self._last_traceback = None self._proxy_cache = WeakValueDict() self._netref_classes_cache = {} self._remote_root = None self._local_root = service(weakref.proxy(self)) if not _lazy: self._init_service() self._closed = False def _init_service(self): self._local_root.on_connect() def __del__(self): self.close() def __enter__(self): return self def __exit__(self, t, v, tb): self.close() def __repr__(self): a, b = object.__repr__(self).split(" object ") return "%s %r object %s" % (a, self._config["connid"], b) # # IO # def _cleanup(self, _anyway = True): if self._closed and not _anyway: return self._closed = True self._channel.close() self._local_root.on_disconnect() self._sync_replies.clear() self._async_callbacks.clear() self._local_objects.clear() self._proxy_cache.clear() self._netref_classes_cache.clear() self._last_traceback = None self._remote_root = None self._local_root = None #self._seqcounter = None #self._config.clear() def close(self, _catchall = True): if self._closed: return self._closed = True try: try: self._async_request(consts.HANDLE_CLOSE) except EOFError: pass except Exception: if not _catchall: raise finally: self._cleanup(_anyway = True) @property def closed(self): return self._closed def fileno(self): return self._channel.fileno() def ping(self, data = "the world is a vampire!"
* 20, timeout = 3): """assert that the other party is functioning properly""" res = self.async_request(consts.HANDLE_PING, data, timeout = timeout) if res.value != data: raise PingError("echo mismatches sent data") def _send(self, msg, seq, args): data = brine.dump((msg, seq, args)) self._sendlock.acquire() try: self._channel.send(data) finally: self._sendlock.release() def _send_request(self, handler, args): seq = self._seqcounter.next() self._send(consts.MSG_REQUEST, seq, (handler, self._box(args))) return seq def _send_reply(self, seq, obj): self._send(consts.MSG_REPLY, seq, self._box(obj)) def _send_exception(self, seq, exctype, excval, exctb): exc = vinegar.dump(exctype, excval, exctb, include_local_traceback = self._config["include_local_traceback"]) self._send(consts.MSG_EXCEPTION, seq, exc) # # boxing # def _box(self, obj): """store a local object in such a way that it could be recreated on the remote party either by-value or by-reference""" if brine.dumpable(obj): return consts.LABEL_VALUE, obj if type(obj) is tuple: return consts.LABEL_TUPLE, tuple(self._box(item) for item in obj) elif isinstance(obj, netref.BaseNetref) and obj.____conn__() is self: return consts.LABEL_LOCAL_REF, obj.____oid__ else: self._local_objects.add(obj) try: cls = obj.__class__ except Exception: # see issue #16 cls = type(obj) return consts.LABEL_REMOTE_REF, (id(obj), cls.__name__, cls.__module__) def _unbox(self, package): """recreate a local object representation of the remote object: if the object is passed by value, just return it; if the object is passed by reference, create a netref to it""" label, value = package if label == consts.LABEL_VALUE: return value if label == consts.LABEL_TUPLE: return tuple(self._unbox(item) for item in value) if label == consts.LABEL_LOCAL_REF: return self._local_objects[value] if label == consts.LABEL_REMOTE_REF: oid, clsname, modname = value if oid in self._proxy_cache: return self._proxy_cache[oid] proxy = self._netref_factory(oid, clsname, modname) self._proxy_cache[oid] = proxy return proxy raise ValueError("invalid label %r" % (label,)) def _netref_factory(self, oid, clsname, modname): typeinfo = (clsname, modname) if typeinfo in self._netref_classes_cache: cls = self._netref_classes_cache[typeinfo] elif typeinfo in netref.builtin_classes_cache: cls = netref.builtin_classes_cache[typeinfo] else: info = self.sync_request(consts.HANDLE_INSPECT, oid) cls = netref.class_factory(clsname, modname, info) self._netref_classes_cache[typeinfo] = cls return cls(weakref.ref(self), oid) # # dispatching # def _dispatch_request(self, seq, raw_args): try: handler, args = raw_args args = self._unbox(args) res = self._HANDLERS[handler](self, *args) except KeyboardInterrupt: raise except: # need to catch old style exceptions too t, v, tb = sys.exc_info() self._last_traceback = tb if t is SystemExit and self._config["propagate_SystemExit_locally"]: raise self._send_exception(seq, t, v, tb) else: self._send_reply(seq, res) def _dispatch_reply(self, seq, raw): obj = self._unbox(raw) if seq in self._async_callbacks: self._async_callbacks.pop(seq)(False, obj) else: self._sync_replies[seq] = (False, obj) def _dispatch_exception(self, seq, raw): obj = vinegar.load(raw, import_custom_exceptions = self._config["import_custom_exceptions"], instantiate_custom_exceptions = self._config["instantiate_custom_exceptions"], instantiate_oldstyle_exceptions = self._config["instantiate_oldstyle_exceptions"]) if seq in self._async_callbacks: self._async_callbacks.pop(seq)(True, obj) else: 
self._sync_replies[seq] = (True, obj) # # serving # def _recv(self, timeout, wait_for_lock): if not self._recvlock.acquire(wait_for_lock): return None try: try: if self._channel.poll(timeout): data = self._channel.recv() else: data = None except EOFError: self.close() raise finally: self._recvlock.release() return data def _dispatch(self, data): msg, seq, args = brine.load(data) if msg == consts.MSG_REQUEST: self._dispatch_request(seq, args) elif msg == consts.MSG_REPLY: self._dispatch_reply(seq, args) elif msg == consts.MSG_EXCEPTION: self._dispatch_exception(seq, args) else: raise ValueError("invalid message type: %r" % (msg,)) def poll(self, timeout = 0): """serve a single transaction, should one arrive in the given interval. note that handling a request/reply may trigger nested requests, which are all part of the transaction. returns True if one was served, False otherwise""" data = self._recv(timeout, wait_for_lock = False) if not data: return False self._dispatch(data) return True def serve(self, timeout = 1): """serve a single request or reply that arrives within the given time frame (default is 1 sec). note that the dispatching of a request might trigger multiple (nested) requests, thus this function may be reentrant. returns True if a request or reply was received, False otherwise.""" data = self._recv(timeout, wait_for_lock = True) if not data: return False self._dispatch(data) return True def serve_all(self): """serve all requests and replies while the connection is alive""" try: try: while True: self.serve(0.1) except select.error: if not self.closed: raise except EOFError: pass finally: self.close() def poll_all(self, timeout = 0): """serve all requests and replies that arrive within the given interval. returns True if at least one was served, False otherwise""" at_least_once = False try: while self.poll(timeout): at_least_once = True except EOFError: pass return at_least_once # # requests # def sync_request(self, handler, *args): """send a request and wait for the reply to arrive""" seq = self._send_request(handler, args) while seq not in self._sync_replies: self.serve(0.1) isexc, obj = self._sync_replies.pop(seq) if isexc: raise obj else: return obj def _async_request(self, handler, args = (), callback = (lambda a, b: None)): seq = self._send_request(handler, args) self._async_callbacks[seq] = callback def async_request(self, handler, *args, **kwargs): """send a request and return an AsyncResult object, which will eventually hold the reply""" timeout = kwargs.pop("timeout", None) if kwargs: raise TypeError("got unexpected keyword argument %r" % (kwargs.keys()[0],)) res = AsyncResult(weakref.proxy(self)) self._async_request(handler, args, res) if timeout is not None: res.set_expiry(timeout) return res @property def root(self): """fetch the root object of the other party""" if self._remote_root is None: self._remote_root = self.sync_request(consts.HANDLE_GETROOT) return self._remote_root # # attribute access # def _check_attr(self, obj, name): if self._config["allow_exposed_attrs"]: if name.startswith(self._config["exposed_prefix"]): name2 = name else: name2 = self._config["exposed_prefix"] + name if hasattr(obj, name2): return name2 if self._config["allow_all_attrs"]: return name if self._config["allow_safe_attrs"] and name in self._config["safe_attrs"]: return name if self._config["allow_public_attrs"] and not name.startswith("_"): return name return False def _access_attr(self, oid, name, args, overrider, param, default): if type(name) is unicode: name = str(name) #
IronPython issue #10 elif type(name) is not str: raise TypeError("attr name must be a string") obj = self._local_objects[oid] accessor = getattr(type(obj), overrider, None) if accessor is None: name2 = self._check_attr(obj, name) if not self._config[param] or not name2: raise AttributeError("cannot access %r" % (name,)) accessor = default name = name2 return accessor(obj, name, *args) # # handlers # def _handle_ping(self, data): return data def _handle_close(self): self._cleanup() def _handle_getroot(self): return self._local_root def _handle_del(self, oid): self._local_objects.decref(oid) def _handle_repr(self, oid): return repr(self._local_objects[oid]) def _handle_str(self, oid): return str(self._local_objects[oid]) def _handle_cmp(self, oid, other): # cmp() might enter recursive resonance... yet another workaround #return cmp(self._local_objects[oid], other) obj = self._local_objects[oid] try: return type(obj).__cmp__(obj, other) except TypeError: return NotImplemented def _handle_hash(self, oid): return hash(self._local_objects[oid]) def _handle_call(self, oid, args, kwargs=()): return self._local_objects[oid](*args, **dict(kwargs)) def _handle_dir(self, oid): return tuple(dir(self._local_objects[oid])) def _handle_inspect(self, oid): return tuple(netref.inspect_methods(self._local_objects[oid])) def _handle_getattr(self, oid, name): return self._access_attr(oid, name, (), "_rpyc_getattr", "allow_getattr", getattr) def _handle_delattr(self, oid, name): return self._access_attr(oid, name, (), "_rpyc_delattr", "allow_delattr", delattr) def _handle_setattr(self, oid, name, value): return self._access_attr(oid, name, (value,), "_rpyc_setattr", "allow_setattr", setattr) def _handle_callattr(self, oid, name, args, kwargs): return self._handle_getattr(oid, name)(*args, **dict(kwargs)) def _handle_pickle(self, oid, proto): if not self._config["allow_pickle"]: raise ValueError("pickling is disabled") return pickle.dumps(self._local_objects[oid], proto) def _handle_buffiter(self, oid, count): items = [] obj = self._local_objects[oid] i = 0 try: while i < count: items.append(obj.next()) i += 1 except StopIteration: pass return tuple(items) # collect handlers _HANDLERS = {} for name, obj in locals().items(): if name.startswith("_handle_"): name2 = "HANDLE_" + name[8:].upper() if hasattr(consts, name2): _HANDLERS[getattr(consts, name2)] = obj else: raise NameError("no constant defined for %r" % (name,)) del name, name2, obj
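# --- illustrative sketch (not part of the original module) -----------------
# An end-to-end model of the wire protocol shared by all the versions above:
# every message is a (msg, seq, args) triple (brine-dumped on the real wire),
# a request carries (handler, boxed_args), and the reply or exception echoes
# the same seq so sync_request can serve() until its seq shows up. The plain
# tuples and the echo handler here are simplified stand-ins for brine,
# consts and the real handler table.
MSG_REQUEST, MSG_REPLY, MSG_EXCEPTION = 1, 2, 3
HANDLE_ECHO = 7  # hypothetical handler constant

def dispatch(msg_tuple, sync_replies, handlers):
    msg, seq, args = msg_tuple
    if msg == MSG_REQUEST:
        handler, boxed_args = args
        return (MSG_REPLY, seq, handlers[handler](boxed_args))
    elif msg in (MSG_REPLY, MSG_EXCEPTION):
        sync_replies[seq] = (msg == MSG_EXCEPTION, args)
    else:
        raise ValueError("invalid message type: %r" % (msg,))

sync_replies = {}
handlers = {HANDLE_ECHO: lambda boxed_args: boxed_args}
reply = dispatch((MSG_REQUEST, 0, (HANDLE_ECHO, "data")), sync_replies, handlers)
dispatch(reply, sync_replies, handlers)  # the requester's side of the round trip
isexc, obj = sync_replies.pop(0)
assert (isexc, obj) == (False, "data")
# ---------------------------------------------------------------------------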