def sync_request(self, handler, *args):
    """
    Intercept outgoing synchronous requests from RPyC to add caching or
    fulfill them locally when possible to improve performance.

    Every remote call adds network latency, so as few calls as possible
    should leave this method:

    * ``HANDLE_INSPECT`` for classes from ``modin``, ``pandas`` and ``numpy``
      is answered by importing and inspecting the class locally instead of
      going over the wire.
    * ``HANDLE_GETATTR`` / ``HANDLE_STR`` / ``HANDLE_HASH`` results for
      ``numpy`` and ``numpy.dtype`` objects are memoized in
      ``self._static_cache``.

    Anything not handled above falls through to the parent implementation.
    """
    if handler == consts.HANDLE_INSPECT:
        # always inspect classes from modin, pandas and numpy locally,
        # do not go to network for those
        id_name = str(args[0][0])
        if id_name.split(".", 1)[0] in ("modin", "pandas", "numpy"):
            # first, try treating the whole dotted name as a module path
            try:
                modobj = __import__(id_name)
                for subname in id_name.split(".")[1:]:
                    modobj = getattr(modobj, subname)
            except (ImportError, AttributeError):
                pass
            else:
                return get_methods(netref.LOCAL_ATTRS, modobj)
            # otherwise treat the last dotted component as a class living in
            # the module named by the rest; guard against names without a dot
            # (e.g. plain "numpy"), for which rsplit() yields a single element
            # and the two-target unpacking below would raise ValueError
            if "." in id_name:
                modname, clsname = id_name.rsplit(".", 1)
                try:
                    modobj = __import__(modname)
                    for subname in modname.split(".")[1:]:
                        modobj = getattr(modobj, subname)
                    clsobj = getattr(modobj, clsname)
                except (ImportError, AttributeError):
                    pass
                else:
                    return get_methods(netref.LOCAL_ATTRS, clsobj)
    elif handler in (consts.HANDLE_GETATTR, consts.HANDLE_STR, consts.HANDLE_HASH):
        if handler == consts.HANDLE_GETATTR:
            obj, attr = args
            key = (attr, handler)
        else:
            obj = args[0]
            key = handler
        if str(obj.____id_pack__[0]) in {"numpy", "numpy.dtype"}:
            # assume numpy attributes and numpy.dtype attributes never change;
            # note that we're using RPyC id_pack as cache key, and it includes
            # the name, class id and instance id, so this cache is unique to
            # each instance of, say, numpy.dtype(), hence numpy.int16 and
            # numpy.float64 get different caches.
            cache = self._static_cache[obj.____id_pack__]
            try:
                result = cache[key]
            except KeyError:
                result = cache[key] = super().sync_request(handler, *args)
                if handler == consts.HANDLE_GETATTR:
                    # save an entry in our cache telling that we get this
                    # attribute cached
                    self._static_cache[result.____id_pack__]["__getattr__"] = True
            return result
    return super().sync_request(handler, *args)
def _handle_inspect(self, id_pack):  # request handler
    """
    Serve a HANDLE_INSPECT request for the object registered under *id_pack*.

    When RPyC is chained (RPyC over RPyC), the object cached in
    ``self._local_objects`` is itself a netref; since ``__mro__`` is not a
    safe attribute, the request is forwarded over that netref's own proxy
    connection instead of being answered here.
    See issue #346 or tests.test_rpyc_over_rpyc.Test_rpyc_over_rpyc.
    """
    # hoist the dict lookup: the original indexed _local_objects[id_pack]
    # three separate times for the same key
    local_obj = self._local_objects[id_pack]
    if hasattr(local_obj, '____conn__'):
        conn = local_obj.____conn__
        return conn.sync_request(consts.HANDLE_INSPECT, id_pack)
    return tuple(get_methods(netref.LOCAL_ATTRS, local_obj))
# NOTE(review): this span is the tail of a netref class-factory function whose
# `def` header (and the origin of `name_pack`, `ns`, `methods`,
# `class_descriptor`) is outside the visible chunk — presumably rpyc's
# `class_factory`; confirm against the full file before restructuring.
# Walk `name_pack` right-to-left, trying ever-shorter dotted prefixes against
# sys.modules until an already-imported module is found.
cursor = len(name_pack)
while cursor != -1:
    _module = sys.modules.get(name_pack[:cursor])
    if _module is None:
        # prefix not imported: retry with the next-shorter dotted prefix
        cursor = name_pack[:cursor].rfind('.')
        continue
    # the remainder after the module prefix is taken as the class name
    _class_name = name_pack[cursor + 1:]
    _class = getattr(_module, _class_name, None)
    if _class is not None and hasattr(_class, '__class__'):
        class_descriptor = NetrefClass(_class)
    break
ns['__class__'] = class_descriptor
# prefer the resolved local class's name for the generated type; fall back to
# the raw dotted name when resolution failed
netref_name = class_descriptor.owner.__name__ if class_descriptor is not None else name_pack
# create methods that must perform a syncreq
for name, doc in methods:
    name = str(name)  # IronPython issue #10
    # only create methods that wont shadow BaseNetref during merge for mro
    if name not in LOCAL_ATTRS:  # i.e. `name != __class__`
        ns[name] = _make_method(name, doc)
return type(netref_name, (BaseNetref, ), ns)

# Module-level: pre-build netref classes for every normalized builtin type so
# they never need to be generated on the fly.
for _builtin in _builtin_types:
    _id_pack = get_id_pack(_builtin)
    _name_pack = _id_pack[0]
    _normalized_builtin_types[_name_pack] = _builtin
    _builtin_methods = get_methods(LOCAL_ATTRS, _builtin)
    # assume all normalized builtins are classes
    builtin_classes_cache[_name_pack] = class_factory(_id_pack, _builtin_methods)
def _handle_inspect(self, id_pack):  # request handler
    """Inspect the locally-registered object for *id_pack* and return its methods as a tuple."""
    target = self._local_objects[id_pack]
    return tuple(get_methods(netref.LOCAL_ATTRS, target))