def get_signature_params(func):
    """Get signature parameters.

    Support Cython functions by grabbing relevant attributes from the Cython
    function and attaching to a no-op function. This is somewhat brittle,
    since funcsigs may change, but given that funcsigs is written to a PEP,
    we hope it is relatively stable. Future versions of Python may allow
    overloading the inspect 'isfunction' and 'ismethod' functions / create
    ABC for Python functions. Until then, it appears that Cython won't do
    anything about compatability with the inspect module.

    Args:
        func: The function whose signature should be checked.

    Returns:
        A list of (name, Parameter) pairs describing the function's
        signature, as produced by funcsigs.

    Raises:
        TypeError: A type error if the signature is not supported
    """
    # The first condition for Cython functions, the latter for Cython instance
    # methods
    if is_cython(func):
        attrs = [
            "__code__", "__annotations__", "__defaults__", "__kwdefaults__"
        ]

        # Use a generator (not a list) with all() so we stop at the first
        # missing attribute.
        if all(hasattr(func, attr) for attr in attrs):
            original_func = func

            # Replace the Cython function with a no-op Python function and
            # copy the signature-relevant attributes onto it so funcsigs can
            # introspect it.
            def func():
                return

            for attr in attrs:
                setattr(func, attr, getattr(original_func, attr))
        else:
            raise TypeError("{0!r} is not a Python function we can process"
                            .format(func))

    return list(funcsigs.signature(func).parameters.items())
def _do_export(self, remote_function):
    """Pickle a remote function and export it to redis.

    Args:
        remote_function: the RemoteFunction object.
    """
    # If workers load code directly from local files, there is nothing to
    # export.
    if self._worker.load_code_from_local:
        return
    # Work around limitations of Python pickling.
    function = remote_function._function
    # Record whether the function's own name is already bound as a global in
    # its module, and what it is bound to, so the temporary rebinding below
    # can be undone exactly.
    function_name_global_valid = function.__name__ in function.__globals__
    function_name_global_value = function.__globals__.get(
        function.__name__)
    # Allow the function to reference itself as a global variable
    if not is_cython(function):
        function.__globals__[function.__name__] = remote_function
    try:
        pickled_function = pickle.dumps(function)
    finally:
        # Undo our changes
        if function_name_global_valid:
            function.__globals__[function.__name__] = (
                function_name_global_value)
        else:
            del function.__globals__[function.__name__]

    # Complain if the pickled payload is too large to export.
    check_oversized_pickle(pickled_function,
                           remote_function._function_name,
                           "remote function", self._worker)
    # Redis key is namespaced by the current job so different jobs' remote
    # functions do not collide.
    key = (b"RemoteFunction:" + self._worker.current_job_id.binary() + b":"
           + remote_function._function_descriptor.function_id.binary())
    self._worker.redis_client.hmset(
        key, {
            "job_id": self._worker.current_job_id.binary(),
            "function_id":
            remote_function._function_descriptor.function_id.binary(),
            "name": remote_function._function_name,
            "module": function.__module__,
            "function": pickled_function,
            "max_calls": remote_function._max_calls
        })
    # Notify listeners (presumably worker import threads — confirm against
    # the importer code) that a new export is available.
    self._worker.redis_client.rpush("Exports", key)
def _do_export(self, remote_function):
    """Pickle a remote function and export it to redis.

    Args:
        remote_function: the RemoteFunction object.
    """
    # If workers load code directly from local files, there is nothing to
    # export.
    if self._worker.load_code_from_local:
        return
    # Work around limitations of Python pickling.
    function = remote_function._function
    # Record whether the function's own name is already bound as a global in
    # its module, and what it is bound to, so the temporary rebinding below
    # can be undone exactly.
    function_name_global_valid = function.__name__ in function.__globals__
    function_name_global_value = function.__globals__.get(
        function.__name__)
    # Allow the function to reference itself as a global variable
    if not is_cython(function):
        function.__globals__[function.__name__] = remote_function
    try:
        pickled_function = pickle.dumps(function)
    finally:
        # Undo our changes
        if function_name_global_valid:
            function.__globals__[function.__name__] = (
                function_name_global_value)
        else:
            del function.__globals__[function.__name__]

    # Complain if the pickled payload is too large to export.
    check_oversized_pickle(pickled_function,
                           remote_function._function_name,
                           "remote function", self._worker)
    # Redis key is namespaced by the driver of the current task so different
    # drivers' remote functions do not collide.
    key = (b"RemoteFunction:" + self._worker.task_driver_id.binary() + b":"
           + remote_function._function_descriptor.function_id.binary())
    self._worker.redis_client.hmset(
        key, {
            "driver_id": self._worker.task_driver_id.binary(),
            "function_id": remote_function._function_descriptor.
            function_id.binary(),
            "name": remote_function._function_name,
            "module": function.__module__,
            "function": pickled_function,
            "max_calls": remote_function._max_calls
        })
    # Notify listeners (presumably worker import threads — confirm against
    # the importer code) that a new export is available.
    self._worker.redis_client.rpush("Exports", key)
def get_signature(func):
    """Get signature parameters.

    Support Cython functions by grabbing relevant attributes from the Cython
    function and attaching to a no-op function. This is somewhat brittle,
    since inspect may change, but given that inspect is written to a PEP, we
    hope it is relatively stable. Future versions of Python may allow
    overloading the inspect 'isfunction' and 'ismethod' functions / create
    ABC for Python functions. Until then, it appears that Cython won't do
    anything about compatability with the inspect module.

    Args:
        func: The function whose signature should be checked.

    Returns:
        A function signature object, which includes the names of the keyword
        arguments as well as their default values.

    Raises:
        TypeError: A type error if the signature is not supported
    """
    # The first condition for Cython functions, the latter for Cython instance
    # methods
    if is_cython(func):
        required_attrs = (
            "__code__",
            "__annotations__",
            "__defaults__",
            "__kwdefaults__",
        )

        # Bail out early if any signature-relevant attribute is absent.
        if not all(hasattr(func, name) for name in required_attrs):
            raise TypeError(
                f"{func!r} is not a Python function we can process")

        cython_func = func

        # Shadow the Cython function with a no-op Python function and graft
        # the original's signature-relevant attributes onto it so that
        # inspect can introspect it.
        def func():
            return

        for name in required_attrs:
            setattr(func, name, getattr(cython_func, name))

    return inspect.signature(func)
def pred(x):
    """Return True if x is a plain function, a method, or a Cython function."""
    if inspect.isfunction(x):
        return True
    if inspect.ismethod(x):
        return True
    return is_cython(x)
def remote(cls, *args, **kwargs):
    """Create an actor instance and return a handle to it.

    NOTE(review): `Class`, `class_id`, `class_name`, `exported`,
    `checkpoint_interval`, `actor_creation_resources`, and
    `actor_method_cpus` are not defined in this block — presumably they are
    captured from an enclosing scope; confirm against the full file.

    Args:
        *args: Positional arguments forwarded to the actor's __init__.
        **kwargs: Keyword arguments forwarded to the actor's __init__.

    Raises:
        Exception: If called before ray.init().
    """
    if ray.worker.global_worker.mode is None:
        raise Exception("Actors cannot be created before ray.init() "
                        "has been called.")

    actor_id = random_actor_id()
    # The ID for this instance of ActorHandle. These should be unique
    # across instances with the same _ray_actor_id.
    actor_handle_id = ray.local_scheduler.ObjectID(
        ray.worker.NIL_ACTOR_ID)
    # The actor cursor is a dummy object representing the most recent
    # actor method invocation. For each subsequent method invocation,
    # the current cursor should be added as a dependency, and then
    # updated to reflect the new invocation.
    actor_cursor = None
    # The number of actor method invocations that we've called so far.
    actor_counter = 0
    # Get the actor methods of the given class.
    actor_methods = inspect.getmembers(
        Class, predicate=(lambda x: (inspect.isfunction(x) or
                                     inspect.ismethod(x) or
                                     is_cython(x))))
    # Extract the signatures of each of the methods. This will be used
    # to catch some errors if the methods are called with inappropriate
    # arguments.
    method_signatures = dict()
    for k, v in actor_methods:
        # Print a warning message if the method signature is not
        # supported. We don't raise an exception because if the actor
        # inherits from a class that has a method whose signature we
        # don't support, we there may not be much the user can do about
        # it.
        signature.check_signature_supported(v, warn=True)
        method_signatures[k] = signature.extract_signature(
            v, ignore_first=True)

    actor_method_names = [method_name for method_name, _ in actor_methods]
    # Per-method count of return values: taken from the
    # __ray_num_return_vals__ attribute when present, defaulting to 1.
    actor_method_num_return_vals = []
    for _, method in actor_methods:
        if hasattr(method, "__ray_num_return_vals__"):
            actor_method_num_return_vals.append(
                method.__ray_num_return_vals__)
        else:
            actor_method_num_return_vals.append(1)
    # Do not export the actor class or the actor if run in PYTHON_MODE
    # Instead, instantiate the actor locally and add it to
    # global_worker's dictionary
    if ray.worker.global_worker.mode == ray.PYTHON_MODE:
        ray.worker.global_worker.actors[actor_id] = (
            Class.__new__(Class))
    else:
        # Export the actor.
        if not exported:
            export_actor_class(class_id, Class, actor_method_names,
                               actor_method_num_return_vals,
                               checkpoint_interval,
                               ray.worker.global_worker)
            # Mark the class as exported so it is only exported once.
            exported.append(0)
        actor_cursor = export_actor(actor_id, class_id, class_name,
                                    actor_method_names,
                                    actor_method_num_return_vals,
                                    actor_creation_resources,
                                    actor_method_cpus,
                                    ray.worker.global_worker)

    # Instantiate the actor handle.
    actor_object = cls.__new__(cls)
    actor_object._manual_init(actor_id, class_id, actor_handle_id,
                              actor_cursor, actor_counter,
                              actor_method_names,
                              actor_method_num_return_vals,
                              method_signatures, checkpoint_interval,
                              actor_cursor, actor_creation_resources,
                              actor_method_cpus)

    # Call __init__ as a remote function.
    if "__init__" in actor_object._ray_actor_method_names:
        actor_object._actor_method_call("__init__", args=args,
                                        kwargs=kwargs,
                                        dependency=actor_cursor)
    else:
        print("WARNING: this object has no __init__ method.")

    return actor_object
def fetch_and_register_actor(actor_class_key, resources, worker):
    """Import an actor.

    This will be called by the worker's import thread when the worker
    receives the actor_class export, assuming that the worker is an actor
    for that class.

    NOTE(review): `resources` is not used in the visible body of this
    function — confirm whether it is still needed.

    Args:
        actor_class_key: The key in Redis to use to fetch the actor.
        resources: The resources required for this actor's lifetime.
        worker: The worker to use.
    """
    actor_id_str = worker.actor_id
    # Fetch all the fields of the exported actor class in one redis call.
    # hmget returns raw bytes for each field.
    (driver_id, class_id, class_name, module, pickled_class,
     checkpoint_interval, actor_method_names,
     actor_method_num_return_vals) = worker.redis_client.hmget(
         actor_class_key,
         ["driver_id", "class_id", "class_name", "module", "class",
          "checkpoint_interval", "actor_method_names",
          "actor_method_num_return_vals"])

    actor_name = class_name.decode("ascii")
    module = module.decode("ascii")
    checkpoint_interval = int(checkpoint_interval)
    actor_method_names = json.loads(actor_method_names.decode("ascii"))
    actor_method_num_return_vals = json.loads(
        actor_method_num_return_vals.decode("ascii"))

    # Create a temporary actor with some temporary methods so that if the actor
    # fails to be unpickled, the temporary actor can be used (just to produce
    # error messages and to prevent the driver from hanging).
    class TemporaryActor(object):
        pass

    worker.actors[actor_id_str] = TemporaryActor()
    worker.actor_checkpoint_interval = checkpoint_interval

    def temporary_actor_method(*xs):
        # Placeholder executor: always raises, so callers see a clear import
        # failure instead of a hang.
        raise Exception("The actor with name {} failed to be imported, and so "
                        "cannot execute this method".format(actor_name))

    # Register the actor method signatures.
    register_actor_signatures(worker, driver_id, class_id, class_name,
                              actor_method_names,
                              actor_method_num_return_vals)
    # Register the actor method executors.
    for actor_method_name in actor_method_names:
        function_id = compute_actor_method_function_id(
            class_name, actor_method_name).id()
        # Start with the failing placeholder; it is replaced below once the
        # class unpickles successfully.
        temporary_executor = make_actor_method_executor(
            worker, actor_method_name, temporary_actor_method,
            actor_imported=False)
        worker.functions[driver_id][function_id] = (actor_method_name,
                                                    temporary_executor)
        worker.num_task_executions[driver_id][function_id] = 0

    try:
        unpickled_class = pickle.loads(pickled_class)
        worker.actor_class = unpickled_class
    except Exception:
        # If an exception was thrown when the actor was imported, we record the
        # traceback and notify the scheduler of the failure.
        traceback_str = ray.utils.format_error_message(traceback.format_exc())
        # Log the error message.
        push_error_to_driver(worker.redis_client, "register_actor_signatures",
                             traceback_str, driver_id,
                             data={"actor_id": actor_id_str})
        # TODO(rkn): In the future, it might make sense to have the worker exit
        # here. However, currently that would lead to hanging if someone calls
        # ray.get on a method invoked on the actor.
    else:
        # TODO(pcm): Why is the below line necessary?
        unpickled_class.__module__ = module
        worker.actors[actor_id_str] = unpickled_class.__new__(unpickled_class)
        # Replace the placeholder executors with real ones bound to the
        # unpickled class's methods.
        actor_methods = inspect.getmembers(
            unpickled_class,
            predicate=(lambda x: (inspect.isfunction(x) or
                                  inspect.ismethod(x) or
                                  is_cython(x))))
        for actor_method_name, actor_method in actor_methods:
            function_id = compute_actor_method_function_id(
                class_name, actor_method_name).id()
            executor = make_actor_method_executor(worker, actor_method_name,
                                                  actor_method,
                                                  actor_imported=True)
            worker.functions[driver_id][function_id] = (actor_method_name,
                                                        executor)