def export_actor(actor_id, class_id, actor_method_names, num_cpus, num_gpus,
                 worker):
    """Export an actor to redis.

    Args:
        actor_id: The ID of the actor.
        class_id: A random ID for the actor class.
        actor_method_names (list): A list of the names of this actor's
            methods.
        num_cpus (int): The number of CPUs that this actor requires.
        num_gpus (int): The number of GPUs that this actor requires.
    """
    ray.worker.check_main_thread()
    if worker.mode is None:
        raise Exception("Actors cannot be created before Ray has been "
                        "started. You can start Ray with 'ray.init()'.")
    key = b"Actor:" + actor_id.id()

    # For now, all actor methods have 1 return value.
    driver_id = worker.task_driver_id.id()
    for actor_method_name in actor_method_names:
        # TODO(rkn): When we create a second actor, we are probably
        # overwriting the values from the first actor here. This may or may
        # not be a problem.
        function_id = get_actor_method_function_id(actor_method_name).id()
        worker.function_properties[driver_id][function_id] = (
            FunctionProperties(num_return_vals=1,
                               num_cpus=1,
                               num_gpus=0,
                               num_custom_resource=0,
                               max_calls=0))

    # Select a local scheduler for the actor.
    local_scheduler_id = select_local_scheduler(
        worker.task_driver_id.id(), ray.global_state.local_schedulers(),
        num_gpus, worker.redis_client)
    assert local_scheduler_id is not None

    # We must put the actor information in Redis before publishing the actor
    # notification so that when the newly created actor attempts to fetch the
    # information from Redis, it is already there.
    worker.redis_client.hmset(key, {"class_id": class_id,
                                    "driver_id": driver_id,
                                    "local_scheduler_id": local_scheduler_id,
                                    "num_gpus": num_gpus,
                                    "removed": False})

    # TODO(rkn): There is actually no guarantee that the local scheduler that
    # we are publishing to has already subscribed to the actor_notifications
    # channel. Therefore, this message may be missed and the workload will
    # hang. This is a bug.
    ray.utils.publish_actor_creation(actor_id.id(), driver_id,
                                     local_scheduler_id, False,
                                     worker.redis_client)
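
# The registration loop above keys worker.function_properties by the result of
# get_actor_method_function_id(actor_method_name), which is why the TODO warns
# that a second actor may overwrite the first: if the function ID depends only
# on the method name, two actor classes with a method of the same name collide.
# The helper below is a minimal sketch of such an ID function, assuming a
# deterministic 20-byte hash of the method name; the hashing scheme and the
# _SketchFunctionID wrapper are illustrative assumptions, not the actual
# get_actor_method_function_id implementation.
class _SketchFunctionID(object):
    """Stand-in for the real function ID type (assumed to expose .id())."""

    def __init__(self, id_bytes):
        self._id = id_bytes

    def id(self):
        return self._id


def _sketch_get_actor_method_function_id(method_name):
    """Derive a deterministic 20-byte function ID from a method name."""
    import hashlib
    digest = hashlib.sha1(method_name.encode("ascii")).digest()
    assert len(digest) == 20
    return _SketchFunctionID(digest)
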
def export_actor(actor_id, class_id, class_name, actor_method_names,
                 actor_method_num_return_vals, resources, worker):
    """Export an actor to redis.

    Args:
        actor_id (common.ObjectID): The ID of the actor.
        class_id (str): A random ID for the actor class.
        class_name (str): The actor class name.
        actor_method_names (list): A list of the names of this actor's
            methods.
        actor_method_num_return_vals: A list of the number of return values
            for each of the actor's methods.
        resources: A dictionary mapping resource name to the quantity of that
            resource required by the actor.
    """
    ray.worker.check_main_thread()
    if worker.mode is None:
        raise Exception("Actors cannot be created before Ray has been "
                        "started. You can start Ray with 'ray.init()'.")

    driver_id = worker.task_driver_id.id()
    register_actor_signatures(worker, driver_id, class_name,
                              actor_method_names,
                              actor_method_num_return_vals)

    # Select a local scheduler for the actor.
    key = b"Actor:" + actor_id.id()
    local_scheduler_id = select_local_scheduler(
        worker.task_driver_id.id(), ray.global_state.local_schedulers(),
        resources.get("GPU", 0), worker.redis_client)
    assert local_scheduler_id is not None

    # We must put the actor information in Redis before publishing the actor
    # notification so that when the newly created actor attempts to fetch the
    # information from Redis, it is already there.
    worker.redis_client.hmset(key, {"class_id": class_id,
                                    "driver_id": driver_id,
                                    "local_scheduler_id": local_scheduler_id,
                                    "num_gpus": resources.get("GPU", 0),
                                    "removed": False})

    # TODO(rkn): There is actually no guarantee that the local scheduler that
    # we are publishing to has already subscribed to the actor_notifications
    # channel. Therefore, this message may be missed and the workload will
    # hang. This is a bug.
    ray.utils.publish_actor_creation(actor_id.id(), driver_id,
                                     local_scheduler_id, False,
                                     worker.redis_client)
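
# register_actor_signatures is called above but not defined in this section.
# The sketch below is modeled on the inline registration loop in the first
# version of export_actor above, threading through the per-method number of
# return values instead of assuming one return value per method. The
# FunctionProperties fields follow that earlier version and may not match the
# current constructor, and class_name is kept only for signature parity; treat
# this as an illustration under those assumptions, not the real implementation.
def _sketch_register_actor_signatures(worker, driver_id, class_name,
                                      actor_method_names,
                                      actor_method_num_return_vals):
    """Register one function-properties entry per actor method (sketch)."""
    assert len(actor_method_names) == len(actor_method_num_return_vals)
    for method_name, num_return_vals in zip(actor_method_names,
                                            actor_method_num_return_vals):
        function_id = get_actor_method_function_id(method_name).id()
        # As in the earlier version, this may overwrite an entry registered by
        # another actor class whose method has the same name.
        worker.function_properties[driver_id][function_id] = (
            FunctionProperties(num_return_vals=num_return_vals,
                               num_cpus=1,
                               num_gpus=0,
                               num_custom_resource=0,
                               max_calls=0))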