def step(self, *args, **kwargs):
    flattened_args = signature.flatten_args(self._signature, args, kwargs)
    actor_id = workflow_context.get_current_workflow_id()
    if not self.readonly:
        if self._method_name == "__init__":
            state_ref = None
        else:
            ws = WorkflowStorage(actor_id, get_global_storage())
            state_ref = WorkflowRef(ws.get_entrypoint_step_id())
        # This is a hack to insert a positional argument.
        flattened_args = [signature.DUMMY_TYPE, state_ref] + flattened_args
    workflow_inputs = serialization_context.make_workflow_inputs(
        flattened_args)

    if self.readonly:
        _actor_method = _wrap_readonly_actor_method(
            actor_id, self._original_class, self._method_name)
    else:
        _actor_method = _wrap_actor_method(self._original_class,
                                           self._method_name)
    workflow_data = WorkflowData(
        func_body=_actor_method,
        inputs=workflow_inputs,
        name=self._name,
        step_options=self._options,
        user_metadata=self._user_metadata,
    )
    wf = Workflow(workflow_data)
    return wf

async def batch_wrapper(*args, **kwargs):
    self = extract_self_if_method_call(args, _func)
    flattened_args: List = flatten_args(extract_signature(_func), args, kwargs)

    if self is None:
        # For functions, inject the batch queue as an
        # attribute of the function.
        batch_queue_object = _func
    else:
        # For methods, inject the batch queue as an
        # attribute of the object.
        batch_queue_object = self
        # Trim the self argument from methods.
        flattened_args = flattened_args[2:]

    # The first time the function runs, we lazily construct the batch
    # queue and inject it under a custom attribute name. On subsequent
    # runs, we just get a reference to the attribute.
    batch_queue_attr = f"__serve_batch_queue_{_func.__name__}"
    if not hasattr(batch_queue_object, batch_queue_attr):
        batch_queue = _BatchQueue(max_batch_size, batch_wait_timeout_s,
                                  _func)
        setattr(batch_queue_object, batch_queue_attr, batch_queue)
    else:
        batch_queue = getattr(batch_queue_object, batch_queue_attr)

    future = asyncio.get_event_loop().create_future()
    batch_queue.put(SingleRequest(self, flattened_args, future))

    # This will raise if the underlying call raised an exception.
    return await future

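# Usage sketch (not part of the wrapper above): this wrapper is the machinery
# behind Ray Serve's `@serve.batch` decorator. A minimal illustration of how a
# batched handler is typically declared, assuming a deployment class named
# `EmbeddingModel` (an illustrative name, not taken from the source above).
from typing import List

from ray import serve


@serve.deployment
class EmbeddingModel:
    @serve.batch(max_batch_size=8, batch_wait_timeout_s=0.1)
    async def handle_batch(self, inputs: List[str]) -> List[int]:
        # Invoked with up to `max_batch_size` queued requests at once;
        # must return exactly one result per queued request, in order.
        return [len(text) for text in inputs]

    async def __call__(self, request) -> int:
        # Each individual call awaits its own slot in the shared batch queue.
        return await self.handle_batch(request)
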
def _build_workflow(*args, **kwargs) -> Workflow:
    flattened_args = signature.flatten_args(self._func_signature, args,
                                            kwargs)

    def prepare_inputs():
        ensure_ray_initialized()
        return serialization_context.make_workflow_inputs(flattened_args)

    nonlocal step_options
    if step_options is None:
        step_options = WorkflowStepRuntimeOptions.make(
            step_type=StepType.FUNCTION)
    # We could have "checkpoint=None" when we use @workflow.step
    # with arguments. Avoid this by updating it here.
    step_options.checkpoint = _inherit_checkpoint_option(
        step_options.checkpoint)

    workflow_data = WorkflowData(
        func_body=self._func,
        inputs=None,
        step_options=step_options,
        name=self._name,
        user_metadata=self._user_metadata,
    )
    return Workflow(workflow_data, prepare_inputs)

def _actor_method_call(self,
                       method_name,
                       args=None,
                       kwargs=None,
                       name="",
                       num_returns=None):
    """Method execution stub for an actor handle.

    This is the function that executes when
    `actor.method_name.remote(*args, **kwargs)` is called. Instead of
    executing locally, the method is packaged as a task and scheduled
    to the remote actor instance.

    Args:
        method_name: The name of the actor method to execute.
        args: A list of arguments for the actor method.
        kwargs: A dictionary of keyword arguments for the actor method.
        name (str): The name to give the actor method call task.
        num_returns (int): The number of return values for the method.

    Returns:
        object_refs: A list of object refs returned by the remote actor
            method.
    """
    worker = ray.worker.global_worker

    args = args or []
    kwargs = kwargs or {}
    if self._ray_is_cross_language:
        list_args = cross_language.format_args(worker, args, kwargs)
        function_descriptor = \
            cross_language.get_function_descriptor_for_actor_method(
                self._ray_actor_language,
                self._ray_actor_creation_function_descriptor, method_name)
    else:
        function_signature = self._ray_method_signatures[method_name]

        if not args and not kwargs and not function_signature:
            list_args = []
        else:
            list_args = signature.flatten_args(function_signature, args,
                                               kwargs)
        function_descriptor = self._ray_function_descriptor[method_name]

    if worker.mode == ray.LOCAL_MODE:
        assert not self._ray_is_cross_language, \
            "Cross language remote actor method " \
            "cannot be executed locally."

    object_refs = worker.core_worker.submit_actor_task(
        self._ray_actor_language, self._ray_actor_id, function_descriptor,
        list_args, name, num_returns, self._ray_actor_method_cpus)

    if len(object_refs) == 1:
        object_refs = object_refs[0]
    elif len(object_refs) == 0:
        object_refs = None

    return object_refs

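# For context, a minimal sketch of the public call path that reaches
# `_actor_method_call` above: invoking a remote actor method through an actor
# handle. The `Counter` class is an illustrative assumption.
import ray

ray.init()


@ray.remote
class Counter:
    def __init__(self):
        self.value = 0

    def increment(self, delta=1):
        self.value += delta
        return self.value


counter = Counter.remote()          # schedules the actor creation task
ref = counter.increment.remote(2)   # packaged and submitted as an actor task
assert ray.get(ref) == 2            # resolves the returned ObjectRef
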
def _build_workflow(*args, **kwargs) -> Workflow:
    flattened_args = signature.flatten_args(self._func_signature, args,
                                            kwargs)
    workflow_inputs = serialization_context.make_workflow_inputs(
        flattened_args)
    workflow_data = WorkflowData(
        func_body=self._func,
        inputs=workflow_inputs,
        max_retries=self._max_retries,
        catch_exceptions=self._catch_exceptions,
        ray_options=self._ray_options,
    )
    return Workflow(workflow_data)

def flatten_args(self, method_name: str, args: Tuple[Any],
                 kwargs: Dict[str, Any]) -> List[Any]:
    """Check and flatten arguments of the actor method.

    Args:
        method_name: The name of the actor method in the actor class.
        args: Positional arguments.
        kwargs: Keyword arguments.

    Returns:
        Flattened arguments.
    """
    return signature.flatten_args(self.signatures[method_name], args,
                                  kwargs)

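# A self-contained sketch of the flatten/recover idea used throughout these
# snippets: positional arguments are tagged with a sentinel and keyword
# arguments with their key, so a call can be shipped as one flat, serializable
# list. This mirrors how `ray._private.signature.flatten_args` behaves as far
# as I understand it; the sentinel and helper names below are illustrative,
# not the actual Ray internals.
_POSITIONAL = object()  # stands in for signature.DUMMY_TYPE


def _flatten(args, kwargs):
    flat = []
    for value in args:
        flat.extend([_POSITIONAL, value])
    for key, value in kwargs.items():
        flat.extend([key, value])
    return flat


def _recover(flat):
    args, kwargs = [], {}
    for key, value in zip(flat[::2], flat[1::2]):
        if key is _POSITIONAL:
            args.append(value)
        else:
            kwargs[key] = value
    return args, kwargs


assert _recover(_flatten((1, 2), {"a": 3})) == ([1, 2], {"a": 3})
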
def _build_workflow(*args, **kwargs) -> Workflow:
    flattened_args = signature.flatten_args(self._func_signature, args,
                                            kwargs)

    def prepare_inputs():
        ensure_ray_initialized()
        return serialization_context.make_workflow_inputs(flattened_args)

    workflow_data = WorkflowData(
        func_body=self._func,
        inputs=None,
        step_options=step_options,
        name=self._name,
        user_metadata=self._user_metadata,
    )
    return Workflow(workflow_data, prepare_inputs)

def _build_workflow(*args, **kwargs) -> Workflow:
    flattened_args = signature.flatten_args(self._func_signature, args,
                                            kwargs)

    def prepare_inputs():
        ensure_ray_initialized()
        return serialization_context.make_workflow_inputs(
            flattened_args)

    workflow_data = WorkflowData(
        func_body=self._func,
        step_type=StepType.FUNCTION,
        inputs=None,
        max_retries=self._max_retries,
        catch_exceptions=self._catch_exceptions,
        ray_options=self._ray_options,
        name=self._name,
    )
    return Workflow(workflow_data, prepare_inputs)

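# The `_build_workflow` closures above back the 1.x-era `@workflow.step`
# decorator (since removed in favor of the DAG API). A rough usage sketch of
# that old API; the task bodies and names are illustrative assumptions, and a
# `workflow.init(...)` call may be required first depending on the Ray version.
from ray import workflow


@workflow.step
def double(x: int) -> int:
    return 2 * x


@workflow.step
def add(a: int, b: int) -> int:
    return a + b


# Calling `.step(...)` builds a `Workflow` object without executing it;
# `.run()` executes the composed steps under a workflow id.
result = add.step(double.step(10), double.step(1)).run(workflow_id="demo")
assert result == 22
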
def _remote(self,
            args=None,
            kwargs=None,
            num_cpus=None,
            num_gpus=None,
            memory=None,
            object_store_memory=None,
            resources=None,
            accelerator_type=None,
            max_concurrency=None,
            max_restarts=None,
            max_task_retries=None,
            name=None,
            namespace=None,
            lifetime=None,
            placement_group="default",
            placement_group_bundle_index=-1,
            placement_group_capture_child_tasks=None,
            runtime_env=None):
    """Create an actor.

    This method allows more flexibility than the remote method because
    resource requirements can be specified and override the defaults in
    the decorator.

    Args:
        args: The arguments to forward to the actor constructor.
        kwargs: The keyword arguments to forward to the actor
            constructor.
        num_cpus: The number of CPUs required by the actor creation
            task.
        num_gpus: The number of GPUs required by the actor creation
            task.
        memory: Restrict the heap memory usage of this actor.
        object_store_memory: Restrict the object store memory used by
            this actor when creating objects.
        resources: The custom resources required by the actor creation
            task.
        max_concurrency: The max number of concurrent calls to allow
            for this actor. This only works with direct actor calls.
            The max concurrency defaults to 1 for threaded execution,
            and 1000 for asyncio execution. Note that the execution
            order is not guaranteed when max_concurrency > 1.
        name: The globally unique name for the actor, which can be
            used to retrieve the actor via ray.get_actor(name) as long
            as the actor is still alive.
        namespace: Override the namespace to use for the actor. By
            default, actors are created in an anonymous namespace. The
            actor can be retrieved via
            ray.get_actor(name=name, namespace=namespace).
        lifetime: Either `None`, which means the actor fate shares
            with its creator and will be deleted once its refcount
            drops to zero, or "detached", which means the actor will
            live as a global object independent of the creator.
        placement_group: the placement group this actor belongs to, or
            None if it doesn't belong to any group. Setting to
            "default" autodetects the placement group based on the
            current setting of placement_group_capture_child_tasks.
        placement_group_bundle_index: the index of the bundle if the
            actor belongs to a placement group, which may be -1 to
            specify any available bundle.
        placement_group_capture_child_tasks: Whether or not children
            tasks of this actor should implicitly use the same
            placement group as its parent. It is True by default.
        runtime_env (Dict[str, Any]): Specifies the runtime environment
            for this actor or task and its children (see
            :ref:`runtime-environments` for details). This API is in
            beta and may change before becoming stable.

    Returns:
        A handle to the newly created actor.
    """
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    meta = self.__ray_metadata__
    actor_has_async_methods = len(
        inspect.getmembers(
            meta.modified_class,
            predicate=inspect.iscoroutinefunction)) > 0
    is_asyncio = actor_has_async_methods

    if max_concurrency is None:
        if is_asyncio:
            max_concurrency = 1000
        else:
            max_concurrency = 1

    if max_concurrency < 1:
        raise ValueError("max_concurrency must be >= 1")

    if client_mode_should_convert(auto_init=True):
        return client_mode_convert_actor(
            self,
            args,
            kwargs,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            memory=memory,
            object_store_memory=object_store_memory,
            resources=resources,
            accelerator_type=accelerator_type,
            max_concurrency=max_concurrency,
            max_restarts=max_restarts,
            max_task_retries=max_task_retries,
            name=name,
            namespace=namespace,
            lifetime=lifetime,
            placement_group=placement_group,
            placement_group_bundle_index=placement_group_bundle_index,
            placement_group_capture_child_tasks=(
                placement_group_capture_child_tasks),
            runtime_env=runtime_env)

    worker = ray.worker.global_worker
    worker.check_connected()

    if name is not None:
        if not isinstance(name, str):
            raise TypeError(
                f"name must be None or a string, got: '{type(name)}'.")
        elif name == "":
            raise ValueError("Actor name cannot be an empty string.")
    if namespace is not None:
        ray._private.utils.validate_namespace(namespace)

    # Check whether the name is already taken.
    # TODO(edoakes): this check has a race condition because two drivers
    # could pass the check and then create the same named actor. We should
    # instead check this when we create the actor, but that's currently an
    # async call.
    if name is not None:
        try:
            ray.get_actor(name, namespace=namespace)
        except ValueError:  # Name is not taken.
            pass
        else:
            raise ValueError(
                f"The name {name} (namespace={namespace}) is already "
                "taken. Please use "
                "a different name or get the existing actor using "
                f"ray.get_actor('{name}', namespace='{namespace}')")

    if lifetime is None:
        detached = False
    elif lifetime == "detached":
        detached = True
    else:
        raise ValueError(
            "actor `lifetime` argument must be either `None` or 'detached'"
        )

    # Set the actor's default resources if not already set. First three
    # conditions are to check that no resources were specified in the
    # decorator. Last three conditions are to check that no resources were
    # specified when _remote() was called.
    if (meta.num_cpus is None and meta.num_gpus is None
            and meta.resources is None and meta.accelerator_type is None
            and num_cpus is None and num_gpus is None and resources is None
            and accelerator_type is None):
        # In the default case, actors acquire no resources for
        # their lifetime, and actor methods will require 1 CPU.
        cpus_to_use = ray_constants.DEFAULT_ACTOR_CREATION_CPU_SIMPLE
        actor_method_cpu = ray_constants.DEFAULT_ACTOR_METHOD_CPU_SIMPLE
    else:
        # If any resources are specified (here or in decorator), then
        # all resources are acquired for the actor's lifetime and no
        # resources are associated with methods.
        cpus_to_use = (ray_constants.DEFAULT_ACTOR_CREATION_CPU_SPECIFIED
                       if meta.num_cpus is None else meta.num_cpus)
        actor_method_cpu = ray_constants.DEFAULT_ACTOR_METHOD_CPU_SPECIFIED

    # LOCAL_MODE cannot handle cross_language.
    if worker.mode == ray.LOCAL_MODE:
        assert not meta.is_cross_language, \
            "Cross language ActorClass cannot be executed locally."

    # Export the actor.
    if not meta.is_cross_language and (meta.last_export_session_and_job !=
                                       worker.current_session_and_job):
        # If this actor class was not exported in this session and job,
        # we need to export this function again, because current GCS
        # doesn't have it.
        meta.last_export_session_and_job = (worker.current_session_and_job)
        # After serialize / deserialize of the modified class, the
        # __module__ of the modified class will be
        # ray.cloudpickle.cloudpickle. So we pass
        # actor_creation_function_descriptor here to make sure the actor
        # class is exported correctly.
        worker.function_actor_manager.export_actor_class(
            meta.modified_class, meta.actor_creation_function_descriptor,
            meta.method_meta.methods.keys())

    resources = ray._private.utils.resources_from_resource_arguments(
        cpus_to_use, meta.num_gpus, meta.memory, meta.object_store_memory,
        meta.resources, meta.accelerator_type, num_cpus, num_gpus, memory,
        object_store_memory, resources, accelerator_type)

    # If the actor methods require CPU resources, then set the required
    # placement resources. If actor_placement_resources is empty, then
    # the required placement resources will be the same as resources.
    actor_placement_resources = {}
    assert actor_method_cpu in [0, 1]
    if actor_method_cpu == 1:
        actor_placement_resources = resources.copy()
        actor_placement_resources["CPU"] += 1
    if meta.is_cross_language:
        creation_args = cross_language.format_args(worker, args, kwargs)
    else:
        function_signature = meta.method_meta.signatures["__init__"]
        creation_args = signature.flatten_args(function_signature, args,
                                               kwargs)

    if placement_group_capture_child_tasks is None:
        placement_group_capture_child_tasks = (
            worker.should_capture_child_tasks_in_placement_group)

    placement_group = configure_placement_group_based_on_context(
        placement_group_capture_child_tasks,
        placement_group_bundle_index,
        resources,
        actor_placement_resources,
        meta.class_name,
        placement_group=placement_group)

    if runtime_env:
        if isinstance(runtime_env, str):
            # Serialized protobuf runtime env from Ray client.
            new_runtime_env = runtime_env
        elif isinstance(runtime_env, ParsedRuntimeEnv):
            new_runtime_env = runtime_env.serialize()
        else:
            raise TypeError(f"Error runtime env type {type(runtime_env)}")
    else:
        new_runtime_env = meta.runtime_env

    concurrency_groups_dict = {}
    for cg_name in meta.concurrency_groups:
        concurrency_groups_dict[cg_name] = {
            "name": cg_name,
            "max_concurrency": meta.concurrency_groups[cg_name],
            "function_descriptors": [],
        }

    # Update methods.
    for method_name in meta.method_meta.concurrency_group_for_methods:
        cg_name = meta.method_meta.concurrency_group_for_methods[
            method_name]
        assert cg_name in concurrency_groups_dict

        module_name = meta.actor_creation_function_descriptor.module_name
        class_name = meta.actor_creation_function_descriptor.class_name
        concurrency_groups_dict[cg_name]["function_descriptors"].append(
            PythonFunctionDescriptor(module_name, method_name, class_name))

    actor_id = worker.core_worker.create_actor(
        meta.language,
        meta.actor_creation_function_descriptor,
        creation_args,
        max_restarts or meta.max_restarts,
        max_task_retries or meta.max_task_retries,
        resources,
        actor_placement_resources,
        max_concurrency,
        detached,
        name if name is not None else "",
        namespace if namespace is not None else "",
        is_asyncio,
        placement_group.id,
        placement_group_bundle_index,
        placement_group_capture_child_tasks,
        # Store actor_method_cpu in the actor handle's extension data.
        extension_data=str(actor_method_cpu),
        serialized_runtime_env=new_runtime_env or "{}",
        concurrency_groups_dict=concurrency_groups_dict or dict())

    actor_handle = ActorHandle(
        meta.language,
        actor_id,
        meta.method_meta.decorators,
        meta.method_meta.signatures,
        meta.method_meta.num_returns,
        actor_method_cpu,
        meta.actor_creation_function_descriptor,
        worker.current_session_and_job,
        original_handle=True)

    return actor_handle

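# For context, a sketch of the public surface that funnels into `_remote`
# above: `.remote(...)` uses the decorator defaults, while `.options(...)`
# overrides them per instantiation. `Worker` is an illustrative class name.
import ray

ray.init()


@ray.remote(num_cpus=1)
class Worker:
    def ping(self):
        return "pong"


# Decorator defaults.
w1 = Worker.remote()

# Per-instantiation overrides: extra CPUs, a unique name, and a detached
# lifetime, all of which arrive as keyword arguments of `_remote` above.
w2 = Worker.options(
    num_cpus=2, name="worker-2", lifetime="detached").remote()

assert ray.get(w2.ping.remote()) == "pong"
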
def _node_visitor(node: Any) -> Any:
    if isinstance(node, FunctionNode):
        bound_options = node._bound_options.copy()
        num_returns = bound_options.get("num_returns", 1)
        if num_returns is None:  # Ray could use `None` as the default value.
            num_returns = 1
        if num_returns > 1:
            raise ValueError("Workflow steps can only have one return.")

        workflow_options = bound_options.pop("_metadata",
                                             {}).get(WORKFLOW_OPTIONS, {})

        # If the checkpoint option is not specified, inherit checkpoint
        # options from the context (i.e. checkpoint options of the outer
        # step). If it is still not specified, it's True by default.
        checkpoint = workflow_options.get("checkpoint", None)
        if checkpoint is None:
            checkpoint = context.checkpoint if context is not None else True
        # When it returns a nested workflow, catch_exceptions
        # should be passed recursively.
        catch_exceptions = workflow_options.get("catch_exceptions", None)
        if catch_exceptions is None:
            # TODO(suquark): should we also handle exceptions from a
            # "leaf node" in the continuation? For example, we have a
            # workflow
            # > @ray.remote
            # > def A(): pass
            # > @ray.remote
            # > def B(x): return x
            # > @ray.remote
            # > def C(x): return workflow.continuation(B.bind(A.bind()))
            # > dag = C.options(
            # >     **workflow.options(catch_exceptions=True)).bind()
            # Should C catch exceptions from A?
            if node.get_stable_uuid() == dag_node.get_stable_uuid():
                # The 'catch_exceptions' context should be passed down to
                # its direct continuation task.
                # In this case, the direct continuation is the output node.
                catch_exceptions = (context.catch_exceptions
                                    if context is not None else False)
            else:
                catch_exceptions = False

        max_retries = bound_options.get("max_retries", 3)
        if not isinstance(max_retries, int) or max_retries < -1:
            raise ValueError(
                "'max_retries' only accepts 0, -1 or a positive integer.")

        step_options = WorkflowStepRuntimeOptions(
            step_type=StepType.FUNCTION,
            catch_exceptions=catch_exceptions,
            max_retries=max_retries,
            allow_inplace=False,
            checkpoint=checkpoint,
            ray_options=bound_options,
        )

        workflow_refs: List[WorkflowRef] = []
        with serialization_context.workflow_args_serialization_context(
                workflow_refs):
            _func_signature = signature.extract_signature(node._body)
            flattened_args = signature.flatten_args(
                _func_signature, node._bound_args, node._bound_kwargs)
            # NOTE: When calling 'ray.put', we trigger python object
            # serialization. Under our serialization context,
            # Workflows are separated from the arguments,
            # leaving a placeholder object with all other python objects.
            # Then we put the placeholder object into the object store,
            # so it won't be mutated later. This guarantees correct
            # semantics. See "tests/test_variable_mutable.py" as
            # an example.
            input_placeholder: ray.ObjectRef = ray.put(flattened_args)

        name = workflow_options.get("name")
        if name is None:
            name = (f"{get_module(node._body)}."
                    f"{slugify(get_qualname(node._body))}")
        task_id = ray.get(mgr.gen_step_id.remote(workflow_id, name))
        state.add_dependencies(task_id, [s.task_id for s in workflow_refs])
        state.task_input_args[task_id] = input_placeholder

        user_metadata = workflow_options.pop("metadata", {})
        validate_user_metadata(user_metadata)

        state.tasks[task_id] = Task(
            name=name,
            options=step_options,
            user_metadata=user_metadata,
            func_body=node._body,
        )
        return WorkflowRef(task_id)

    if isinstance(node, InputAttributeNode):
        return node._execute_impl()  # get data from the input node
    if isinstance(node, InputNode):
        return input_context  # replace the input node with input data
    if not isinstance(node, DAGNode):
        return node  # return normal objects
    raise TypeError(f"Unsupported DAG node: {node}")

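# A rough sketch of the DAG-based workflow API that `_node_visitor` above
# traverses; the task bodies are illustrative assumptions. Options set via
# `workflow.options(...)` (e.g. checkpoint, catch_exceptions) end up in the
# `WorkflowStepRuntimeOptions` built for each visited `FunctionNode`.
import ray
from ray import workflow

ray.init()


@ray.remote
def load() -> int:
    return 41


@ray.remote
def finish(x: int) -> int:
    return x + 1


dag = finish.options(
    **workflow.options(checkpoint=True, catch_exceptions=False)
).bind(load.bind())

# `workflow.run` converts the DAG into workflow tasks (via the visitor above)
# and executes them under a durable workflow id.
result = workflow.run(dag, workflow_id="demo_dag")
assert result == 42
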
def _remote(self,
            args=None,
            kwargs=None,
            num_cpus=None,
            num_gpus=None,
            memory=None,
            object_store_memory=None,
            resources=None,
            accelerator_type=None,
            max_concurrency=None,
            max_restarts=None,
            max_task_retries=None,
            name=None,
            lifetime=None,
            placement_group="default",
            placement_group_bundle_index=-1,
            placement_group_capture_child_tasks=None,
            runtime_env=None,
            override_environment_variables=None):
    """Create an actor.

    This method allows more flexibility than the remote method because
    resource requirements can be specified and override the defaults in
    the decorator.

    Args:
        args: The arguments to forward to the actor constructor.
        kwargs: The keyword arguments to forward to the actor
            constructor.
        num_cpus: The number of CPUs required by the actor creation
            task.
        num_gpus: The number of GPUs required by the actor creation
            task.
        memory: Restrict the heap memory usage of this actor.
        object_store_memory: Restrict the object store memory used by
            this actor when creating objects.
        resources: The custom resources required by the actor creation
            task.
        max_concurrency: The max number of concurrent calls to allow
            for this actor. This only works with direct actor calls.
            The max concurrency defaults to 1 for threaded execution,
            and 1000 for asyncio execution. Note that the execution
            order is not guaranteed when max_concurrency > 1.
        name: The globally unique name for the actor, which can be
            used to retrieve the actor via ray.get_actor(name) as long
            as the actor is still alive. Names may not contain '/'.
        lifetime: Either `None`, which means the actor fate shares
            with its creator and will be deleted once its refcount
            drops to zero, or "detached", which means the actor will
            live as a global object independent of the creator.
        placement_group: the placement group this actor belongs to, or
            None if it doesn't belong to any group. Setting to
            "default" autodetects the placement group based on the
            current setting of placement_group_capture_child_tasks.
        placement_group_bundle_index: the index of the bundle if the
            actor belongs to a placement group, which may be -1 to
            specify any available bundle.
        placement_group_capture_child_tasks: Whether or not children
            tasks of this actor should implicitly use the same
            placement group as its parent. It is True by default.
        runtime_env (Dict[str, Any]): Specifies the runtime environment
            for this actor or task and its children (see
            ``runtime_env.py`` for more details).
        override_environment_variables: Environment variables to
            override and/or introduce for this actor. This is a
            dictionary mapping variable names to their values.

    Returns:
        A handle to the newly created actor.
    """
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    meta = self.__ray_metadata__
    actor_has_async_methods = len(
        inspect.getmembers(
            meta.modified_class,
            predicate=inspect.iscoroutinefunction)) > 0
    is_asyncio = actor_has_async_methods

    if max_concurrency is None:
        if is_asyncio:
            max_concurrency = 1000
        else:
            max_concurrency = 1

    if max_concurrency < 1:
        raise ValueError("max_concurrency must be >= 1")

    if client_mode_should_convert():
        return client_mode_convert_actor(
            self,
            args,
            kwargs,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            memory=memory,
            object_store_memory=object_store_memory,
            resources=resources,
            accelerator_type=accelerator_type,
            max_concurrency=max_concurrency,
            max_restarts=max_restarts,
            max_task_retries=max_task_retries,
            name=name,
            lifetime=lifetime,
            placement_group=placement_group,
            placement_group_bundle_index=placement_group_bundle_index,
            placement_group_capture_child_tasks=(
                placement_group_capture_child_tasks),
            runtime_env=runtime_env,
            override_environment_variables=(
                override_environment_variables))

    worker = ray.worker.global_worker
    worker.check_connected()

    if name is not None:
        if not isinstance(name, str):
            raise TypeError(
                f"name must be None or a string, got: '{type(name)}'.")
        elif name == "":
            raise ValueError("Actor name cannot be an empty string.")

        split_names = name.split("/", maxsplit=1)
        if len(split_names) <= 1:
            name = split_names[0]
            namespace = ""
        else:
            # must be length 2
            namespace, name = split_names
        if "/" in name:
            raise ValueError("Actor name may not contain '/'.")
    else:
        namespace = ""

    # Check whether the name is already taken.
    # TODO(edoakes): this check has a race condition because two drivers
    # could pass the check and then create the same named actor. We should
    # instead check this when we create the actor, but that's currently an
    # async call.
    if name is not None:
        try:
            ray.get_actor(name)
        except ValueError:  # Name is not taken.
            pass
        else:
            raise ValueError(
                f"The name {name} is already taken. Please use "
                "a different name or get the existing actor using "
                f"ray.get_actor('{name}')")

    if lifetime is None:
        detached = False
    elif lifetime == "detached":
        detached = True
    else:
        raise ValueError(
            "actor `lifetime` argument must be either `None` or 'detached'"
        )

    if placement_group_capture_child_tasks is None:
        placement_group_capture_child_tasks = (
            worker.should_capture_child_tasks_in_placement_group)

    if placement_group == "default":
        if placement_group_capture_child_tasks:
            placement_group = get_current_placement_group()
        else:
            placement_group = PlacementGroup.empty()

    if not placement_group:
        placement_group = PlacementGroup.empty()

    check_placement_group_index(placement_group,
                                placement_group_bundle_index)

    # Set the actor's default resources if not already set. First three
    # conditions are to check that no resources were specified in the
    # decorator. Last three conditions are to check that no resources were
    # specified when _remote() was called.
    if (meta.num_cpus is None and meta.num_gpus is None
            and meta.resources is None and meta.accelerator_type is None
            and num_cpus is None and num_gpus is None and resources is None
            and accelerator_type is None):
        # In the default case, actors acquire no resources for
        # their lifetime, and actor methods will require 1 CPU.
        cpus_to_use = ray_constants.DEFAULT_ACTOR_CREATION_CPU_SIMPLE
        actor_method_cpu = ray_constants.DEFAULT_ACTOR_METHOD_CPU_SIMPLE
    else:
        # If any resources are specified (here or in decorator), then
        # all resources are acquired for the actor's lifetime and no
        # resources are associated with methods.
        cpus_to_use = (ray_constants.DEFAULT_ACTOR_CREATION_CPU_SPECIFIED
                       if meta.num_cpus is None else meta.num_cpus)
        actor_method_cpu = ray_constants.DEFAULT_ACTOR_METHOD_CPU_SPECIFIED

    # LOCAL_MODE cannot handle cross_language.
    if worker.mode == ray.LOCAL_MODE:
        assert not meta.is_cross_language, \
            "Cross language ActorClass cannot be executed locally."

    # Export the actor.
    if not meta.is_cross_language and (meta.last_export_session_and_job !=
                                       worker.current_session_and_job):
        # If this actor class was not exported in this session and job,
        # we need to export this function again, because current GCS
        # doesn't have it.
        meta.last_export_session_and_job = (worker.current_session_and_job)
        # After serialize / deserialize of the modified class, the
        # __module__ of the modified class will be
        # ray.cloudpickle.cloudpickle. So we pass
        # actor_creation_function_descriptor here to make sure the actor
        # class is exported correctly.
        worker.function_actor_manager.export_actor_class(
            meta.modified_class, meta.actor_creation_function_descriptor,
            meta.method_meta.methods.keys())

    resources = ray._private.utils.resources_from_resource_arguments(
        cpus_to_use, meta.num_gpus, meta.memory, meta.object_store_memory,
        meta.resources, meta.accelerator_type, num_cpus, num_gpus, memory,
        object_store_memory, resources, accelerator_type)

    # If the actor methods require CPU resources, then set the required
    # placement resources. If actor_placement_resources is empty, then
    # the required placement resources will be the same as resources.
    actor_placement_resources = {}
    assert actor_method_cpu in [0, 1]
    if actor_method_cpu == 1:
        actor_placement_resources = resources.copy()
        actor_placement_resources["CPU"] += 1
    if meta.is_cross_language:
        creation_args = cross_language.format_args(worker, args, kwargs)
    else:
        function_signature = meta.method_meta.signatures["__init__"]
        creation_args = signature.flatten_args(function_signature, args,
                                               kwargs)

    if runtime_env is None:
        runtime_env = meta.runtime_env
    if runtime_env:
        if runtime_env.get("working_dir"):
            raise NotImplementedError(
                "Overriding working_dir for actors is not supported. "
                "Please use ray.init(runtime_env={'working_dir': ...}) "
                "to configure per-job environment instead.")
        runtime_env_dict = runtime_support.RuntimeEnvDict(
            runtime_env).get_parsed_dict()
    else:
        runtime_env_dict = {}

    if override_environment_variables:
        logger.warning("override_environment_variables is deprecated and "
                       "will be removed in Ray 1.6. Please use "
                       ".options(runtime_env={'env_vars': {...}}).remote() "
                       "instead.")

    actor_id = worker.core_worker.create_actor(
        meta.language,
        meta.actor_creation_function_descriptor,
        creation_args,
        max_restarts or meta.max_restarts,
        max_task_retries or meta.max_task_retries,
        resources,
        actor_placement_resources,
        max_concurrency,
        detached,
        name if name is not None else "",
        namespace,
        is_asyncio,
        placement_group.id,
        placement_group_bundle_index,
        placement_group_capture_child_tasks,
        # Store actor_method_cpu in the actor handle's extension data.
        extension_data=str(actor_method_cpu),
        runtime_env_dict=runtime_env_dict,
        override_environment_variables=override_environment_variables
        or dict())

    actor_handle = ActorHandle(
        meta.language,
        actor_id,
        meta.method_meta.decorators,
        meta.method_meta.signatures,
        meta.method_meta.num_returns,
        actor_method_cpu,
        meta.actor_creation_function_descriptor,
        worker.current_session_and_job,
        original_handle=True)

    return actor_handle