Code example #1
File: actor.py Project: zofuthan/ray
    def __init__(self,
                 actor_id,
                 module_name,
                 class_name,
                 actor_cursor,
                 actor_method_names,
                 method_signatures,
                 method_num_return_vals,
                 actor_creation_dummy_object_id,
                 actor_method_cpus,
                 actor_driver_id,
                 actor_handle_id=None):
        self._ray_actor_id = actor_id
        self._ray_module_name = module_name
        # True if this is the original actor handle (no explicit handle ID was
        # passed in). False if this handle was created by forking or pickling,
        # in which case _deserialization_helper supplies the handle ID.
        self._ray_original_handle = actor_handle_id is None
        if self._ray_original_handle:
            self._ray_actor_handle_id = ObjectID.nil_id()
        else:
            self._ray_actor_handle_id = actor_handle_id
        self._ray_actor_cursor = actor_cursor
        self._ray_actor_counter = 0
        self._ray_actor_method_names = actor_method_names
        self._ray_method_signatures = method_signatures
        self._ray_method_num_return_vals = method_num_return_vals
        self._ray_class_name = class_name
        self._ray_actor_forks = 0
        self._ray_actor_creation_dummy_object_id = (
            actor_creation_dummy_object_id)
        self._ray_actor_method_cpus = actor_method_cpus
        self._ray_actor_driver_id = actor_driver_id
        self._ray_new_actor_handles = []
        self._ray_actor_lock = threading.Lock()
Code example #2
File: ir.py Project: wxdublin/ray-graph
class RayIRNode(object):
    def __init__(self):
        self.results = None
        self.group_id = ObjectID(random_string())

    def short_id(self):
        return self.group_id.hex()[:8]

    def remote(self, task, args, group_id=None, group_dep=None):
        if USE_GROUPS:
            return task._remote(args,
                                kwargs={},
                                group_id=group_id,
                                group_dependency=group_dep)
        return task._remote(args, {})

    def __del__(self):
        scheduler_free_group(self.group_id)

    def __repr__(self):
        return self.__str__()

    def __getitem__(self, key):
        assert self.results is not None
        return self.results.__getitem__(key)
Code example #3
File: actor.py Project: zofuthan/ray
def compute_actor_handle_id_non_forked(actor_handle_id, current_task_id):
    """Deterministically compute an actor handle ID in the non-forked case.

    This code path is used whenever an actor handle is pickled and unpickled
    (for example, if a remote function closes over an actor handle). Then,
    whenever the actor handle is used, a new actor handle ID will be generated
    on the fly as a deterministic function of the previous actor handle ID and
    the current task ID.

    TODO(rkn): It may be possible to cause problems by closing over multiple
    actor handles in a remote function, which then get unpickled and give rise
    to the same actor handle IDs.

    Args:
        actor_handle_id: The original actor handle ID.
        current_task_id: The ID of the task that is unpickling the handle.

    Returns:
        An ID for the new actor handle.
    """
    handle_id_hash = hashlib.sha1()
    handle_id_hash.update(actor_handle_id.id())
    handle_id_hash.update(current_task_id.id())
    handle_id = handle_id_hash.digest()
    return ObjectID(handle_id)
Code example #4
File: util.py Project: x-malet/ray
def get_pinned_object(pinned_id):
    """Retrieve a pinned object from the object store."""

    from ray import ObjectID

    return _from_pinnable(
        ray.get(
            ObjectID(base64.b64decode(pinned_id[len(PINNED_OBJECT_PREFIX):]))))
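The pinned_id argument here is just PINNED_OBJECT_PREFIX followed by the base64-encoded bytes of an ObjectID that was previously put into the object store. Below is a minimal sketch of that producing side; the pin_in_object_store name, the prefix value, and the module-level list are illustrative assumptions, and the wrapping implied by _from_pinnable above is omitted.

import base64

import ray

# Illustrative stand-ins for the module-level names assumed above.
PINNED_OBJECT_PREFIX = "ray.tune.PinnedObject:"
_pinned_objects = []  # strong references so the pinned values are not evicted


def pin_in_object_store(obj):
    # Put the value into the object store and keep a local reference alive.
    obj_id = ray.put(obj)
    _pinned_objects.append(ray.get(obj_id))
    # The returned string is exactly what get_pinned_object() decodes;
    # .id() is the older bytes accessor used elsewhere on this page.
    return PINNED_OBJECT_PREFIX + base64.b64encode(obj_id.id()).decode("utf-8")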
Code example #5
File: actor.py Project: nebulaV/ray
    def _serialization_helper(self, ray_forking):
        """This is defined in order to make pickling work.

        Args:
            ray_forking: True if this is being called because Ray is forking
                the actor handle and False if it is being called by pickling.

        Returns:
            A dictionary of the information needed to reconstruct the object.
        """
        if ray_forking:
            actor_handle_id = compute_actor_handle_id(
                self._ray_actor_handle_id, self._ray_actor_forks)
        else:
            actor_handle_id = self._ray_actor_handle_id

        state = {
            "actor_id": self._ray_actor_id.id(),
            "actor_handle_id": actor_handle_id.id(),
            "module_name": self._ray_module_name,
            "class_name": self._ray_class_name,
            "actor_cursor": self._ray_actor_cursor.id()
            if self._ray_actor_cursor is not None else None,
            "actor_method_names": self._ray_actor_method_names,
            "method_signatures": self._ray_method_signatures,
            "method_num_return_vals": self._ray_method_num_return_vals,
            # Actors in local mode don't have dummy objects.
            "actor_creation_dummy_object_id": self.
            _ray_actor_creation_dummy_object_id.id()
            if self._ray_actor_creation_dummy_object_id is not None else None,
            "actor_method_cpus": self._ray_actor_method_cpus,
            "actor_driver_id": self._ray_actor_driver_id.id(),
            "ray_forking": ray_forking
        }

        if ray_forking:
            self._ray_actor_forks += 1
            new_actor_handle_id = actor_handle_id
        else:
            # The execution dependency for a pickled actor handle is never safe
            # to release, since it could be unpickled and submit another
            # dependent task at any time. Therefore, we notify the backend of a
            # random handle ID that will never actually be used.
            new_actor_handle_id = ObjectID(_random_string())
        # Notify the backend to expect this new actor handle. The backend will
        # not release the cursor for any new handles until the first task for
        # each of the new handles is submitted.
        # NOTE(swang): There is currently no garbage collection for actor
        # handles until the actor itself is removed.
        self._ray_new_actor_handles.append(new_actor_handle_id)

        return state
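The dictionary returned above is only half of the round trip; it is consumed again by _deserialization_helper (code example #7). A minimal sketch of how the two helpers might be wired into the standard pickle protocol, assuming that plain pickling corresponds to ray_forking=False (the exact hooks in the project may differ):

    def __getstate__(self):
        # Plain pickling: not a Ray-level fork of the handle.
        return self._serialization_helper(ray_forking=False)

    def __setstate__(self, state):
        # Reconstruct the handle from the pickled state.
        return self._deserialization_helper(state, ray_forking=False)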
Code example #6
File: actor.py Project: zofuthan/ray
def compute_actor_handle_id(actor_handle_id, num_forks):
    """Deterministically compute an actor handle ID.

    A new actor handle ID is generated when it is forked from another actor
    handle. The new handle ID is computed as hash(old_handle_id || num_forks).

    Args:
        actor_handle_id (common.ObjectID): The original actor handle ID.
        num_forks: The number of times the original actor handle has been
                   forked so far.

    Returns:
        An ID for the new actor handle.
    """
    handle_id_hash = hashlib.sha1()
    handle_id_hash.update(actor_handle_id.id())
    handle_id_hash.update(str(num_forks).encode("ascii"))
    handle_id = handle_id_hash.digest()
    return ObjectID(handle_id)
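Because the new ID is just hash(old_handle_id || num_forks), the mapping can be exercised in isolation. A small usage sketch, assuming compute_actor_handle_id above and ray's ObjectID are importable; the 20-byte constant is an arbitrary placeholder.

from ray import ObjectID

# Arbitrary 20-byte parent handle ID, the same length as a SHA-1 digest.
parent_handle_id = ObjectID(b"\x00" * 20)

first_fork = compute_actor_handle_id(parent_handle_id, 0)
second_fork = compute_actor_handle_id(parent_handle_id, 1)

# Deterministic: the same (handle ID, fork count) pair always yields the same
# new ID, while different fork counts yield different IDs.
assert compute_actor_handle_id(parent_handle_id, 0).id() == first_fork.id()
assert first_fork.id() != second_fork.id()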
Code example #7
File: actor.py Project: zofuthan/ray
    def _deserialization_helper(self, state, ray_forking):
        """This is defined in order to make pickling work.

        Args:
            state: The serialized state of the actor handle.
            ray_forking: True if this is being called because Ray is forking
                the actor handle and False if it is being called by pickling.
        """
        worker = ray.worker.get_global_worker()
        worker.check_connected()

        if state["ray_forking"]:
            actor_handle_id = ObjectID(state["actor_handle_id"])
        else:
            # Right now, if the actor handle has been pickled, we create a
            # temporary actor handle id for invocations.
            # TODO(pcm): This still leads to a lot of actor handles being
            # created, there should be a better way to handle pickled
            # actor handles.
            # TODO(swang): Accessing the worker's current task ID is not
            # thread-safe.
            # TODO(swang): Unpickling the same actor handle twice in the same
            # task will break the application, and unpickling it twice in the
            # same actor is likely a performance bug. We should consider
            # logging a warning in these cases.
            actor_handle_id = compute_actor_handle_id_non_forked(
                ObjectID(state["actor_handle_id"]), worker.current_task_id)

        # This is the driver ID of the driver that owns the actor, not
        # necessarily the driver that owns this actor handle.
        actor_driver_id = ObjectID(state["actor_driver_id"])

        self.__init__(
            ObjectID(state["actor_id"]),
            state["module_name"],
            state["class_name"],
            ObjectID(state["actor_cursor"])
            if state["actor_cursor"] is not None else None,
            state["actor_method_names"],
            state["method_signatures"],
            state["method_num_return_vals"],
            ObjectID(state["actor_creation_dummy_object_id"])
            if state["actor_creation_dummy_object_id"] is not None else None,
            state["actor_method_cpus"],
            actor_driver_id,
            actor_handle_id=actor_handle_id)
Code example #8
def get_by_plasma(object_id: ray.ObjectID):
    # `plasma_client` is assumed to be a module-level pyarrow.plasma client
    # connected to the same object store that Ray is using.
    plasma_object_id = plasma.ObjectID(object_id.binary())
    # This should be much faster because the buffer is returned zero-copy.
    data = plasma_client.get_buffers([plasma_object_id])[0]
    return data
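A minimal sketch of how such a plasma_client might be created and exercised, assuming pyarrow.plasma is installed; the socket path below is a placeholder and should be replaced with the object store socket of the running Ray instance.

import numpy as np
import pyarrow.plasma as plasma
import ray

ray.init()

# Placeholder socket path; point this at the plasma store socket that the
# running Ray instance is using.
plasma_client = plasma.connect("/tmp/plasma")

# Put an array through Ray, then read its raw buffer back without copying.
object_id = ray.put(np.arange(10))
buf = get_by_plasma(object_id)
print(buf.size)  # size of the stored (serialized) object in bytes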
Code example #9
File: ir.py Project: wxdublin/ray-graph
    def __init__(self):
        self.results = None
        self.group_id = ObjectID(random_string())
Code example #10
File: actor.py Project: zofuthan/ray
    def _remote(self,
                args,
                kwargs,
                num_cpus=None,
                num_gpus=None,
                resources=None):
        """Create an actor.

        This method allows more flexibility than the remote method because
        resource requirements can be specified to override the defaults set in
        the decorator.

        Args:
            args: The arguments to forward to the actor constructor.
            kwargs: The keyword arguments to forward to the actor constructor.
            num_cpus: The number of CPUs required by the actor creation task.
            num_gpus: The number of GPUs required by the actor creation task.
            resources: The custom resources required by the actor creation
                task.

        Returns:
            A handle to the newly created actor.
        """
        worker = ray.worker.get_global_worker()
        if worker.mode is None:
            raise Exception("Actors cannot be created before ray.init() "
                            "has been called.")

        actor_id = ObjectID(_random_string())
        # The actor cursor is a dummy object representing the most recent
        # actor method invocation. For each subsequent method invocation,
        # the current cursor should be added as a dependency, and then
        # updated to reflect the new invocation.
        actor_cursor = None

        # Do not export the actor class or the actor if running in LOCAL_MODE.
        # Instead, instantiate the actor locally and add it to the worker's
        # dictionary.
        if worker.mode == ray.LOCAL_MODE:
            worker.actors[actor_id] = self._modified_class(
                *copy.deepcopy(args), **copy.deepcopy(kwargs))
        else:
            # Export the actor.
            if not self._exported:
                worker.function_actor_manager.export_actor_class(
                    self._modified_class, self._actor_method_names,
                    self._checkpoint_interval)
                self._exported = True

            resources = ray.utils.resources_from_resource_arguments(
                self._num_cpus, self._num_gpus, self._resources, num_cpus,
                num_gpus, resources)

            # If the actor methods require CPU resources, then set the required
            # placement resources. If actor_placement_resources is empty, then
            # the required placement resources will be the same as resources.
            actor_placement_resources = {}
            assert self._actor_method_cpus in [0, 1]
            if self._actor_method_cpus == 1:
                actor_placement_resources = resources.copy()
                actor_placement_resources["CPU"] += 1

            if args is None:
                args = []
            if kwargs is None:
                kwargs = {}
            function_name = "__init__"
            function_signature = self._method_signatures[function_name]
            creation_args = signature.extend_args(function_signature, args,
                                                  kwargs)
            function_descriptor = FunctionDescriptor(
                self._modified_class.__module__, function_name,
                self._modified_class.__name__)
            [actor_cursor] = worker.submit_task(
                function_descriptor,
                creation_args,
                actor_creation_id=actor_id,
                max_actor_reconstructions=self._max_reconstructions,
                num_return_vals=1,
                resources=resources,
                placement_resources=actor_placement_resources)

        actor_handle = ActorHandle(
            actor_id, self._modified_class.__module__, self._class_name,
            actor_cursor, self._actor_method_names, self._method_signatures,
            self._actor_method_num_return_vals, actor_cursor,
            self._actor_method_cpus, worker.task_driver_id)
        # We increment the actor counter by 1 to account for the actor creation
        # task.
        actor_handle._ray_actor_counter += 1

        return actor_handle
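Callers normally reach this method through the class's remote() wrapper; _remote exists so that the decorator's resource defaults can be overridden at actor creation time. A short usage sketch with an illustrative Counter actor:

import ray

ray.init()


@ray.remote(num_cpus=1)
class Counter(object):
    def __init__(self):
        self.value = 0

    def increment(self):
        self.value += 1
        return self.value


# Usual creation path, using the defaults from the decorator.
counter = Counter.remote()

# Same creation task, but overriding the CPU requirement via _remote.
big_counter = Counter._remote(args=[], kwargs={}, num_cpus=2)

print(ray.get(counter.increment.remote()))  # 1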