Example #1
    async def _start_backend_replica(self, backend_tag):
        assert (backend_tag in self.backend_table.list_backends()
                ), "Backend {} is not registered.".format(backend_tag)

        replica_tag = "{}#{}".format(backend_tag, get_random_letters(length=6))

        # Fetch the info to start the replica from the backend table.
        creator = self.backend_table.get_backend_creator(backend_tag)
        backend_config_dict = self.backend_table.get_info(backend_tag)
        backend_config = BackendConfig(**backend_config_dict)
        init_args = self.backend_table.get_init_args(backend_tag)
        kwargs = backend_config.get_actor_creation_args(init_args)

        runner_handle = creator(kwargs)
        self.tag_to_actor_handles[replica_tag] = runner_handle

        # Set up the worker.
        await runner_handle._ray_serve_setup.remote(backend_tag,
                                                    self.get_router()[0],
                                                    runner_handle)
        ray.get(runner_handle._ray_serve_fetch.remote())

        # Register the worker in config tables and metric monitor.
        self.backend_table.add_replica(backend_tag, replica_tag)
        self.get_metric_monitor()[0].add_target.remote(runner_handle)
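
Note: the replica tag above is built by appending six random letters to the backend tag (for example "my_backend#AbCdEf"). get_random_letters is a Ray Serve utility; a minimal sketch of an equivalent helper (the implementation below is assumed, not taken from the source) could look like:

import random
import string

def get_random_letters(length=6):
    # Assumed equivalent: draw `length` random ASCII letters so that replica
    # tags created for the same backend are very unlikely to collide.
    return "".join(random.choice(string.ascii_letters) for _ in range(length))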
Example #2
    async def _start_backend_replica(self, backend_tag):
        assert (backend_tag in self.backend_table.list_backends()
                ), "Backend {} is not registered.".format(backend_tag)

        replica_tag = "{}#{}".format(backend_tag, get_random_letters(length=6))

        # Register the worker in the DB.
        # TODO(edoakes): we should guarantee that if calls to the master
        # succeed, the cluster state has changed and if they fail, it hasn't.
        # Once we have master actor fault tolerance, this breaks that guarantee
        # because this method could fail after writing the replica to the DB.
        self.backend_table.add_replica(backend_tag, replica_tag)

        # Fetch the info to start the replica from the backend table.
        backend_actor = ray.remote(
            self.backend_table.get_backend_creator(backend_tag))
        backend_config_dict = self.backend_table.get_info(backend_tag)
        backend_config = BackendConfig(**backend_config_dict)
        init_args = [
            backend_tag, replica_tag,
            self.backend_table.get_init_args(backend_tag)
        ]
        kwargs = backend_config.get_actor_creation_args(init_args)

        # Start the worker.
        worker_handle = backend_actor._remote(**kwargs)
        self.tag_to_actor_handles[replica_tag] = worker_handle

        # Wait for the worker to start up.
        await worker_handle.ready.remote()
        await self.get_router()[0].add_new_worker.remote(
            backend_tag, worker_handle)

        # Register the worker with the metric monitor.
        self.get_metric_monitor()[0].add_target.remote(worker_handle)
Example #3
File: api.py  Project: lelegan/ray
def _start_replica(backend_tag):
    assert (backend_tag in global_state.backend_table.list_backends()
            ), "Backend {} is not registered.".format(backend_tag)

    replica_tag = "{}#{}".format(backend_tag, get_random_letters(length=6))

    # Fetch the info to start the replica from the backend table.
    creator = global_state.backend_table.get_backend_creator(backend_tag)
    backend_config_dict = global_state.backend_table.get_info(backend_tag)
    backend_config = BackendConfig(**backend_config_dict)
    init_args = global_state.backend_table.get_init_args(backend_tag)

    # Build the actor creation kwargs.
    actor_kwargs = backend_config.get_actor_creation_args(init_args)

    # Create the runner in the actor nursery.
    [runner_handle] = ray.get(
        global_state.actor_nursery_handle.start_actor_with_creator.remote(
            creator, actor_kwargs, replica_tag))

    # Set up the worker.
    ray.get(
        runner_handle._ray_serve_setup.remote(
            backend_tag, global_state.init_or_get_router(), runner_handle))
    runner_handle._ray_serve_fetch.remote()

    # Register the worker in the config tables and the metric monitor.
    global_state.backend_table.add_replica(backend_tag, replica_tag)
    global_state.init_or_get_metric_monitor().add_target.remote(runner_handle)
Example #4
def create_backend(func_or_class,
                   backend_tag,
                   *actor_init_args,
                   backend_config=None):
    """Create a backend using func_or_class and assign backend_tag.

    Args:
        func_or_class (callable, class): a function or a class implementing
            the __call__ protocol.
        backend_tag (str): a unique tag assigned to this backend. It will be
            used to associate services in traffic policy.
        backend_config (BackendConfig): An object defining backend properties
            for starting a backend.
        *actor_init_args (optional): the arguments to pass to the class
            initialization method.
    """
    # Configure backend_config
    if backend_config is None:
        backend_config = BackendConfig()
    assert isinstance(backend_config,
                      BackendConfig), ("backend_config must be an"
                                       " instance of BackendConfig")

    # Make sure the batch size is correct
    should_accept_batch = backend_config.max_batch_size is not None
    if should_accept_batch and not _backend_accept_batch(func_or_class):
        raise batch_annotation_not_found
    if _backend_accept_batch(func_or_class):
        backend_config.has_accept_batch_annotation = True

    arg_list = []
    if inspect.isfunction(func_or_class):
        # The arg list for a function backend is the function itself.
        arg_list = [func_or_class]
        # ignore lint on lambda expression
        creator = lambda kwargs: TaskRunnerActor._remote(**kwargs)  # noqa: E731
    elif inspect.isclass(func_or_class):
        # Python inheritance order is right-to-left. We put RayServeMixin
        # on the left to make sure its methods are not overridden.
        @ray.remote
        class CustomActor(RayServeMixin, func_or_class):
            @wraps(func_or_class.__init__)
            def __init__(self, *args, **kwargs):
                # Initialize serve so it can be used in backends.
                init()
                super().__init__(*args, **kwargs)

        arg_list = actor_init_args
        # ignore lint on lambda expression
        creator = lambda kwargs: CustomActor._remote(**kwargs)  # noqa: E731
    else:
        raise TypeError(
            "Backend must be a function or class, it is {}.".format(
                type(func_or_class)))

    ray.get(
        master_actor.create_backend.remote(backend_tag, creator,
                                           backend_config, arg_list))
Example #5
async def test_task_runner_custom_method_batch(serve_instance):
    q = RoundRobinPolicyQueueActor.remote()

    @serve.accept_batch
    class Batcher:
        def a(self, _):
            return ["a-{}".format(i) for i in range(serve.context.batch_size)]

        def b(self, _):
            return ["b-{}".format(i) for i in range(serve.context.batch_size)]

    CONSUMER_NAME = "runner"
    PRODUCER_NAME = "producer"

    worker = setup_worker(CONSUMER_NAME, Batcher)

    await q.link.remote(PRODUCER_NAME, CONSUMER_NAME)
    await q.set_backend_config.remote(
        CONSUMER_NAME,
        BackendConfig(max_batch_size=10).__dict__)

    a_query_param = RequestMetadata(PRODUCER_NAME,
                                    context.TaskContext.Python,
                                    call_method="a")
    b_query_param = RequestMetadata(PRODUCER_NAME,
                                    context.TaskContext.Python,
                                    call_method="b")

    futures = [q.enqueue_request.remote(a_query_param) for _ in range(2)]
    futures += [q.enqueue_request.remote(b_query_param) for _ in range(2)]

    await q.add_new_worker.remote(CONSUMER_NAME, "replica1", worker)

    gathered = await asyncio.gather(*futures)
    assert set(gathered) == {"a-0", "a-1", "b-0", "b-1"}
Example #6
def create_backend(func_or_class,
                   backend_tag,
                   *actor_init_args,
                   backend_config=None):
    """Create a backend using func_or_class and assign backend_tag.

    Args:
        func_or_class (callable, class): a function or a class implementing
            __call__.
        backend_tag (str): a unique tag assigned to this backend. It will be
            used to associate services in traffic policy.
        backend_config (BackendConfig): An object defining backend properties
            for starting a backend.
        *actor_init_args (optional): the arguments to pass to the class
            initialization method.
    """
    # Configure backend_config
    if backend_config is None:
        backend_config = BackendConfig()
    assert isinstance(backend_config,
                      BackendConfig), ("backend_config must be an"
                                       " instance of BackendConfig")

    # Validate that func_or_class is a function or class.
    if inspect.isfunction(func_or_class):
        if len(actor_init_args) != 0:
            raise ValueError(
                "actor_init_args not supported for function backend.")
    elif not inspect.isclass(func_or_class):
        raise ValueError(
            "Backend must be a function or class, it is {}.".format(
                type(func_or_class)))

    # Make sure the batch size is correct.
    should_accept_batch = backend_config.max_batch_size is not None
    if should_accept_batch and not _backend_accept_batch(func_or_class):
        raise batch_annotation_not_found
    if _backend_accept_batch(func_or_class):
        backend_config.has_accept_batch_annotation = True

    ray.get(
        master_actor.create_backend.remote(backend_tag, backend_config,
                                           func_or_class, actor_init_args))
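
Note: a hedged usage sketch of this API (the MyModel class, the backend tag, and the init argument are illustrative, not from the source; it assumes serve has already been initialized and that create_backend and BackendConfig are in scope as in the snippet above):

class MyModel:
    def __init__(self, threshold):
        # *actor_init_args passed to create_backend end up here.
        self.threshold = threshold

    def __call__(self, request):
        # Invoked once per request, since the class is not decorated
        # with @serve.accept_batch.
        return {"above_threshold": True}

create_backend(MyModel, "my_model_backend", 0.5,
               backend_config=BackendConfig(num_replicas=2))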
Example #7
File: api.py  Project: lelegan/ray
def get_backend_config(backend_tag):
    """get the backend configuration for a backend tag

    Args:
        backend_tag(str): A registered backend.
    """
    assert (backend_tag in global_state.backend_table.list_backends()
            ), "Backend {} is not registered.".format(backend_tag)
    backend_config_dict = global_state.backend_table.get_info(backend_tag)
    return BackendConfig(**backend_config_dict)
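
Note: a minimal usage sketch (the backend tag is illustrative and assumes a backend with that tag was previously created):

# Fetch the stored config and inspect fields that appear elsewhere in these
# examples (num_replicas and max_batch_size are BackendConfig fields).
config = get_backend_config("my_model_backend")
print(config.num_replicas, config.max_batch_size)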
Example #8
    async def _start_backend_worker(self, backend_tag, replica_tag):
        """Creates a backend worker and waits for it to start up.

        Assumes that the backend configuration has already been registered
        in self.backends.
        """
        logger.debug("Starting worker '{}' for backend '{}'.".format(
            replica_tag, backend_tag))
        worker_creator, init_args, config_dict = self.backends[backend_tag]
        # TODO(edoakes): just store the BackendConfig in self.backends.
        backend_config = BackendConfig(**config_dict)
        kwargs = backend_config.get_actor_creation_args()

        worker_handle = async_retryable(ray.remote(worker_creator)).options(
            detached=True,
            name=replica_tag,
            max_reconstructions=ray.ray_constants.INFINITE_RECONSTRUCTION,
            **kwargs).remote(backend_tag, replica_tag, init_args)
        # TODO(edoakes): we should probably have a timeout here.
        await worker_handle.ready.remote()
        return worker_handle
Example #9
async def test_task_runner_custom_method_batch(serve_instance):
    q = RoundRobinPolicyQueueActor.remote()

    @serve.accept_batch
    class Batcher:
        def a(self, _):
            return ["a-{}".format(i) for i in range(serve.context.batch_size)]

        def b(self, _):
            return ["b-{}".format(i) for i in range(serve.context.batch_size)]

    @ray.remote
    class CustomActor(Batcher, RayServeMixin):
        pass

    CONSUMER_NAME = "runner"
    PRODUCER_NAME = "producer"

    runner = CustomActor.remote()

    runner._ray_serve_setup.remote(CONSUMER_NAME, q, runner)

    q.link.remote(PRODUCER_NAME, CONSUMER_NAME)
    q.set_backend_config.remote(CONSUMER_NAME,
                                BackendConfig(max_batch_size=2).__dict__)

    a_query_param = RequestMetadata(PRODUCER_NAME,
                                    context.TaskContext.Python,
                                    call_method="a")
    b_query_param = RequestMetadata(PRODUCER_NAME,
                                    context.TaskContext.Python,
                                    call_method="b")

    futures = [q.enqueue_request.remote(a_query_param) for _ in range(2)]
    futures += [q.enqueue_request.remote(b_query_param) for _ in range(2)]

    runner._ray_serve_fetch.remote()

    gathered = await asyncio.gather(*futures)
    assert gathered == ["a-0", "a-1", "b-0", "b-1"]
Example #10
    def get_backend_config(self, backend_tag):
        assert (backend_tag in self.backend_table.list_backends()
                ), "Backend {} is not registered.".format(backend_tag)
        backend_config_dict = self.backend_table.get_info(backend_tag)
        return BackendConfig(**backend_config_dict)
Example #11
def create_backend(func_or_class,
                   backend_tag,
                   *actor_init_args,
                   backend_config=None):
    """Create a backend using func_or_class and assign backend_tag.

    Args:
        func_or_class (callable, class): a function or a class implementing
            the __call__ protocol.
        backend_tag (str): a unique tag assigned to this backend. It will be
            used to associate services in traffic policy.
        backend_config (BackendConfig): An object defining backend properties
            for starting a backend.
        *actor_init_args (optional): the arguments to pass to the class
            initialization method.
    """
    # Configure backend_config
    if backend_config is None:
        backend_config = BackendConfig()
    assert isinstance(backend_config,
                      BackendConfig), ("backend_config must be an"
                                       " instance of BackendConfig")

    # Make sure the batch size is correct
    should_accept_batch = backend_config.max_batch_size is not None
    if should_accept_batch and not _backend_accept_batch(func_or_class):
        raise batch_annotation_not_found
    if _backend_accept_batch(func_or_class):
        backend_config.has_accept_batch_annotation = True

    arg_list = []
    if inspect.isfunction(func_or_class):
        # The arg list for a function backend is the function itself.
        arg_list = [func_or_class]
        # ignore lint on lambda expression
        creator = lambda kwargs: TaskRunnerActor._remote(**kwargs)  # noqa: E731
    elif inspect.isclass(func_or_class):
        # Python inheritance order is right-to-left. We put RayServeMixin
        # on the left to make sure its methods are not overridden.
        @ray.remote
        class CustomActor(RayServeMixin, func_or_class):
            pass

        arg_list = actor_init_args
        # ignore lint on lambda expression
        creator = lambda kwargs: CustomActor._remote(**kwargs)  # noqa: E731
    else:
        raise TypeError(
            "Backend must be a function or class, it is {}.".format(
                type(func_or_class)))

    backend_config_dict = dict(backend_config)

    # Save the creator which starts the replicas.
    global_state.backend_table.register_backend(backend_tag, creator)

    # Save the configuration information needed to start the replicas.
    global_state.backend_table.register_info(backend_tag, backend_config_dict)

    # Save the initial arguments needed by the replicas.
    global_state.backend_table.save_init_args(backend_tag, arg_list)

    # Set the backend config inside the router,
    # particularly for max_batch_size.
    ray.get(global_state.init_or_get_router().set_backend_config.remote(
        backend_tag, backend_config_dict))
    _scale(backend_tag, backend_config_dict["num_replicas"])
Example #12
    def get_backend_config(self, backend_tag):
        """Get the current config for the specified backend."""
        assert (backend_tag in self.backends
                ), "Backend {} is not registered.".format(backend_tag)
        return BackendConfig(**self.backends[backend_tag][2])
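
Note: several of the examples above round-trip a BackendConfig through a plain dict: the config is serialized with __dict__ (as in BackendConfig(max_batch_size=10).__dict__) or dict(backend_config), and rebuilt with keyword expansion, BackendConfig(**config_dict). A minimal sketch of that round trip (it assumes every stored key is accepted as a constructor keyword argument):

original = BackendConfig(max_batch_size=8)
config_dict = original.__dict__            # plain dict, safe to store in a table
restored = BackendConfig(**config_dict)    # rebuild from the stored dict
assert restored.max_batch_size == 8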