def __init__(self):
    self.my_counter = metrics.Count(
        "my_counter",
        description=("The number of excellent requests to this backend."),
        tag_keys=("backend", ))
    self.my_counter.set_default_tags({
        "backend": serve.get_current_backend_tag()
    })
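# Minimal usage sketch (an assumption, not part of the original snippet):
# because the "backend" tag already has a default value set above, the
# request handler can bump the counter with a bare record(1), mirroring the
# record() calls used elsewhere in this file. The __call__ handler below is
# hypothetical.
def __call__(self, request):
    self.my_counter.record(1)  # one increment per incoming request
    return "hello"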
def __init__(self, controller_name):
    controller = ray.get_actor(controller_name)
    self.router = Router(controller)
    self.long_poll_client = LongPollAsyncClient(
        controller, {
            LongPollKey.ROUTE_TABLE: self._update_route_table,
        })
    self.request_counter = metrics.Count(
        "num_http_requests",
        description="The number of HTTP requests processed",
        tag_keys=("route", ))
async def fetch_config_from_controller(self, controller_name):
    assert ray.is_initialized()
    controller = ray.get_actor(controller_name)

    self.route_table = await controller.get_router_config.remote()

    self.request_counter = metrics.Count(
        "num_http_requests",
        description="The number of HTTP requests processed",
        tag_keys=("route", ))

    self.router = Router(controller)
    await self.router.setup_in_async_loop()
def __init__(self, controller_name):
    # Set the controller name so that serve.connect() will connect to the
    # controller instance this proxy is running in.
    ray.serve.api._set_internal_controller_name(controller_name)
    self.client = ray.serve.connect()

    controller = ray.get_actor(controller_name)
    self.route_table = {}  # Should be updated via long polling.
    self.router = Router(controller)
    self.long_poll_client = LongPollAsyncClient(
        controller, {
            LongPollKey.ROUTE_TABLE: self._update_route_table,
        })

    self.request_counter = metrics.Count(
        "num_http_requests",
        description="The number of HTTP requests processed",
        tag_keys=("route", ))
def __init__(self,
             router: Router,
             endpoint_name,
             handle_options: Optional[HandleOptions] = None):
    self.router = router
    self.endpoint_name = endpoint_name
    self.handle_options = handle_options or HandleOptions()
    self.handle_tag = f"{self.endpoint_name}#{get_random_letters()}"

    self.request_counter = metrics.Count(
        "serve_handle_request_counter",
        description=("The number of handle.remote() calls that have been "
                     "made on this handle."),
        tag_keys=("handle", "endpoint"))
    self.request_counter.set_default_tags({
        "handle": self.handle_tag,
        "endpoint": self.endpoint_name
    })
def __init__(self, controller_handle: ActorHandle):
    """The router processes incoming queries: it chooses a backend and
    assigns a replica.

    Args:
        controller_handle(ActorHandle): The controller handle.
    """
    self.controller = controller_handle

    self.endpoint_policies: Dict[str, EndpointPolicy] = dict()
    self.backend_replicas: Dict[str, ReplicaSet] = defaultdict(ReplicaSet)
    self._pending_endpoints: Dict[str, asyncio.Future] = dict()

    # -- Metrics Registration -- #
    self.num_router_requests = metrics.Count(
        "serve_num_router_requests",
        description="The number of requests processed by the router.",
        tag_keys=("endpoint", ))
def __init__(self, controller_name):
    # Set the controller name so that serve.connect() will connect to the
    # controller instance this proxy is running in.
    ray.serve.api._set_internal_replica_context(None, None, controller_name)
    self.client = ray.serve.connect()

    controller = ray.get_actor(controller_name)

    self.router = starlette.routing.Router(default=self._not_found)

    # route -> (endpoint_tag, methods). Updated via long polling.
    self.route_table: Dict[str, Tuple[EndpointTag, List[str]]] = {}

    self.long_poll_client = LongPollAsyncClient(
        controller, {
            LongPollKey.ROUTE_TABLE: self._update_route_table,
        })

    self.request_counter = metrics.Count(
        "serve_num_http_requests",
        description="The number of HTTP requests processed.",
        tag_keys=("route", ))
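# Hedged sketch (an assumption, not taken from the source) of how the proxy
# could record this counter per request. Because "route" has no default tag
# value, it is passed at record time; this assumes the older
# ray.util.metrics API where record() accepts a tags dict. The ASGI handler
# and the way the route is derived are hypothetical.
async def __call__(self, scope, receive, send):
    route = scope["path"]
    self.request_counter.record(1, tags={"route": route})
    await self.router(scope, receive, send)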
def __init__(self, _callable: Callable, backend_config: BackendConfig,
             is_function: bool, controller_handle: ActorHandle) -> None:
    self.backend_tag = ray.serve.api.get_current_backend_tag()
    self.replica_tag = ray.serve.api.get_current_replica_tag()
    self.callable = _callable
    self.is_function = is_function

    self.config = backend_config
    self.batch_queue = BatchQueue(self.config.max_batch_size or 1,
                                  self.config.batch_wait_timeout)
    self.reconfigure(self.config.user_config)

    self.num_ongoing_requests = 0

    self.request_counter = metrics.Count(
        "serve_backend_request_counter",
        description=("The number of queries that have been "
                     "processed in this replica."),
        tag_keys=("backend", ))
    self.request_counter.set_default_tags({"backend": self.backend_tag})

    self.long_poll_client = LongPollAsyncClient(
        controller_handle, {
            LongPollKey.BACKEND_CONFIGS: self._update_backend_configs,
        })

    self.error_counter = metrics.Count(
        "serve_backend_error_counter",
        description=("The number of exceptions that have "
                     "occurred in the backend."),
        tag_keys=("backend", ))
    self.error_counter.set_default_tags({"backend": self.backend_tag})

    self.restart_counter = metrics.Count(
        "serve_backend_replica_starts",
        description=("The number of times this replica "
                     "has been restarted due to failure."),
        tag_keys=("backend", "replica"))
    self.restart_counter.set_default_tags({
        "backend": self.backend_tag,
        "replica": self.replica_tag
    })

    self.queuing_latency_tracker = metrics.Histogram(
        "serve_backend_queuing_latency_ms",
        description=("The latency for queries in the replica's queue "
                     "waiting to be processed or batched."),
        boundaries=DEFAULT_LATENCY_BUCKET_MS,
        tag_keys=("backend", "replica"))
    self.queuing_latency_tracker.set_default_tags({
        "backend": self.backend_tag,
        "replica": self.replica_tag
    })

    self.processing_latency_tracker = metrics.Histogram(
        "serve_backend_processing_latency_ms",
        description="The latency for queries to be processed.",
        boundaries=DEFAULT_LATENCY_BUCKET_MS,
        tag_keys=("backend", "replica", "batch_size"))
    self.processing_latency_tracker.set_default_tags({
        "backend": self.backend_tag,
        "replica": self.replica_tag
    })

    self.num_queued_items = metrics.Gauge(
        "serve_replica_queued_queries",
        description=("The current number of queries queued in "
                     "the backend replicas."),
        tag_keys=("backend", "replica"))
    self.num_queued_items.set_default_tags({
        "backend": self.backend_tag,
        "replica": self.replica_tag
    })

    self.num_processing_items = metrics.Gauge(
        "serve_replica_processing_queries",
        description="The current number of queries being processed.",
        tag_keys=("backend", "replica"))
    self.num_processing_items.set_default_tags({
        "backend": self.backend_tag,
        "replica": self.replica_tag
    })

    self.restart_counter.record(1)

    asyncio.get_event_loop().create_task(self.main_loop())
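# Hedged sketch (an assumption, not taken from the source) of how the
# counters, gauges, and histograms registered above might be driven while a
# batch is processed. invoke_batch, the method name, and the timing details
# are hypothetical; the record() calls mirror the usage already present in
# this file, and `time` is assumed to be imported at module level.
async def _process_batch(self, batch):
    self.num_processing_items.record(len(batch))  # gauge: in-flight queries
    start = time.time()
    try:
        results = await self.invoke_batch(batch)  # hypothetical helper
        self.request_counter.record(len(batch))
        return results
    except Exception:
        self.error_counter.record(1)
        raise
    finally:
        latency_ms = (time.time() - start) * 1000
        self.processing_latency_tracker.record(
            latency_ms, tags={"batch_size": str(len(batch))})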
def __init__(self, backend_tag: str, replica_tag: str, _callable: Callable,
             backend_config: BackendConfig, is_function: bool) -> None:
    self.backend_tag = backend_tag
    self.replica_tag = replica_tag
    self.callable = _callable
    self.is_function = is_function

    self.config = backend_config
    self.batch_queue = BatchQueue(self.config.max_batch_size or 1,
                                  self.config.batch_wait_timeout)

    self.num_ongoing_requests = 0

    self.request_counter = metrics.Count(
        "backend_request_counter",
        description=("Number of queries that have been "
                     "processed in this replica"),
        tag_keys=("backend", ))
    self.request_counter.set_default_tags({"backend": self.backend_tag})

    self.error_counter = metrics.Count(
        "backend_error_counter",
        description=("Number of exceptions that have "
                     "occurred in the backend"),
        tag_keys=("backend", ))
    self.error_counter.set_default_tags({"backend": self.backend_tag})

    self.restart_counter = metrics.Count(
        "backend_worker_starts",
        description=("The number of times this replica worker "
                     "has been restarted due to failure."),
        tag_keys=("backend", "replica_tag"))
    self.restart_counter.set_default_tags({
        "backend": self.backend_tag,
        "replica_tag": self.replica_tag
    })

    self.queuing_latency_tracker = metrics.Histogram(
        "backend_queuing_latency_ms",
        description=("The latency for queries in the replica's queue "
                     "waiting to be processed or batched."),
        boundaries=DEFAULT_LATENCY_BUCKET_MS,
        tag_keys=("backend", "replica_tag"))
    self.queuing_latency_tracker.set_default_tags({
        "backend": self.backend_tag,
        "replica_tag": self.replica_tag
    })

    self.processing_latency_tracker = metrics.Histogram(
        "backend_processing_latency_ms",
        description="The latency for queries to be processed",
        boundaries=DEFAULT_LATENCY_BUCKET_MS,
        tag_keys=("backend", "replica_tag", "batch_size"))
    self.processing_latency_tracker.set_default_tags({
        "backend": self.backend_tag,
        "replica_tag": self.replica_tag
    })

    self.num_queued_items = metrics.Gauge(
        "replica_queued_queries",
        description=("Current number of queries queued in "
                     "the backend replicas"),
        tag_keys=("backend", "replica_tag"))
    self.num_queued_items.set_default_tags({
        "backend": self.backend_tag,
        "replica_tag": self.replica_tag
    })

    self.num_processing_items = metrics.Gauge(
        "replica_processing_queries",
        description="Current number of queries being processed",
        tag_keys=("backend", "replica_tag"))
    self.num_processing_items.set_default_tags({
        "backend": self.backend_tag,
        "replica_tag": self.replica_tag
    })

    self.restart_counter.record(1)

    asyncio.get_event_loop().create_task(self.main_loop())
async def setup(self, name, controller_name):
    # Note: Several queues are used in the router:
    # - When a request comes in, it's placed inside its corresponding
    #   endpoint_queue.
    # - The endpoint_queue is dequeued during the flush operation, which
    #   moves the queries to the backend buffer_queue. Here we match a
    #   request for an endpoint to a backend given some policy.
    # - The worker_queue is used to collect idle actor handles. These
    #   handles are dequeued during the second stage of the flush operation,
    #   which assigns queries in the buffer_queue to actor handles.

    self.name = name

    # -- Queues -- #

    # endpoint_name -> request queue
    # We use FIFO (left to right) ordering. New items should be added
    # using appendleft. Old items should be removed via pop().
    self.endpoint_queues: DefaultDict[deque[Query]] = defaultdict(deque)
    # backend_name -> worker replica tag queue
    self.worker_queues: DefaultDict[deque[str]] = defaultdict(deque)
    # backend_name -> worker payload queue
    self.backend_queues = defaultdict(deque)

    # -- Metadata -- #

    # endpoint_name -> traffic_policy
    self.traffic = dict()
    # backend_name -> backend_config
    self.backend_info = dict()
    # replica tag -> worker_handle
    self.replicas = dict()
    # backend_name -> replica_tag -> concurrent queries counter
    self.queries_counter = defaultdict(lambda: defaultdict(int))

    # -- Synchronization -- #

    # This lock guarantees that only one flush operation can happen at a
    # time. Without the lock, multiple flush operations could pop from the
    # same buffer_queue and worker_queue and deadlock: for example, one
    # operation holding the only query and the other flush operation
    # holding the only idle replica. Additionally, allowing only one flush
    # operation at a time simplifies the design overhead for custom queuing
    # and batching policies.
    self.flush_lock = asyncio.Lock()

    # -- State Restoration -- #
    # Fetch the worker handles, traffic policies, and backend configs from
    # the controller. We use a "pull-based" approach instead of pushing
    # them from the controller so that the router can transparently recover
    # from failure.
    self.controller = ray.get_actor(controller_name)

    traffic_policies = ray.get(
        self.controller.get_traffic_policies.remote())
    for endpoint, traffic_policy in traffic_policies.items():
        await self.set_traffic(endpoint, traffic_policy)

    backend_dict = ray.get(
        self.controller.get_all_worker_handles.remote())
    for backend_tag, replica_dict in backend_dict.items():
        for replica_tag, worker in replica_dict.items():
            await self.add_new_worker(backend_tag, replica_tag, worker)

    backend_configs = ray.get(
        self.controller.get_backend_configs.remote())
    for backend, backend_config in backend_configs.items():
        await self.set_backend_config(backend, backend_config)

    # -- Metrics Registration -- #
    self.num_router_requests = metrics.Count(
        "num_router_requests",
        description="Number of requests processed by the router.",
        tag_keys=("endpoint", ))
    self.num_error_endpoint_requests = metrics.Count(
        "num_error_endpoint_requests",
        description=(
            "Number of requests that errored when getting results "
            "for the endpoint."),
        tag_keys=("endpoint", ))
    self.num_error_backend_requests = metrics.Count(
        "num_error_backend_requests",
        description=("Number of requests that errored when getting result "
                     "from the backend."),
        tag_keys=("backend", ))

    self.backend_queue_size = metrics.Gauge(
        "backend_queued_queries",
        description=("Current number of queries queued "
                     "in the router for a backend"),
        tag_keys=("backend", ))

    asyncio.get_event_loop().create_task(self.report_queue_lengths())
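# Hedged sketch (an assumption, not taken from the source) of the
# report_queue_lengths() task scheduled above: it would periodically publish
# each backend's queue depth through the backend_queued_queries gauge. The
# loop structure and the 10-second interval are illustrative only.
async def report_queue_lengths(self):
    while True:
        for backend, queue in self.backend_queues.items():
            self.backend_queue_size.record(
                len(queue), tags={"backend": backend})
        await asyncio.sleep(10)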