def test_batching(serve_instance):
    """A batched backend should serve multiple queued queries per call."""

    class BatchingExample:
        def __init__(self):
            # Incremented once per *batch*, not once per query.
            self.count = 0

        @serve.accept_batch
        def __call__(self, flask_request, temp=None):
            self.count += 1
            # One result entry per query in the current batch.
            return [self.count] * serve.context.batch_size

    serve.create_endpoint("counter1", "/increment")

    # Keep checking the routing table until /increment is populated.
    while "/increment" not in requests.get("http://127.0.0.1:8000/").json():
        time.sleep(0.2)

    # Cap the batch size so queries get grouped together.
    config = BackendConfig(max_batch_size=5)
    serve.create_backend(BatchingExample, "counter:v11", backend_config=config)
    serve.link("counter1", "counter:v11")

    handle = serve.get_handle("counter1")
    futures = [handle.remote(temp=1) for _ in range(20)]
    results = ray.get(futures)

    # Since count only advances once per batch, if at least one __call__
    # handled a batch larger than 1 the maximum observed count stays
    # strictly below the number of queries sent.
    assert max(results) < 20
def test_e2e(serve_instance):
    """End-to-end: route table propagates to HTTP, backend serves /api."""
    serve.init()  # so we have access to global state
    serve.create_endpoint("endpoint", "/api", blocking=True)
    result = serve.api._get_global_state().route_table.list_service()
    assert result["/api"] == "endpoint"

    # Retry with exponential backoff until the HTTP proxy reflects the
    # route table, or give up after the retry budget is exhausted.
    retry_count = 5
    timeout_sleep = 0.5
    while True:
        try:
            resp = requests.get(
                "http://127.0.0.1:8000/", timeout=0.5).json()
            assert resp == result
            break
        except Exception:
            time.sleep(timeout_sleep)
            timeout_sleep *= 2
            retry_count -= 1
            if retry_count == 0:
                # Fixed message: we retry 5 times, not 3 as it used to claim.
                assert False, "Route table hasn't been updated after 5 tries."

    def function(flask_request):
        return "OK"

    serve.create_backend(function, "echo:v1")
    serve.link("endpoint", "echo:v1")

    resp = requests.get("http://127.0.0.1:8000/api").json()["result"]
    assert resp == "OK"
def test_not_killing_replicas(serve_instance):
    """Updating max_batch_size must reuse the existing replica actors."""

    class BatchSimple:
        def __init__(self):
            self.count = 0

        @serve.accept_batch
        def __call__(self, flask_request, temp=None):
            return [1] * serve.context.batch_size

    serve.create_endpoint("bsimple", "/bsimple")
    config = BackendConfig(num_replicas=3, max_batch_size=2)
    serve.create_backend(BatchSimple, "bsimple:v1", backend_config=config)

    global_state = serve.api._get_global_state()
    old_replicas = global_state.backend_table.list_replicas("bsimple:v1")

    # Change only the batch size and push the updated config.
    updated_config = serve.get_backend_config("bsimple:v1")
    updated_config.max_batch_size = 5
    serve.set_backend_config("bsimple:v1", updated_config)

    new_replicas = global_state.backend_table.list_replicas("bsimple:v1")
    global_state.refresh_actor_handle_cache()
    all_tags = list(global_state.actor_handle_cache.keys())

    # The replica tags before and after must be identical, and all of
    # them must still be registered in the actor handle cache.
    assert set(old_replicas) <= set(all_tags)
    assert set(old_replicas) == set(new_replicas)
def test_e2e(serve_instance):
    """End-to-end check: route table reaches HTTP, backend answers /api."""
    serve.create_endpoint("endpoint", "/api")
    result = ray.get(
        serve.global_state.kv_store_actor_handle.list_service.remote())
    assert result == {"/api": "endpoint"}

    # Poll the proxy a few times; the route table update is asynchronous.
    retries_left = 3
    while True:
        try:
            assert requests.get("http://127.0.0.1:8000/").json() == result
            break
        except Exception:
            time.sleep(0.5)
            retries_left -= 1
            if retries_left == 0:
                assert False, "Route table hasn't been updated after 3 tries."

    def function(flask_request):
        return "OK"

    serve.create_backend(function, "echo:v1")
    serve.link("endpoint", "echo:v1")

    resp = requests.get("http://127.0.0.1:8000/api").json()["result"]
    assert resp == "OK"
def test_killing_replicas(serve_instance):
    """Changing num_cpus must restart replicas under brand-new tags."""

    class Simple:
        def __init__(self):
            self.count = 0

        def __call__(self, flask_request, temp=None):
            return temp

    serve.create_endpoint("simple", "/simple")
    config = BackendConfig(num_replicas=3, num_cpus=2)
    serve.create_backend(Simple, "simple:v1", backend_config=config)

    global_state = serve.api._get_global_state()
    old_replicas = global_state.backend_table.list_replicas("simple:v1")

    # Lower the CPU reservation; this config change recreates replicas.
    updated_config = serve.get_backend_config("simple:v1")
    updated_config.num_cpus = 1
    serve.set_backend_config("simple:v1", updated_config)

    new_replicas = global_state.backend_table.list_replicas("simple:v1")
    global_state.refresh_actor_handle_cache()
    all_tags = list(global_state.actor_handle_cache.keys())

    # Every new replica must be live in the actor cache...
    assert set(new_replicas) <= set(all_tags)
    # ...and none of the old replicas may survive.
    assert not set(old_replicas) <= set(all_tags)
def test_e2e(serve_instance):
    """Route table is served over HTTP and the backend echoes the request."""
    serve.create_endpoint("endpoint", "/api")
    route_table = ray.get(
        serve.global_state.kv_store_actor_handle.list_service.remote())
    assert route_table == {"/api": "endpoint"}
    assert requests.get("http://127.0.0.1:8000/").json() == route_table

    def echo(i):
        return i

    serve.create_backend(echo, "echo:v1")
    serve.link("endpoint", "echo:v1")

    result = requests.get("http://127.0.0.1:8000/api").json()["result"]
    # The echoed request metadata should describe our GET to /api.
    assert result["path"] == "/api"
    assert result["method"] == "GET"
def test_no_route(serve_instance):
    """An endpoint without an HTTP route is headless but handle-callable."""
    serve.create_endpoint("noroute-endpoint", blocking=True)
    global_state = serve.api._get_global_state()

    # Headless endpoints only appear when explicitly requested.
    with_headless = global_state.route_table.list_service(
        include_headless=True)
    assert with_headless[NO_ROUTE_KEY] == ["noroute-endpoint"]
    assert NO_ROUTE_KEY not in global_state.route_table.list_service()

    def func(_, i=1):
        return 1

    serve.create_backend(func, "backend:1")
    serve.link("noroute-endpoint", "backend:1")

    # Even without a route, the endpoint works through its Python handle.
    service_handle = serve.get_handle("noroute-endpoint")
    assert ray.get(service_handle.remote(i=1)) == 1
def test_batching_exception(serve_instance):
    """A batched backend returning a non-list must raise on the caller side."""

    class NoListReturned:
        def __init__(self):
            self.count = 0

        @serve.accept_batch
        def __call__(self, flask_request, temp=None):
            # Deliberately violates the accept_batch contract by
            # returning a scalar instead of one result per query.
            return serve.context.batch_size

    serve.create_endpoint("exception-test", "/noListReturned")

    config = BackendConfig(max_batch_size=5)
    serve.create_backend(
        NoListReturned, "exception:v1", backend_config=config)
    serve.link("exception-test", "exception:v1")

    handle = serve.get_handle("exception-test")
    # The contract violation surfaces to the caller as a task error.
    with pytest.raises(ray.exceptions.RayTaskError):
        assert ray.get(handle.remote(temp=1))
def test_scaling_replicas(serve_instance):
    """Scaling 2 replicas down to 1 changes how the counter distributes."""

    class Counter:
        def __init__(self):
            self.count = 0

        def __call__(self, _):
            self.count += 1
            return self.count

    serve.create_endpoint("counter", "/increment")

    # Keep checking the routing table until /increment is populated.
    while "/increment" not in requests.get("http://127.0.0.1:8000/").json():
        time.sleep(0.2)

    serve.create_backend(
        Counter, "counter:v1",
        backend_config=BackendConfig(num_replicas=2))
    serve.link("counter", "counter:v1")

    results = [
        requests.get("http://127.0.0.1:8000/increment").json()["result"]
        for _ in range(10)
    ]
    # With two replicas sharing the load, no single counter reaches 10.
    assert max(results) < 10

    scaled_config = serve.get_backend_config("counter:v1")
    scaled_config.num_replicas = 1
    serve.set_backend_config("counter:v1", scaled_config)

    results = [
        requests.get("http://127.0.0.1:8000/increment").json()["result"]
        for _ in range(10)
    ]
    # Give some time for a replica to spin down; the remaining replica
    # serves the majority of requests, so the result spread is wide.
    assert max(results) - min(results) > 6
import time

import requests

import ray
from ray.experimental import serve
from ray.experimental.serve.utils import pformat_color_json


# A backend that always fails, to demonstrate error propagation.
def echo(_):
    raise Exception("Something went wrong...")


serve.init(blocking=True)

serve.create_endpoint("my_endpoint", "/echo", blocking=True)
serve.create_backend(echo, "echo:v1")
serve.link("my_endpoint", "echo:v1")

# Over HTTP the failure comes back as a JSON error payload.
for _ in range(2):
    response = requests.get("http://127.0.0.1:8000/echo").json()
    print(pformat_color_json(response))

    print("...Sleeping for 2 seconds...")
    time.sleep(2)

# From Python the failure is re-raised with its traceback.
handle = serve.get_handle("my_endpoint")
print("Invoke from python will raise exception with traceback:")
ray.get(handle.remote())
n_block, hw)) p.touch() os.environ["SERVE_PROFILE_PATH"] = str(p.resolve()) serve.init(blocking=True) # Kwargs creator for profiling the service kwargs_creator = lambda: { 'info': { "patient_name": "Adam", "value": 0.0, "vtype": "ECG" } } # create ECG service serve.create_endpoint("ECG") # create data point service for hospital serve.create_endpoint("hospital", route="/hospital", kwargs_creator=kwargs_creator) # create backend for ECG b_config = BackendConfig(num_replicas=1) serve.create_backend(PytorchPredictorECG, "PredictECG", model, cuda, backend_config=b_config) # link service and backend serve.link("ECG", "PredictECG") handle = serve.get_handle("ECG")
if serve.context.web: result = [] for flask_request in flask_request_list: base_number = int(flask_request.args.get("base_number", "0")) result.append(base_number) return list(map(lambda x: x + self.increment, result)) else: result = [] for b in base_number: ans = b + self.increment result.append(ans) return result serve.init(blocking=True) serve.create_endpoint("magic_counter", "/counter", blocking=True) b_config = BackendConfig(max_batch_size=5) serve.create_backend(MagicCounter, "counter:v1", 42, backend_config=b_config) # increment=42 serve.link("magic_counter", "counter:v1") print("Sending ten queries via HTTP") for i in range(10): url = "http://127.0.0.1:8000/counter?base_number={}".format(i) print("> Pinging {}".format(url)) resp = requests.get(url).json() print(pformat_color_json(resp)) time.sleep(0.2) print("Sending ten queries via Python")
""" import time import requests import ray import ray.experimental.serve as serve from ray.experimental.serve.utils import pformat_color_json # initialize ray serve system. # blocking=True will wait for HTTP server to be ready to serve request. serve.init(blocking=True) # an endpoint is associated with an http URL. serve.create_endpoint("my_endpoint", "/echo") # a backend can be a function or class. # it can be made to be invoked from web as well as python. def echo_v1(flask_request, response="hello from python!"): if serve.context.web: response = flask_request.url return response serve.create_backend(echo_v1, "echo:v1") # We can link an endpoint to a backend, the means all the traffic # goes to my_endpoint will now goes to echo:v1 backend. serve.link("my_endpoint", "echo:v1")
def benchmark(func, name):
    """Warm up outside the profiled region, then profile repeated runs."""
    for _ in range(NUM_WARMUPS):
        func()
    for _ in range(NUM_REPEATS):
        with profile(name):
            func()


# Serve backend: simulate 50 ms of work per request.
def work(_):
    time.sleep(0.05)


# Plain Ray task doing the same work, as a baseline.
@ray.remote
def work_ray():
    time.sleep(0.05)


serve.init()
serve.create_endpoint('sleep', '/')
serve.create_backend(work, 'sleep:v1')
serve.link('sleep', 'sleep:v1')
handle = serve.get_handle('sleep')

# Compare serve-handle overhead against a raw Ray remote call.
benchmark(lambda: ray.get(handle.remote()), "serve_sleep")
benchmark(lambda: ray.get(work_ray.remote()), "ray_sleep")
summarize_profile()