def main(num_replicas, batch_size, method, filename):
    for node_id in ["up", "down"]:
        with serve_benchmark.using_router(node_id):
            serve_benchmark.create_endpoint(node_id)
            config = serve_benchmark.BackendConfig(
                max_batch_size=batch_size, num_replicas=num_replicas
            )
            serve_benchmark.create_backend(noop, node_id, backend_config=config)
            serve_benchmark.link(node_id, node_id)

    with serve_benchmark.using_router("up"):
        up_handle = serve_benchmark.get_handle("up")
    with serve_benchmark.using_router("down"):
        down_handle = serve_benchmark.get_handle("down")

    # start = time.perf_counter()
    oids = []
    if method == "chain":
        # closed loop latencies
        for i in range(num_queries):
            r = up_handle.options(tracing_metadata={"pipeline-id": i}).remote(
                data=image_data
            )
            oid = down_handle.options(tracing_metadata={"pipeline-id": i}).remote(
                data=r  # torch tensor
            )
            ray.wait([oid], 1)
    elif method == "group":
        res = [
            up_handle.options(tracing_metadata={"pipeline-id": i}).remote(
                sleep_time=0.01, data=image_data
            )
            for i in range(num_queries)
        ]
        oids = [
            down_handle.options(tracing_metadata={"pipeline-id": i}).remote(
                sleep_time=0.02, data=d  # torch tensor
            )
            for i, d in enumerate(res)
        ]
    else:
        raise RuntimeError("Unreachable")

    # print(f"Submission time {time.perf_counter() - start}")
    # end = time.perf_counter()
    # duration = end - start
    # qps = num_queries / duration
    # print(f"Throughput {qps}")

    trace_file = filename or tempfile.mkstemp(suffix=".json")[1]
    with open(trace_file, "w") as f:
        json.dump(serve_benchmark.get_trace(), f)

    ray.timeline(f"ray_trace_{batch_size}-{method}.json")
    print(f"Serve trace file written to {trace_file}")
def __init__(self, vertex_config, model_name):
    super().__init__()
    handle_list = list()
    for node_id in vertex_config.keys():
        backend_config = vertex_config[node_id]
        with serve_benchmark.using_router(node_id):
            serve_benchmark.create_endpoint(node_id)
            config = serve_benchmark.BackendConfig(**backend_config)
            if node_id == "prepoc":
                min_img_size = 224
                transform = transforms.Compose([
                    transforms.Resize(min_img_size),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225],
                    ),
                ])
                serve_benchmark.create_backend(
                    Transform, node_id, transform, backend_config=config
                )
            elif node_id == "model":
                serve_benchmark.create_backend(
                    PredictModelPytorch,
                    node_id,
                    model_name,
                    True,
                    backend_config=config,
                )
            serve_benchmark.link(node_id, node_id)
            handle_list.append(serve_benchmark.get_handle(node_id))

    self.chain_handle = ChainHandle(handle_list)
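For reference, this constructor expects `vertex_config` to map each node id to the keyword arguments of `serve_benchmark.BackendConfig`. A minimal usage sketch, assuming the enclosing driver class is called `ImagePipeline` and that "resnet50" is an accepted model name (both are hypothetical; only the node ids "prepoc"/"model" and the BackendConfig fields come from the code above):

# Hypothetical sketch: `ImagePipeline` and "resnet50" are assumed names.
vertex_config = {
    "prepoc": {"max_batch_size": 1, "num_replicas": 1},
    "model": {"max_batch_size": 8, "num_replicas": 2},
}
pipeline = ImagePipeline(vertex_config, model_name="resnet50")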
def test_new_driver(serve_instance):
    script = """
import ray
ray.init(address="{}")

from benchmarking import serve_benchmark
serve_benchmark.init()

@serve_benchmark.route("/driver")
def driver(flask_request):
    return "OK!"
""".format(ray.worker._global_node._redis_address)

    with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
        path = f.name
        f.write(script)

    proc = subprocess.Popen(["python", path])
    return_code = proc.wait(timeout=10)
    assert return_code == 0

    handle = serve_benchmark.get_handle("driver")
    assert ray.get(handle.remote()) == "OK!"

    os.remove(path)
def __init__(self, max_batch_size, pipeline_length):
    self.plength = pipeline_length
    self.handles = list()
    for index in range(self.plength):
        node_id = f"service-{index}"
        with serve_benchmark.using_router(node_id):
            serve_benchmark.create_endpoint(node_id)
            config = serve_benchmark.BackendConfig(
                max_batch_size=max_batch_size, num_replicas=1
            )
            serve_benchmark.create_backend(noop, node_id, backend_config=config)
            serve_benchmark.link(node_id, node_id)
            self.handles.append(serve_benchmark.get_handle(node_id))
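A minimal sketch of how a query could be threaded through the chained handles built above; the helper name `remote_chain` is hypothetical and the `handle.remote(data=...)` call pattern is borrowed from the chain benchmarks elsewhere in this suite:

def remote_chain(self, data):
    # Hypothetical helper (not part of the original class): feed the ObjectRef
    # returned by each stage into the next stage's handle, mirroring the
    # up_handle -> down_handle pattern used in the chain benchmarks.
    result = data
    for handle in self.handles:
        result = handle.remote(data=result)
    return result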
def test_no_route(serve_instance):
    serve_benchmark.create_endpoint("noroute-endpoint")
    global_state = serve_benchmark.api._get_global_state()

    result = global_state.route_table.list_service(include_headless=True)
    assert result[NO_ROUTE_KEY] == ["noroute-endpoint"]

    without_headless_result = global_state.route_table.list_service()
    assert NO_ROUTE_KEY not in without_headless_result

    def func(_, i=1):
        return 1

    serve_benchmark.create_backend(func, "backend:1")
    serve_benchmark.link("noroute-endpoint", "backend:1")
    service_handle = serve_benchmark.get_handle("noroute-endpoint")
    result = ray.get(service_handle.remote(i=1))
    assert result == 1
def test_batching_exception(serve_instance):
    class NoListReturned:
        def __init__(self):
            self.count = 0

        @serve_benchmark.accept_batch
        def __call__(self, flask_request, temp=None):
            # Returns a scalar instead of one result per query in the batch,
            # which the batching wrapper should reject.
            batch_size = serve_benchmark.context.batch_size
            return batch_size

    serve_benchmark.create_endpoint("exception-test", "/noListReturned")

    # Set the max batch size.
    b_config = BackendConfig(max_batch_size=5)
    serve_benchmark.create_backend(
        NoListReturned, "exception:v1", backend_config=b_config
    )
    serve_benchmark.link("exception-test", "exception:v1")
    handle = serve_benchmark.get_handle("exception-test")

    with pytest.raises(ray.exceptions.RayTaskError):
        assert ray.get(handle.remote(temp=1))
def test_batching(serve_instance):
    class BatchingExample:
        def __init__(self):
            self.count = 0

        @serve_benchmark.accept_batch
        def __call__(self, flask_request, temp=None):
            self.count += 1
            batch_size = serve_benchmark.context.batch_size
            return [self.count] * batch_size

    serve_benchmark.create_endpoint("counter1", "/increment")

    # Keep checking the routing table until /increment is populated.
    while ("/increment" not in
           requests.get("http://127.0.0.1:8000/-/routes").json()):
        time.sleep(0.2)

    # Set the max batch size.
    b_config = BackendConfig(max_batch_size=5)
    serve_benchmark.create_backend(
        BatchingExample, "counter:v11", backend_config=b_config
    )
    serve_benchmark.link("counter1", "counter:v11")

    future_list = []
    handle = serve_benchmark.get_handle("counter1")
    for _ in range(20):
        f = handle.remote(temp=1)
        future_list.append(f)

    counter_result = ray.get(future_list)
    # `count` is only incremented once per batch of queries, so if at least one
    # __call__ handled a batch larger than 1, the maximum counter value stays
    # below 20.
    assert max(counter_result) < 20
def main(num_replicas, method):
    for node_id in ["up", "down"]:
        with serve_benchmark.using_router(node_id):
            serve_benchmark.create_endpoint(node_id)
            config = serve_benchmark.BackendConfig(
                max_batch_size=1, num_replicas=num_replicas
            )
            serve_benchmark.create_backend(noop, node_id, backend_config=config)
            serve_benchmark.link(node_id, node_id)

    with serve_benchmark.using_router("up"):
        up_handle = serve_benchmark.get_handle("up")
    with serve_benchmark.using_router("down"):
        down_handle = serve_benchmark.get_handle("down")

    start = time.perf_counter()
    oids = []
    if method == "chain":
        for i in range(num_queries):
            r = up_handle.options(tracing_metadata={"pipeline-id": i}).remote(
                sleep_time=0.01, data=image_data
            )
            r = down_handle.options(tracing_metadata={"pipeline-id": i}).remote(
                sleep_time=0.02, data=r  # torch tensor
            )
            oids.append(r)
    elif method == "group":
        res = [
            up_handle.options(tracing_metadata={"pipeline-id": i}).remote(
                sleep_time=0.01, data=image_data
            )
            for i in range(num_queries)
        ]
        oids = [
            down_handle.options(tracing_metadata={"pipeline-id": i}).remote(
                sleep_time=0.02, data=d  # torch tensor
            )
            for i, d in enumerate(res)
        ]
    else:
        raise RuntimeError("Unreachable")

    print(f"Submission time {time.perf_counter() - start}")
    ray.wait(oids, len(oids))
    end = time.perf_counter()
    duration = end - start
    qps = num_queries / duration
    print(f"Throughput {qps}")

    _, trace_file = tempfile.mkstemp(suffix=".json")
    with open(trace_file, "w") as f:
        json.dump(serve_benchmark.get_trace(), f)
    print(f"trace file written to {trace_file}")