Example #1
def main(num_replicas, batch_size, method, filename):
    # Set up an endpoint and a noop backend for each stage of the two-stage pipeline.
    for node_id in ["up", "down"]:
        with serve_benchmark.using_router(node_id):
            serve_benchmark.create_endpoint(node_id)
            config = serve_benchmark.BackendConfig(
                max_batch_size=batch_size, num_replicas=num_replicas
            )
            serve_benchmark.create_backend(noop, node_id, backend_config=config)
            serve_benchmark.link(node_id, node_id)

    with serve_benchmark.using_router("up"):
        up_handle = serve_benchmark.get_handle("up")
    with serve_benchmark.using_router("down"):
        down_handle = serve_benchmark.get_handle("down")

    # start = time.perf_counter()
    oids = []

    if method == "chain":
        # closed loop latencies
        for i in range(num_queries):
            r = up_handle.options(tracing_metadata={"pipeline-id": i}).remote(
                data=image_data
            )
            oid = down_handle.options(
                tracing_metadata={"pipeline-id": i}
            ).remote(
                data=r  # torch tensor
            )
            ray.wait([oid], num_returns=1)  # block until this pipeline's result is ready
    elif method == "group":
        # open loop: submit every upstream query first, then fan the results
        # into the downstream stage without waiting in between
        res = [
            up_handle.options(tracing_metadata={"pipeline-id": i}).remote(
                sleep_time=0.01, data=image_data
            )
            for i in range(num_queries)
        ]
        oids = [
            down_handle.options(tracing_metadata={"pipeline-id": i}).remote(
                sleep_time=0.02, data=d  # torch tensor
            )
            for i, d in enumerate(res)
        ]
    else:
        raise RuntimeError("Unreachable")
    # print(f"Submission time {time.perf_counter() - start}")

    # end = time.perf_counter()

    # duration = end - start
    # qps = num_queries / duration

    # print(f"Throughput {qps}")

    trace_file = filename or tempfile.mkstemp(suffix=".json")[1]
    with open(trace_file, "w") as f:
        json.dump(serve_benchmark.get_trace(), f)
    ray.timeline(f"ray_trace_{batch_size}-{method}.json")

    print(f"Serve trace file written to {trace_file}")
Example #2
    def __init__(self, vertex_config, model_name):
        super().__init__()
        handle_list = list()
        for node_id, backend_config in vertex_config.items():
            with serve_benchmark.using_router(node_id):
                serve_benchmark.create_endpoint(node_id)
                config = serve_benchmark.BackendConfig(**backend_config)
                if node_id == "prepoc":
                    min_img_size = 224
                    transform = transforms.Compose([
                        transforms.Resize(min_img_size),
                        transforms.ToTensor(),
                        transforms.Normalize(
                            mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225],
                        ),
                    ])
                    serve_benchmark.create_backend(Transform,
                                                   node_id,
                                                   transform,
                                                   backend_config=config)
                elif node_id == "model":
                    serve_benchmark.create_backend(
                        PredictModelPytorch,
                        node_id,
                        model_name,
                        True,
                        backend_config=config,
                    )
                serve_benchmark.link(node_id, node_id)
                handle_list.append(serve_benchmark.get_handle(node_id))

        self.chain_handle = ChainHandle(handle_list)
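
# Illustrative only: the vertex_config consumed by the constructor above maps
# each node id to the keyword arguments of serve_benchmark.BackendConfig. The
# node ids come from the code above; the values here are assumptions.
example_vertex_config = {
    "prepoc": {"max_batch_size": 1, "num_replicas": 1},
    "model": {"max_batch_size": 8, "num_replicas": 2},
}
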
    def __init__(self, max_batch_size, pipeline_length):
        self.plength = pipeline_length
        self.handles = list()

        for index in range(self.plength):
            node_id = f"service-{index}"
            with serve_benchmark.using_router(node_id):
                serve_benchmark.create_endpoint(node_id)
                config = serve_benchmark.BackendConfig(
                    max_batch_size=max_batch_size, num_replicas=1)
                serve_benchmark.create_backend(noop,
                                               node_id,
                                               backend_config=config)
                serve_benchmark.link(node_id, node_id)
                self.handles.append(serve_benchmark.get_handle(node_id))

    def get_backend_config(self, backend_tag=None):
        with serve_benchmark.using_router(self.endpoint_name):
            backend_tag = self._ensure_backend_unique(backend_tag)
            return serve_benchmark.get_backend_config(backend_tag)

    def set_max_batch_size(self, new_max_batch_size, backend_tag=None):
        with serve_benchmark.using_router(self.endpoint_name):
            backend_tag = self._ensure_backend_unique(backend_tag)
            config = serve_benchmark.get_backend_config(backend_tag)
            config.max_batch_size = new_max_batch_size
            serve_benchmark.set_backend_config(backend_tag, config)

    def scale(self, new_num_replicas, backend_tag=None):
        with serve_benchmark.using_router(self.endpoint_name):
            backend_tag = self._ensure_backend_unique(backend_tag)
            config = serve_benchmark.get_backend_config(backend_tag)
            config.num_replicas = new_num_replicas
            serve_benchmark.set_backend_config(backend_tag, config)
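
# The three helpers above are thin wrappers over the serve_benchmark config
# API: fetch the backend's BackendConfig, mutate a single field, and write it
# back. A sketch of the equivalent inline form, reusing the "up" endpoint and
# backend tag from the surrounding examples (assuming that endpoint exists):
with serve_benchmark.using_router("up"):
    config = serve_benchmark.get_backend_config("up")
    config.num_replicas = 4  # same effect as scale(4)
    serve_benchmark.set_backend_config("up", config)
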
def main(num_replicas, method):
    for node_id in ["up", "down"]:
        with serve_benchmark.using_router(node_id):
            serve_benchmark.create_endpoint(node_id)
            config = serve_benchmark.BackendConfig(max_batch_size=1,
                                                   num_replicas=num_replicas)
            serve_benchmark.create_backend(noop,
                                           node_id,
                                           backend_config=config)
            serve_benchmark.link(node_id, node_id)

    with serve_benchmark.using_router("up"):
        up_handle = serve_benchmark.get_handle("up")
    with serve_benchmark.using_router("down"):
        down_handle = serve_benchmark.get_handle("down")

    start = time.perf_counter()
    oids = []

    if method == "chain":
        for i in range(num_queries):
            r = up_handle.options(tracing_metadata={
                "pipeline-id": i
            }).remote(sleep_time=0.01, data=image_data)
            r = down_handle.options(tracing_metadata={
                "pipeline-id": i
            }).remote(
                sleep_time=0.02,
                data=r  # torch tensor
            )
            oids.append(r)
    elif method == "group":
        res = [
            up_handle.options(tracing_metadata={
                "pipeline-id": i
            }).remote(sleep_time=0.01, data=image_data)
            for i in range(num_queries)
        ]
        oids = [
            down_handle.options(tracing_metadata={
                "pipeline-id": i
            }).remote(
                sleep_time=0.02,
                data=d  # torch tensor
            ) for i, d in enumerate(res)
        ]
    else:
        raise RuntimeError("Unreachable")
    print(f"Submission time {time.perf_counter() - start}")

    ray.wait(oids, num_returns=len(oids))  # block until every pipeline has finished
    end = time.perf_counter()

    duration = end - start
    qps = num_queries / duration

    print(f"Throughput {qps}")

    _, trace_file = tempfile.mkstemp(suffix=".json")
    with open(trace_file, "w") as f:
        json.dump(serve_benchmark.get_trace(), f)
    print(f"trace file written to {trace_file}")