Exemplo n.º 1
0
def test_batching(serve_instance):
    """A batched backend bumps its counter once per *batch*, so 20
    queries served with max_batch_size=5 never see a count of 20."""

    class BatchingExample:
        def __init__(self):
            self.count = 0

        @serve.accept_batch
        def __call__(self, flask_request, temp=None):
            self.count += 1
            batch_size = serve.context.batch_size
            return [self.count] * batch_size

    serve.create_endpoint("counter1", "/increment")

    # Poll the routing table until the /increment route shows up.
    while "/increment" not in requests.get("http://127.0.0.1:8000/").json():
        time.sleep(0.2)

    # Cap batches at five queries.
    backend_config = BackendConfig(max_batch_size=5)
    serve.create_backend(BatchingExample,
                         "counter:v11",
                         backend_config=backend_config)
    serve.link("counter1", "counter:v11")

    handle = serve.get_handle("counter1")
    futures = [handle.remote(temp=1) for _ in range(20)]
    results = ray.get(futures)

    # Since count is updated only once per batch of queries, if at
    # least one __call__ handled a batch larger than 1, the maximum
    # observed count must stay below 20.
    assert max(results) < 20
Exemplo n.º 2
0
def test_e2e(serve_instance):
    """End-to-end check: the route table is served over HTTP and a
    linked backend answers requests on its route."""
    serve.init()  # so we have access to global state
    serve.create_endpoint("endpoint", "/api", blocking=True)
    result = serve.api._get_global_state().route_table.list_service()
    assert result["/api"] == "endpoint"

    # Poll the HTTP proxy with exponential backoff until it serves the
    # same route table; give up after `max_retries` attempts.
    max_retries = 5
    retry_count = max_retries
    timeout_sleep = 0.5
    while True:
        try:
            resp = requests.get("http://127.0.0.1:8000/", timeout=0.5).json()
            assert resp == result
            break
        except Exception:
            time.sleep(timeout_sleep)
            timeout_sleep *= 2
            retry_count -= 1
            if retry_count == 0:
                # Bug fix: the old message hard-coded "3 tries" while
                # the retry budget here is actually 5.
                assert False, ("Route table hasn't been updated after "
                               "{} tries.".format(max_retries))

    def function(flask_request):
        return "OK"

    serve.create_backend(function, "echo:v1")
    serve.link("endpoint", "echo:v1")

    resp = requests.get("http://127.0.0.1:8000/api").json()["result"]
    assert resp == "OK"
Exemplo n.º 3
0
def test_not_killing_replicas(serve_instance):
    """Changing max_batch_size must reconfigure the existing replicas
    in place rather than tearing them down and restarting them."""

    class BatchSimple:
        def __init__(self):
            self.count = 0

        @serve.accept_batch
        def __call__(self, flask_request, temp=None):
            batch_size = serve.context.batch_size
            return [1] * batch_size

    serve.create_endpoint("bsimple", "/bsimple")
    config = BackendConfig(num_replicas=3, max_batch_size=2)
    serve.create_backend(BatchSimple, "bsimple:v1", backend_config=config)
    state = serve.api._get_global_state()
    replicas_before = state.backend_table.list_replicas("bsimple:v1")

    # Bump the batch size and push the updated config.
    updated_config = serve.get_backend_config("bsimple:v1")
    updated_config.max_batch_size = 5
    serve.set_backend_config("bsimple:v1", updated_config)
    replicas_after = state.backend_table.list_replicas("bsimple:v1")
    state.refresh_actor_handle_cache()
    all_tags = list(state.actor_handle_cache.keys())

    # The replica set is unchanged, and its members are still
    # registered in the actor handle cache.
    assert set(replicas_before) <= set(all_tags)
    assert set(replicas_before) == set(replicas_after)
Exemplo n.º 4
0
Arquivo: test_api.py Projeto: ujvl/ray
def test_e2e(serve_instance):
    """Route table propagates to the HTTP proxy and a linked backend
    serves requests on its route."""
    serve.create_endpoint("endpoint", "/api")
    result = ray.get(
        serve.global_state.kv_store_actor_handle.list_service.remote())
    assert result == {"/api": "endpoint"}

    # Poll until the proxy reflects the route table; abort once the
    # retry budget runs out.
    retries_left = 3
    while True:
        try:
            resp = requests.get("http://127.0.0.1:8000/").json()
            assert resp == result
            break
        except Exception:
            time.sleep(0.5)
            retries_left -= 1
            if retries_left == 0:
                assert False, "Route table hasn't been updated after 3 tries."

    def function(flask_request):
        return "OK"

    serve.create_backend(function, "echo:v1")
    serve.link("endpoint", "echo:v1")

    resp = requests.get("http://127.0.0.1:8000/api").json()["result"]
    assert resp == "OK"
Exemplo n.º 5
0
def test_killing_replicas(serve_instance):
    """Changing num_cpus must restart the replica actors: the new
    replica set is registered while the old one disappears."""

    class Simple:
        def __init__(self):
            self.count = 0

        def __call__(self, flask_request, temp=None):
            return temp

    serve.create_endpoint("simple", "/simple")
    config = BackendConfig(num_replicas=3, num_cpus=2)
    serve.create_backend(Simple, "simple:v1", backend_config=config)
    state = serve.api._get_global_state()
    replicas_before = state.backend_table.list_replicas("simple:v1")

    # Lower the CPU reservation and push the new config; this cannot
    # be applied in place, so replicas are recreated.
    updated_config = serve.get_backend_config("simple:v1")
    updated_config.num_cpus = 1
    serve.set_backend_config("simple:v1", updated_config)
    replicas_after = state.backend_table.list_replicas("simple:v1")
    state.refresh_actor_handle_cache()
    all_tags = list(state.actor_handle_cache.keys())

    # The fresh replicas are all registered ...
    assert set(replicas_after) <= set(all_tags)

    # ... and the old ones are gone.
    assert not set(replicas_before) <= set(all_tags)
Exemplo n.º 6
0
def test_e2e(serve_instance):
    """Endpoint registration is visible over HTTP, and an echo backend
    reflects request metadata (path, method) back to the caller."""
    serve.create_endpoint("endpoint", "/api")
    result = ray.get(
        serve.global_state.kv_store_actor_handle.list_service.remote())
    assert result == {"/api": "endpoint"}

    routes = requests.get("http://127.0.0.1:8000/").json()
    assert routes == result

    def echo(i):
        return i

    serve.create_backend(echo, "echo:v1")
    serve.link("endpoint", "echo:v1")

    payload = requests.get("http://127.0.0.1:8000/api").json()
    echoed = payload["result"]
    assert echoed["path"] == "/api"
    assert echoed["method"] == "GET"
Exemplo n.º 7
0
def test_no_route(serve_instance):
    """A headless endpoint (created with no HTTP route) is listed only
    when headless services are requested, yet remains callable through
    a Serve handle."""
    serve.create_endpoint("noroute-endpoint", blocking=True)
    state = serve.api._get_global_state()

    with_headless = state.route_table.list_service(include_headless=True)
    assert with_headless[NO_ROUTE_KEY] == ["noroute-endpoint"]

    assert NO_ROUTE_KEY not in state.route_table.list_service()

    def func(_, i=1):
        return 1

    serve.create_backend(func, "backend:1")
    serve.link("noroute-endpoint", "backend:1")
    handle = serve.get_handle("noroute-endpoint")
    assert ray.get(handle.remote(i=1)) == 1
Exemplo n.º 8
0
def test_batching_exception(serve_instance):
    """A batched backend that returns a scalar instead of a list must
    surface a RayTaskError to the caller."""

    class NoListReturned:
        def __init__(self):
            self.count = 0

        @serve.accept_batch
        def __call__(self, flask_request, temp=None):
            batch_size = serve.context.batch_size
            return batch_size

    serve.create_endpoint("exception-test", "/noListReturned")
    # Allow batches of up to five queries.
    config = BackendConfig(max_batch_size=5)
    serve.create_backend(NoListReturned,
                         "exception:v1",
                         backend_config=config)
    serve.link("exception-test", "exception:v1")

    handle = serve.get_handle("exception-test")
    with pytest.raises(ray.exceptions.RayTaskError):
        assert ray.get(handle.remote(temp=1))
Exemplo n.º 9
0
def test_scaling_replicas(serve_instance):
    """Scaling a backend from two replicas down to one changes how the
    per-replica counters advance across repeated requests."""

    class Counter:
        def __init__(self):
            self.count = 0

        def __call__(self, _):
            self.count += 1
            return self.count

    serve.create_endpoint("counter", "/increment")

    # Poll the routing table until the /increment route appears.
    while "/increment" not in requests.get("http://127.0.0.1:8000/").json():
        time.sleep(0.2)

    config = BackendConfig(num_replicas=2)
    serve.create_backend(Counter, "counter:v1", backend_config=config)
    serve.link("counter", "counter:v1")

    results = [
        requests.get("http://127.0.0.1:8000/increment").json()["result"]
        for _ in range(10)
    ]

    # With the load shared between two replicas, neither counter can
    # reach 10.
    assert max(results) < 10

    config = serve.get_backend_config("counter:v1")
    config.num_replicas = 1
    serve.set_backend_config("counter:v1", config)

    results = [
        requests.get("http://127.0.0.1:8000/increment").json()["result"]
        for _ in range(10)
    ]
    # The second replica may take a moment to spin down, but the lone
    # survivor should serve the majority of these requests.
    assert max(results) - min(results) > 6
Exemplo n.º 10
0
import time

import requests

import ray
from ray.experimental import serve
from ray.experimental.serve.utils import pformat_color_json


def echo(_):
    """Backend that always fails, to demonstrate error propagation."""
    message = "Something went wrong..."
    raise Exception(message)


# Start Serve and block until its components are up.
serve.init(blocking=True)

serve.create_endpoint("my_endpoint", "/echo", blocking=True)
serve.create_backend(echo, "echo:v1")
serve.link("my_endpoint", "echo:v1")

# Over HTTP the backend's exception comes back as a JSON payload
# rather than being raised in this process.
for _ in range(2):
    resp = requests.get("http://127.0.0.1:8000/echo").json()
    print(pformat_color_json(resp))

    print("...Sleeping for 2 seconds...")
    time.sleep(2)

# Calling through a Serve handle re-raises the backend's exception
# (with traceback) at the ray.get call site below.
handle = serve.get_handle("my_endpoint")
print("Invoke from python will raise exception with traceback:")
ray.get(handle.remote())
Exemplo n.º 11
0
        "vtype": "ECG"
    }
}

# Create the (headless) ECG prediction service.
serve.create_endpoint("ECG")
# Create the data-ingestion service for the hospital; kwargs_creator
# is defined earlier in the file — presumably it builds default kwargs
# per request (verify against its definition).
serve.create_endpoint("hospital",
                      route="/hospital",
                      kwargs_creator=kwargs_creator)

# Create the backend for ECG. `model` and `cuda` (defined earlier in
# the file) are forwarded to PytorchPredictorECG's constructor.
b_config = BackendConfig(num_replicas=1)
serve.create_backend(PytorchPredictorECG,
                     "PredictECG",
                     model,
                     cuda,
                     backend_config=b_config)
# Link service and backend, then grab a handle for downstream use.
serve.link("ECG", "PredictECG")
handle = serve.get_handle("ECG")

# Prepare args for the StorePatientData backend.
service_handles_dict = {"ECG": handle}
# Do prediction after every 3750 queries.
num_queries_dict = {"ECG": 3750}
# Always keep num_replicas at 1: this is a stateful backend. It will
# store all the patients' data and forward batches to the respective
# prediction backend (the ECG handle in this case).
b_config_hospital = BackendConfig(num_replicas=1)
serve.create_backend(StorePatientData,
Exemplo n.º 12
0

class EchoActor:
    """Stateful backend that appends a fixed suffix to the request's
    optional ``message`` query-string parameter."""

    def __init__(self, message):
        # Suffix appended to every response.
        self.message = message

    def __call__(self, context):
        params = urls.url_decode(context["query_string"])
        prefix = params.get("message", "")
        return prefix + " " + self.message


# Start Serve and block until its components are up.
serve.init(blocking=True)

serve.create_endpoint("my_endpoint", "/echo", blocking=True)
# "world" is forwarded to EchoActor.__init__.
serve.create_backend(EchoActor, "echo:v1", "world")
serve.link("my_endpoint", "echo:v1")

# Ping the endpoint forever, both with and without a ?message=
# query parameter, printing each JSON response.
while True:
    resp = requests.get("http://127.0.0.1:8000/echo?message=hello").json()
    print(pformat_color_json(resp))

    resp = requests.get("http://127.0.0.1:8000/echo").json()
    print(pformat_color_json(resp))

    print("...Sleeping for 2 seconds...")
    time.sleep(2)
Exemplo n.º 13
0
def echo_v1(_):
    """Backend stub: always answers with its version tag."""
    return "v1"


def echo_v2(_):
    """Backend stub: always answers with its version tag."""
    return "v2"


# specify the router policy as RoundRobin
# Specify the router policy as RoundRobin.
serve.init(blocking=True, queueing_policy=serve.RoutePolicy.RoundRobin)

# Create a service.
serve.create_endpoint("my_endpoint", "/echo", blocking=True)

# Create the first backend.
serve.create_backend(echo_v1, "echo:v1")

# Create the second backend.
serve.create_backend(echo_v2, "echo:v2")

# Link the service to both backends with a 50/50 traffic split; the
# round-robin policy alternates requests between them.
serve.split("my_endpoint", {"echo:v1": 0.5, "echo:v2": 0.5})

# Poll the endpoint forever to observe responses alternating
# between "v1" and "v2".
while True:
    resp = requests.get("http://127.0.0.1:8000/echo").json()
    print(pformat_color_json(resp))

    print("...Sleeping for 2 seconds...")
    time.sleep(2)
Exemplo n.º 14
0
            for flask_request in flask_request_list:
                base_number = int(flask_request.args.get("base_number", "0"))
                result.append(base_number)
            return list(map(lambda x: x + self.increment, result))
        else:
            result = []
            for b in base_number:
                ans = b + self.increment
                result.append(ans)
            return result


# Start Serve and wire a batched MagicCounter backend to /counter.
serve.init(blocking=True)
serve.create_endpoint("magic_counter", "/counter", blocking=True)
b_config = BackendConfig(max_batch_size=5)
serve.create_backend(MagicCounter, "counter:v1", 42,
                     backend_config=b_config)  # increment=42
serve.link("magic_counter", "counter:v1")

# Exercise the endpoint over HTTP first...
print("Sending ten queries via HTTP")
for i in range(10):
    url = "http://127.0.0.1:8000/counter?base_number={}".format(i)
    print("> Pinging {}".format(url))
    resp = requests.get(url).json()
    print(pformat_color_json(resp))

    time.sleep(0.2)

# ...then through a Serve handle from Python.
print("Sending ten queries via Python")
handle = serve.get_handle("magic_counter")
for i in range(10):
    print("> Pinging handle.remote(base_number={})".format(i))
Exemplo n.º 15
0
def benchmark(func, name):
    """Warm up `func` with NUM_WARMUPS unprofiled calls, then invoke it
    NUM_REPEATS times inside a profile(name) block so the samples are
    recorded under `name`."""
    warmup_rounds = range(NUM_WARMUPS)
    for _ in warmup_rounds:
        func()

    measured_rounds = range(NUM_REPEATS)
    for _ in measured_rounds:
        with profile(name):
            func()


def work(_):
    """Serve backend that simulates 50 ms of blocking work."""
    simulated_latency_s = 0.05
    time.sleep(simulated_latency_s)


@ray.remote
def work_ray():
    # Plain Ray task doing the same 50 ms sleep as `work`, used as the
    # baseline to compare against the round trip through Serve.
    time.sleep(0.05)


# Stand up a Serve endpoint backed by the sleeping function.
serve.init()
serve.create_endpoint('sleep', '/')
serve.create_backend(work, 'sleep:v1')
serve.link('sleep', 'sleep:v1')

handle = serve.get_handle('sleep')

# Compare a round trip through Serve against a raw Ray task.
benchmark(lambda: ray.get(handle.remote()), "serve_sleep")
benchmark(lambda: ray.get(work_ray.remote()), "ray_sleep")

# Print the timing results collected by profile().
summarize_profile()
Exemplo n.º 16
0
from ray.experimental.serve.utils import pformat_color_json


class MagicCounter:
    """Backend that adds a fixed increment to each request's number."""

    def __init__(self, increment):
        # Constant added to every base_number.
        self.increment = increment

    def __call__(self, flask_request, base_number=None):
        value = base_number
        if serve.context.web:
            # HTTP path: the number arrives as a query-string argument
            # (defaulting to "0"); handle calls pass it as a kwarg.
            value = int(flask_request.args.get("base_number", "0"))
        return value + self.increment


# Start Serve and wire MagicCounter to /counter.
serve.init(blocking=True)
serve.create_endpoint("magic_counter", "/counter", blocking=True)
serve.create_backend(MagicCounter, "counter:v1", 42)  # increment=42
serve.link("magic_counter", "counter:v1")

# Exercise the endpoint over HTTP first...
print("Sending ten queries via HTTP")
for i in range(10):
    url = "http://127.0.0.1:8000/counter?base_number={}".format(i)
    print("> Pinging {}".format(url))
    resp = requests.get(url).json()
    print(pformat_color_json(resp))

    time.sleep(0.2)

# ...then through a Serve handle from Python.
print("Sending ten queries via Python")
handle = serve.get_handle("magic_counter")
for i in range(10):
    print("> Pinging handle.remote(base_number={})".format(i))