コード例 #1
0
def test_not_killing_replicas(serve_instance):
    """Updating max_batch_size must not restart the backend's replicas."""

    class BatchSimple:
        def __init__(self):
            self.count = 0

        @serve.accept_batch
        def __call__(self, flask_request, temp=None):
            # One result per query in the batch.
            return [1] * serve.context.batch_size

    serve.create_endpoint("bsimple", "/bsimple")
    serve.create_backend(
        BatchSimple,
        "bsimple:v1",
        backend_config=BackendConfig(num_replicas=3, max_batch_size=2))
    master = serve.api._get_master_actor()
    tags_before = ray.get(master._list_replicas.remote("bsimple:v1"))

    # Bump the batch size; this should be applied in place.
    config = serve.get_backend_config("bsimple:v1")
    config.max_batch_size = 5
    serve.set_backend_config("bsimple:v1", config)

    tags_after = ray.get(master._list_replicas.remote("bsimple:v1"))
    live_tags = []
    for worker_dict in ray.get(
            master.get_all_worker_handles.remote()).values():
        live_tags.extend(worker_dict.keys())

    # The replica tags are unchanged and all of them are still live.
    assert set(tags_before) <= set(live_tags)
    assert set(tags_before) == set(tags_after)
コード例 #2
0
ファイル: test_api.py プロジェクト: snakecy/ray
def test_batching(serve_instance):
    """Queries sent together should be served in batches, not one by one."""

    class BatchingExample:
        def __init__(self):
            self.count = 0

        @serve.accept_batch
        def __call__(self, flask_request, temp=None):
            # count advances once per *batch*, not once per query.
            self.count += 1
            return [self.count] * serve.context.batch_size

    serve.create_endpoint("counter1", "/increment")

    # Poll the routing table until the new route shows up.
    while "/increment" not in requests.get(
            "http://127.0.0.1:8000/-/routes").json():
        time.sleep(0.2)

    # Cap batches at five queries.
    serve.create_backend(
        BatchingExample,
        "counter:v11",
        backend_config=BackendConfig(max_batch_size=5))
    serve.link("counter1", "counter:v11")

    handle = serve.get_handle("counter1")
    futures = [handle.remote(temp=1) for _ in range(20)]
    results = ray.get(futures)

    # If at least one __call__ saw a batch larger than one query, the
    # counter cannot have reached 20.
    assert max(results) < 20
コード例 #3
0
def test_killing_replicas(serve_instance):
    """Changing num_cpus must replace the backend's replica actors."""

    class Simple:
        def __init__(self):
            self.count = 0

        def __call__(self, flask_request, temp=None):
            return temp

    serve.create_endpoint("simple", "/simple")
    serve.create_backend(
        Simple,
        "simple:v1",
        backend_config=BackendConfig(num_replicas=3, num_cpus=2))
    master = serve.api._get_master_actor()
    tags_before = ray.get(master._list_replicas.remote("simple:v1"))

    # A resource change cannot be applied in place.
    config = serve.get_backend_config("simple:v1")
    config.num_cpus = 1
    serve.set_backend_config("simple:v1", config)

    tags_after = ray.get(master._list_replicas.remote("simple:v1"))
    live_tags = []
    for worker_dict in ray.get(
            master.get_all_worker_handles.remote()).values():
        live_tags.extend(worker_dict.keys())

    # Every new replica is registered and live ...
    assert set(tags_after) <= set(live_tags)
    # ... and the old replica set is no longer fully present.
    assert not set(tags_before) <= set(live_tags)
コード例 #4
0
ファイル: test_api.py プロジェクト: w0617/ray
def test_not_killing_replicas(serve_instance):
    """max_batch_size updates should reuse the existing replica actors."""

    class BatchSimple:
        def __init__(self):
            self.count = 0

        @serve.accept_batch
        def __call__(self, flask_request, temp=None):
            # One result per query in the batch.
            return [1] * serve.context.batch_size

    serve.create_endpoint("bsimple", "/bsimple")
    serve.create_backend(
        BatchSimple,
        "bsimple:v1",
        backend_config=BackendConfig(num_replicas=3, max_batch_size=2))
    global_state = serve.api._get_global_state()
    tags_before = global_state.backend_table.list_replicas("bsimple:v1")

    # Reconfigure the batch size in place.
    config = serve.get_backend_config("bsimple:v1")
    config.max_batch_size = 5
    serve.set_backend_config("bsimple:v1", config)

    tags_after = global_state.backend_table.list_replicas("bsimple:v1")
    global_state.refresh_actor_handle_cache()
    live_tags = list(global_state.actor_handle_cache.keys())

    # Same replica tags before and after, and all of them still live.
    assert set(tags_before) <= set(live_tags)
    assert set(tags_before) == set(tags_after)
コード例 #5
0
ファイル: test_api.py プロジェクト: w0617/ray
def test_killing_replicas(serve_instance):
    """num_cpus updates must tear down and replace the replica actors."""

    class Simple:
        def __init__(self):
            self.count = 0

        def __call__(self, flask_request, temp=None):
            return temp

    serve.create_endpoint("simple", "/simple")
    serve.create_backend(
        Simple,
        "simple:v1",
        backend_config=BackendConfig(num_replicas=3, num_cpus=2))
    global_state = serve.api._get_global_state()
    tags_before = global_state.backend_table.list_replicas("simple:v1")

    # A resource change cannot be applied in place.
    config = serve.get_backend_config("simple:v1")
    config.num_cpus = 1
    serve.set_backend_config("simple:v1", config)

    tags_after = global_state.backend_table.list_replicas("simple:v1")
    global_state.refresh_actor_handle_cache()
    live_tags = list(global_state.actor_handle_cache.keys())

    # Every new replica is live ...
    assert set(tags_after) <= set(live_tags)
    # ... and the old replica set is no longer fully present.
    assert not set(tags_before) <= set(live_tags)
コード例 #6
0
ファイル: handle.py プロジェクト: dzorlu/minerl_rllib
async def run_test(num_replicas, num_forwarders, sync):
    """Benchmark serve handle throughput, optionally through forwarders.

    Relies on module-level ``worker``, ``ForwardActor``,
    ``max_concurrent_queries`` and ``num_queries``.
    """
    client = serve.start()
    client.create_backend(
        "worker",
        worker,
        config=BackendConfig(
            num_replicas=num_replicas,
            max_concurrent_queries=max_concurrent_queries))
    client.create_endpoint("worker", backend="worker")
    endpoint_name = "worker"

    if num_forwarders > 0:
        # Route through forwarder replicas instead of hitting the
        # worker endpoint directly.
        client.create_backend(
            "ForwardActor",
            ForwardActor,
            sync,
            config=BackendConfig(
                num_replicas=num_forwarders,
                max_concurrent_queries=max_concurrent_queries))
        client.create_endpoint("ForwardActor", backend="ForwardActor")
        endpoint_name = "ForwardActor"

    handle = client.get_handle(endpoint_name, sync=sync)

    # Warmup for ~1s - lets gc.collect() finish and the actors start.
    deadline = time.time() + 1
    while time.time() < deadline:
        if sync:
            ray.get(handle.remote())
        else:
            ray.get(await handle.remote_async())

    # Timed run.
    start = time.time()
    if sync:
        ray.get([handle.remote() for _ in range(num_queries)])
    else:
        ray.get([(await handle.remote_async()) for _ in range(num_queries)])
    qps = num_queries / (time.time() - start)

    print(
        f"Sync: {sync}, {num_forwarders} forwarders and {num_replicas} worker "
        f"replicas: {int(qps)} requests/s")
    client.shutdown()
コード例 #7
0
ファイル: ray_wrapper.py プロジェクト: defangc23/ModelServing
 def backend_updateconf(self,
                        backend_name,
                        scale_config=None,
                        user_config=None):
     """Push scaling and/or user-config updates to a serve backend.

     Either argument may be None, in which case that update is skipped.
     """
     if scale_config:
         self.client.update_backend_config(backend_name, scale_config)
     if user_config:
         from ray.serve import BackendConfig
         self.client.update_backend_config(
             backend_name, BackendConfig(user_config=user_config))
コード例 #8
0
def test_scaling_replicas(serve_instance):
    """Scaling a backend down to one replica concentrates traffic on it."""

    class Counter:
        def __init__(self):
            self.count = 0

        def __call__(self, _):
            self.count += 1
            return self.count

    serve.create_endpoint("counter", "/increment")

    # Poll the routing table until the new route shows up.
    while "/increment" not in requests.get(
            "http://127.0.0.1:8000/-/routes").json():
        time.sleep(0.2)

    serve.create_backend(
        Counter, "counter:v1", backend_config=BackendConfig(num_replicas=2))
    serve.set_traffic("counter", {"counter:v1": 1.0})

    results = [
        requests.get("http://127.0.0.1:8000/increment").json()
        for _ in range(10)
    ]
    # With two replicas sharing the load, neither counter can reach 10.
    assert max(results) < 10

    # Scale down to a single replica.
    config = serve.get_backend_config("counter:v1")
    config.num_replicas = 1
    serve.set_backend_config("counter:v1", config)

    results = [
        requests.get("http://127.0.0.1:8000/increment").json()
        for _ in range(10)
    ]
    # Give some time for a replica to spin down. But majority of the request
    # should be served by the only remaining replica.
    assert max(results) - min(results) > 6
コード例 #9
0
ファイル: handle.py プロジェクト: zseymour/ray
def run_test(num_replicas, num_forwarders):
    """Measure handle QPS; relies on module-level ``client``, ``num_queries``
    and ``logger``."""
    client.update_backend_config(
        "hello_world", BackendConfig(num_replicas=num_replicas))

    if num_forwarders == 0:
        handle = client.get_handle("hello_world")
    else:
        # Route through the forwarder backend instead.
        client.update_backend_config(
            "ForwardActor", BackendConfig(num_replicas=num_forwarders))
        handle = client.get_handle("ForwardActor")

    # Warmup for ~1s - lets gc.collect() finish and the actors start.
    deadline = time.time() + 1
    while time.time() < deadline:
        ray.get(handle.remote())

    # Timed run.
    start = time.time()
    ray.get([handle.remote() for _ in range(num_queries)])
    qps = num_queries / (time.time() - start)

    logger.info("{} forwarders and {} worker replicas: {} requests/s".format(
        num_forwarders, num_replicas, int(qps)))
コード例 #10
0
ファイル: test_api.py プロジェクト: snakecy/ray
def test_batching_exception(serve_instance):
    """A batched backend that returns a scalar must raise on the caller."""

    class NoListReturned:
        def __init__(self):
            self.count = 0

        @serve.accept_batch
        def __call__(self, flask_request, temp=None):
            # Deliberately wrong: batched handlers must return a list
            # with one entry per query in the batch.
            return serve.context.batch_size

    serve.create_endpoint("exception-test", "/noListReturned")
    serve.create_backend(
        NoListReturned,
        "exception:v1",
        backend_config=BackendConfig(max_batch_size=5))
    serve.link("exception-test", "exception:v1")

    handle = serve.get_handle("exception-test")
    with pytest.raises(ray.exceptions.RayTaskError):
        assert ray.get(handle.remote(temp=1))
コード例 #11
0
ファイル: scalability.py プロジェクト: zseymour/ray
time_to_run = "10s"

# Reserve one CPU bundle per node so the deployment spreads cluster-wide.
pg = placement_group(
    [{
        "CPU": 1
    } for _ in range(expected_num_nodes)], strategy="STRICT_SPREAD")
ray.get(pg.ready())

# The number of replicas is the number of cores remaining after accounting
# for the one HTTP proxy actor on each node, the "hey" requester task on each
# node, and the serve controller.
# num_replicas = expected_num_nodes * (cpus_per_node - 2) - 1
# ray.available_resources() reports CPU counts as floats; BackendConfig
# expects an integer replica count, so cast explicitly.
num_replicas = int(ray.available_resources()["CPU"])
logger.info("Starting %i replicas", num_replicas)
client.create_backend(
    "hey", hey, config=BackendConfig(num_replicas=num_replicas))
client.create_endpoint("hey", backend="hey", route="/hey")


@ray.remote
def run_wrk():
    logger.info("Warming up for ~3 seconds")
    for _ in range(5):
        resp = requests.get("http://127.0.0.1:8000/hey").text
        logger.info("Received response \'" + resp + "\'")
        time.sleep(0.5)

    result = subprocess.run(
        [
            "wrk", "-c",
            str(num_connections), "-t",
コード例 #12
0
ファイル: scalability.py プロジェクト: dzorlu/minerl_rllib
def hey(_):
    """Serve handler: simulate 10 ms of work, then return a fixed payload."""
    time.sleep(0.01)
    return b"hey"


# Pin one CPU bundle per node so the deployment spreads cluster-wide.
pg = placement_group(
    [{"CPU": 1} for _ in range(expected_num_nodes)],
    strategy="STRICT_SPREAD")
ray.get(pg.ready())

logger.info("Starting %i replicas", num_replicas)
client.create_backend(
    "hey", hey, config=BackendConfig(num_replicas=num_replicas))
client.create_endpoint("hey", backend="hey", route="/hey")


@ray.remote(num_cpus=0)
def run_wrk():
    logger.info("Warming up")
    for _ in range(10):
        try:
            resp = requests.get("http://127.0.0.1:8000/hey").text
            logger.info("Received response '" + resp + "'")
            time.sleep(0.5)
        except Exception as e:
            logger.info(f"Got exception {e}")

    result = subprocess.run(
コード例 #13
0
ファイル: echo_actor_batch.py プロジェクト: yuishihara/ray
            result = []
            for flask_request in flask_request_list:
                base_number = int(flask_request.args.get("base_number", "0"))
                result.append(base_number)
            return list(map(lambda x: x + self.increment, result))
        else:
            result = []
            for b in base_number:
                ans = b + self.increment
                result.append(ans)
            return result


# Stand up the endpoint and a single batched backend (increment=42).
serve.init(blocking=True)
serve.create_endpoint("magic_counter", "/counter")
serve.create_backend(
    MagicCounter, "counter:v1", 42,
    backend_config=BackendConfig(max_batch_size=5))  # increment=42
serve.set_traffic("magic_counter", {"counter:v1": 1.0})

print("Sending ten queries via HTTP")
for query_num in range(10):
    url = "http://127.0.0.1:8000/counter?base_number={}".format(query_num)
    print("> Pinging {}".format(url))
    print(pformat_color_json(requests.get(url).json()))
    time.sleep(0.2)

print("Sending ten queries via Python")
handle = serve.get_handle("magic_counter")
コード例 #14
0
# The example below uses random (in Threshold.__init__) and requests
# (for the HTTP checks), so both must be imported here.
import random

import ray
import requests
from ray import serve
from ray.serve import BackendConfig

ray.init()
client = serve.start()


class Threshold:
    """Serve backend whose decision cutoff is driven by user_config."""

    def __init__(self):
        # Built once; reconfigure() never replaces it.
        self.model = random.Random()  # Stands in for a heavyweight model.

    def reconfigure(self, config):
        # Called when the backend starts and again whenever the
        # user_config field of BackendConfig is updated.
        self.threshold = config["threshold"]

    def __call__(self, request):
        score = self.model.random()
        return score > self.threshold


# Start with a cutoff almost every random draw clears.
backend_config = BackendConfig(user_config={"threshold": 0.01})
client.create_backend("threshold", Threshold, config=backend_config)
client.create_endpoint("threshold", backend="threshold", route="/threshold")
print(requests.get("http://127.0.0.1:8000/threshold").text)  # true, probably

# Raising the cutoff through user_config triggers reconfigure() in place.
backend_config = BackendConfig(user_config={"threshold": 0.99})
client.update_backend_config("threshold", backend_config)
print(requests.get("http://127.0.0.1:8000/threshold").text)  # false, probably