def test_wait_timeout(init):
    """After a 5s wait timeout, the earliest of 10 staggered tasks is ready."""
    loop = asyncio.get_event_loop()
    object_ids = gen_tasks(10)
    wrapped = list(map(async_api.as_future, object_ids))
    done, _ = loop.run_until_complete(asyncio.wait(wrapped, timeout=5))
    assert list(done)[0] == wrapped[0]
def test_wait_timeout(init):
    """asyncio.wait with a timeout reports the fastest future as ready."""
    event_loop = asyncio.get_event_loop()
    ids = gen_tasks(10)
    futs = [async_api.as_future(one_id) for one_id in ids]
    pending_wait = asyncio.wait(futs, timeout=5)
    ready, _pending = event_loop.run_until_complete(pending_wait)
    assert list(ready)[0] == futs[0]
def done_callback(future): result = future.result() # Result from async plasma, transparently pass it to user future if isinstance(future, PlasmaObjectFuture): if isinstance(result, ray.exceptions.RayTaskError): ray.worker.last_task_error_raise_time = time.time() user_future.set_exception(result.as_instanceof_cause()) else: user_future.set_result(result) else: # Result from direct call. assert isinstance(result, AsyncGetResponse), result if result.plasma_fallback_id is None: if isinstance(result.result, ray.exceptions.RayTaskError): ray.worker.last_task_error_raise_time = time.time() user_future.set_exception( result.result.as_instanceof_cause()) else: user_future.set_result(result.result) else: # Schedule plasma to async get, use the the same callback. retry_plasma_future = as_future(result.plasma_fallback_id) retry_plasma_future.add_done_callback(done_callback) # A hack to keep reference to the future so it doesn't get GC. user_future.retry_plasma_future = retry_plasma_future
def test_simple(init):
    """as_future on a task returning a large ndarray yields that ndarray."""

    @ray.remote
    def make_array():
        time.sleep(1)
        return np.zeros(1024 * 1024, dtype=np.uint8)

    loop = asyncio.get_event_loop()
    fetched = loop.run_until_complete(async_api.as_future(make_array.remote()))
    assert isinstance(fetched, np.ndarray)
async def test_async():
    """Time 50 rounds of gathering 20 remote results through asyncio."""
    total = 0.0
    for _round in range(50):
        object_ids = [f.remote(i) for i in range(20)]
        begin = time.time()
        await asyncio.gather(*map(async_api.as_future, object_ids))
        total += time.time() - begin
    return total
def test_simple(init):
    """A remote task's dict payload round-trips through as_future."""

    @ray.remote
    def produce():
        time.sleep(1)
        return {"key1": ["value"]}

    loop = asyncio.get_event_loop()
    outcome = loop.run_until_complete(async_api.as_future(produce.remote()))
    assert outcome["key1"] == ["value"]
def test_simple(init):
    """Awaiting a slow remote task yields the dict it returned."""
    event_loop = asyncio.get_event_loop()

    @ray.remote
    def f():
        time.sleep(1)
        return {"key1": ["value"]}

    fut = async_api.as_future(f.remote())
    got = event_loop.run_until_complete(fut)
    assert got["key1"] == ["value"]
def test_gather_mixup(init):
    """gather() preserves order across mixed ray futures and coroutines."""
    loop = asyncio.get_event_loop()

    @ray.remote
    def remote_sleep(n):
        time.sleep(n * 0.1)
        return n

    async def local_sleep(n):
        await asyncio.sleep(n * 0.1)
        return n

    mixed = [
        async_api.as_future(remote_sleep.remote(1)),
        local_sleep(2),
        async_api.as_future(remote_sleep.remote(3)),
        local_sleep(4),
    ]
    assert loop.run_until_complete(asyncio.gather(*mixed)) == [1, 2, 3, 4]
def test_gather_mixup(init):
    """Mixed ray/asyncio gather keeps ordering for (n, array) payloads."""
    loop = asyncio.get_event_loop()

    @ray.remote
    def f(n):
        time.sleep(n * 0.1)
        return n, np.zeros(1024 * 1024, dtype=np.uint8)

    async def g(n):
        await asyncio.sleep(n * 0.1)
        return n, np.zeros(1024 * 1024, dtype=np.uint8)

    pending = [
        async_api.as_future(f.remote(1)),
        g(2),
        async_api.as_future(f.remote(3)),
        g(4),
    ]
    gathered = loop.run_until_complete(asyncio.gather(*pending))
    firsts = [pair[0] for pair in gathered]
    assert firsts == [1, 2, 3, 4]
def test_gather_mixup(init):
    """Results come back in submission order from a mixed gather."""
    event_loop = asyncio.get_event_loop()

    @ray.remote
    def f(n):
        time.sleep(n * 0.1)
        return n

    async def g(n):
        await asyncio.sleep(n * 0.1)
        return n

    combined = [async_api.as_future(f.remote(1)), g(2),
                async_api.as_future(f.remote(3)), g(4)]
    outcome = event_loop.run_until_complete(asyncio.gather(*combined))
    assert outcome == [1, 2, 3, 4]
def test_wait_mixup(init):
    """Only the sub-4s ray task and coroutine finish before the wait timeout."""
    loop = asyncio.get_event_loop()

    @ray.remote
    def f(n):
        time.sleep(n)
        return n, np.zeros(1024 * 1024, dtype=np.uint8)

    def schedule_sleep(n):
        async def _sleeper(delay):
            await asyncio.sleep(delay)
            return delay

        return asyncio.ensure_future(_sleeper(n))

    tasks = [
        async_api.as_future(f.remote(0.1)),
        schedule_sleep(7),
        async_api.as_future(f.remote(5)),
        schedule_sleep(2),
    ]
    ready, _unfinished = loop.run_until_complete(
        asyncio.wait(tasks, timeout=4))
    assert set(ready) == {tasks[0], tasks[-1]}
def test_wait_mixup(init):
    """Only tasks faster than the 4s timeout land in the ready set."""
    event_loop = asyncio.get_event_loop()

    @ray.remote
    def remote_wait(n):
        time.sleep(n)
        return n

    def local_wait(n):
        async def _inner(_n):
            await asyncio.sleep(_n)
            return _n

        return asyncio.ensure_future(_inner(n))

    futures = [
        async_api.as_future(remote_wait.remote(0.1)),
        local_wait(7),
        async_api.as_future(remote_wait.remote(5)),
        local_wait(2),
    ]
    done, _ = event_loop.run_until_complete(asyncio.wait(futures, timeout=4))
    assert set(done) == {futures[0], futures[-1]}
def main_async():
    """Run two async-wrapped remote sleeps concurrently and print the elapsed time."""
    # Delayed imports keep these optional dependencies out of module import time.
    import asyncio
    from ray.experimental import async_api

    ray.init(num_cpus=4)
    remote_worker = Worker.remote()
    loop = asyncio.get_event_loop()
    t_zero = time.time()
    tasks = [
        async_api.as_future(remote_worker.sleep.remote(i))
        for i in range(1, 3)
    ]
    # BUG FIX: asyncio.gather takes awaitables as positional arguments;
    # passing the bare list raised a TypeError (a list is not awaitable).
    # Unpack with * as the other gather call sites in this file do.
    loop.run_until_complete(asyncio.gather(*tasks))
    print('delta', time.time() - t_zero)
def get_async(object_id):
    """Asyncio compatible version of ray.get.

    Returns an asyncio future that is fulfilled with the object's value
    (or raises the task's error as an exception for RayTaskError results).
    Only a single ObjectID is supported, not a batch.
    """
    # Delayed import because raylet imports this file and
    # a top-level import would create a circular import.
    from ray.experimental.async_api import init as async_api_init, as_future
    from ray.experimental.async_plasma import PlasmaObjectFuture
    assert isinstance(object_id, ray.ObjectID), "Batched get is not supported."

    # Setup.
    async_api_init()
    loop = asyncio.get_event_loop()
    core_worker = ray.worker.global_worker.core_worker

    # Here's the callback used to implement the async get logic.
    # What we want:
    # - If direct call, first try to get it from the in-memory store.
    #   If the object was promoted to plasma, retry it from the plasma API.
    # - If not direct call, directly use the plasma API to get it.
    user_future = loop.create_future()

    # We have three future objects here.
    # user_future is directly returned to the user from this function,
    #     and it will be eventually fulfilled with the final result.
    # inner_future is the first attempt to retrieve the object. It can be
    #     fulfilled by either core_worker.get_async or plasma_api.as_future.
    #     When inner_future completes, done_callback will be invoked. This
    #     callback sets the final object in user_future if the object hasn't
    #     been promoted to plasma, otherwise it will retry from plasma.
    # retry_plasma_future is only created when we are getting objects that
    #     were promoted to plasma. It will also invoke the done_callback
    #     when it's fulfilled.
    def done_callback(future):
        result = future.result()
        # Result from async plasma, transparently pass it to the user future.
        if isinstance(future, PlasmaObjectFuture):
            if isinstance(result, ray.exceptions.RayTaskError):
                ray.worker.last_task_error_raise_time = time.time()
                user_future.set_exception(result.as_instanceof_cause())
            else:
                user_future.set_result(result)
        else:
            # Result from direct call.
            assert isinstance(result, AsyncGetResponse), result
            if result.plasma_fallback_id is None:
                if isinstance(result.result, ray.exceptions.RayTaskError):
                    ray.worker.last_task_error_raise_time = time.time()
                    user_future.set_exception(
                        result.result.as_instanceof_cause())
                else:
                    user_future.set_result(result.result)
            else:
                # Schedule plasma to async get, using the same callback.
                retry_plasma_future = as_future(result.plasma_fallback_id)
                retry_plasma_future.add_done_callback(done_callback)
                # A hack to keep a reference to the future so it doesn't
                # get GC'd.
                user_future.retry_plasma_future = retry_plasma_future

    if object_id.is_direct_call_type():
        # Direct call: first attempt the in-memory store.
        inner_future = loop.create_future()
        core_worker.in_memory_store_get_async(object_id, inner_future)
    else:
        # Non-direct call: go straight to the plasma API.
        inner_future = as_future(object_id)
    inner_future.add_done_callback(done_callback)
    # A hack to keep a reference to inner_future so it doesn't get GC'd.
    user_future.inner_future = inner_future
    # A hack to keep a reference to the object ID for ref counting.
    user_future.object_id = object_id
    return user_future
def test_gather(init):
    """gather over as_future wrappers matches synchronous ray.get."""
    loop = asyncio.get_event_loop()
    object_ids = gen_tasks()
    wrapped = [async_api.as_future(oid) for oid in object_ids]
    async_results = loop.run_until_complete(asyncio.gather(*wrapped))
    for via_async, via_get in zip(async_results, ray.get(object_ids)):
        assert via_async == via_get
def test_wait(init):
    """asyncio.wait with no timeout completes every wrapped future."""
    loop = asyncio.get_event_loop()
    ids = gen_tasks()
    futs = list(map(async_api.as_future, ids))
    done, _ = loop.run_until_complete(asyncio.wait(futs))
    assert set(done) == set(futs)
def test_gather(init):
    """First elements of gathered results match those from ray.get."""
    event_loop = asyncio.get_event_loop()
    object_ids = gen_tasks()
    futures = [async_api.as_future(oid) for oid in object_ids]
    gathered = event_loop.run_until_complete(asyncio.gather(*futures))
    direct = ray.get(object_ids)
    assert all(x[0] == y[0] for x, y in zip(gathered, direct))
def test_wait(init):
    """Waiting without a timeout marks all futures as done."""
    event_loop = asyncio.get_event_loop()
    task_ids = gen_tasks()
    wrapped_futures = [async_api.as_future(tid) for tid in task_ids]
    finished, _pending = event_loop.run_until_complete(
        asyncio.wait(wrapped_futures))
    assert set(finished) == set(wrapped_futures)
task_index=worker_n) self.sess = tf.Session(target=server.target) print("Worker %d: waiting for cluster connection..." % worker_n) self.sess.run(tf.report_uninitialized_variables()) print("Worker %d: cluster ready!" % worker_n) while self.sess.run(tf.report_uninitialized_variables()): print("Worker %d: waiting for variable initialization..." % worker_n) sleep(1.0) print("Worker %d: variables initialized" % worker_n) def add(self, value): self.sess.run(self.var.assign_add(value)) print(self.sess.run(self.var)) ps = ParameterServer.remote(cluster_addresses) worker_list = [ Worker.remote(cluster_addresses, worker_n) for worker_n in range(concurrency) ] loop = asyncio.get_event_loop() tasks = [ async_api.as_future(worker.add.remote(value)) for value, worker in enumerate(worker_list) ] loop.run_until_complete(asyncio.gather(*tasks))