import itertools
import asyncio as aio
from asyncio_pool import AioPool


async def test_outer_join():
    todo, to_release = range(1, 15), range(10)
    done, released = [], []

    async def inner(n):
        nonlocal done
        await aio.sleep(1 / n)
        done.append(n)

    async def outer(n, pool):
        nonlocal released
        await pool.join()  # wait for the pool from outside the pool
        released.append(n)

    loop = aio.get_event_loop()
    pool = AioPool(size=100)

    pool.map_n(inner, todo)  # spawn inner workers without waiting
    joined = [loop.create_task(outer(j, pool)) for j in to_release]
    await pool.join()

    # this join races with the outer ones, so some of the outer tasks
    # may not have appended to `released` yet
    assert len(released) <= len(to_release)

    await aio.wait(joined)
    assert len(todo) == len(done) and len(released) == len(to_release)
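# `details` below awaits a shared `worker` coroutine that this snippet never
# defines. This is a minimal sketch consistent with the assertions in
# `details` (worker(n) returns n after a short sleep); the exact body is an
# assumption, not the library's code:
async def worker(n):
    await aio.sleep(1 / n)  # simulate a short, input-dependent computation
    return n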
async def details(todo=range(1, 11)):
    pool = AioPool(size=5)

    # This code:
    f1 = []
    for i in todo:
        f1.append(pool.spawn_n(worker(i)))

    # is equivalent to one call of `map_n`:
    f2 = pool.map_n(worker, todo)

    # Afterwards you can await any given future:
    try:
        assert 3 == await f1[2]  # result of spawn_n(worker(3))
    except BaseException:
        # an exception raised in the worker (including CancelledError)
        # will be re-raised here
        pass

    # Or use `asyncio.wait` to handle results in batches (see `iterwait` too):
    important_res = 0
    more_important = [f1[1], f2[1], f2[2]]
    while more_important:
        done, more_important = await aio.wait(more_important, timeout=0.5)
        # handle results; note that `result()` re-raises exceptions
        important_res += sum(f.result() for f in done)
    assert important_res == 2 + 2 + 3

    # But you need to join to let all the spawned workers finish
    # (of course you can `asyncio.wait` all of the futures if you want to)
    await pool.join()

    assert all(f.done() for f in itertools.chain(f1, f2))  # this is guaranteed
    assert 2 * sum(todo) == sum(f.result() for f in itertools.chain(f1, f2))
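# The `asyncio.wait` loop above can also be written with the pool's own
# `iterwait` helper mentioned in the comment. This is a sketch, not the
# documented API: it assumes `iterwait` takes the spawned futures and, with
# `flat=False`, yields batches of results as workers complete; the flag
# name and the yield semantics are assumptions here.
async def iterwait_sketch(todo=range(1, 11)):
    pool = AioPool(size=5)
    futures = pool.map_n(worker, todo)
    total = 0
    async for batch in pool.iterwait(futures, flat=False, timeout=0.5):
        total += sum(batch)  # assumed: each batch is a list of plain results
    assert total == sum(todo)
    await pool.join()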
async def test_cancel():
    async def wrk(*arg, **kw):
        await aio.sleep(0.5)
        return 1

    async def wrk_safe(*arg, **kw):
        try:
            await aio.sleep(0.5)
        except aio.CancelledError:
            await aio.sleep(0.1)  # simulate cleanup
        return 1

    pool = AioPool(size=5)

    f_quick = pool.spawn_n(aio.sleep(0.15))
    f_safe = await pool.spawn(wrk_safe())
    f3 = await pool.spawn(wrk())
    pool.spawn_n(wrk())
    f567 = pool.map_n(wrk, range(3))

    # cancel some
    await aio.sleep(0.1)
    cancelled, results = await pool.cancel(f3, f567[2])  # running and waiting
    assert cancelled == len(results) == 2  # none of them had time to finish
    assert all(isinstance(res, aio.CancelledError) for res in results)

    # cancel all others
    await aio.sleep(0.1)

    # not interrupted and finished successfully
    assert f_quick.done() and f_quick.result() is None

    cancelled, results = await pool.cancel()  # all
    assert cancelled == len(results) == 4
    assert f_safe.done() and f_safe.result() == 1  # could recover
    # the others could not
    assert sum(isinstance(res, aio.CancelledError) for res in results) == 3

    assert await pool.join()  # joins successfully (basically a no-op)
async def cancel_usage():
    async def wrk(*arg, **kw):
        await aio.sleep(0.5)
        return 1

    pool = AioPool(size=2)

    f_quick = pool.spawn_n(aio.sleep(0.1))
    f12 = await pool.spawn(wrk()), pool.spawn_n(wrk())
    f35 = pool.map_n(wrk, range(3))

    # At this point, if you cancel the futures returned by pool methods
    # directly, you just won't be able to retrieve the spawned tasks'
    # results; the tasks themselves will keep working. Don't do this:
    #   f_quick.cancel()
    # Use `pool.cancel` instead:

    # cancel some
    await aio.sleep(0.1)
    cancelled, results = await pool.cancel(f12[0], f35[2])  # running and waiting
    assert 2 == cancelled  # none of them had time to finish
    assert 2 == len(results) and \
        all(isinstance(res, aio.CancelledError) for res in results)

    # cancel all others
    await aio.sleep(0.1)

    # not interrupted and finished successfully
    assert f_quick.done() and f_quick.result() is None

    cancelled, results = await pool.cancel()  # all
    assert 3 == cancelled
    assert len(results) == 3 and \
        all(isinstance(res, aio.CancelledError) for res in results)

    assert await pool.join()  # joins successfully
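# None of the example coroutines above run by themselves. A minimal entry
# point for trying them out could look like this; the selection and order
# of calls are illustrative, not part of the library:
if __name__ == '__main__':
    for example in (test_outer_join, details, test_cancel, cancel_usage):
        aio.run(example())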