async def runner():
    # Outer group owns nothing directly; inner group g2 fans out five
    # foo() tasks, then parks on a long sleep so cancellation can be
    # observed and re-raised.
    # NOTE(review): defined in an enclosing test scope — relies on
    # taskgroup/foo/asyncio from that scope.
    async with taskgroup.TaskGroup():
        async with taskgroup.TaskGroup() as g2:
            for _ in range(5):
                g2.create_task(foo())
            # try/except placement reconstructed as inside g2 — confirm
            # against the original test intent.
            try:
                await asyncio.sleep(10)
            except asyncio.CancelledError:
                raise
async def runner(): nonlocal t1, t2 async with taskgroup.TaskGroup() as g: t1 = g.create_task(foo1()) t2 = g.create_task(foo2()) await asyncio.sleep(0.1) 1 / 0
async def runner(): nonlocal NUM async with taskgroup.TaskGroup(): try: await asyncio.sleep(10) except asyncio.CancelledError: NUM += 10 raise
async def runner(): nonlocal NUM, t2 async with taskgroup.TaskGroup() as g: g.create_task(foo1()) t2 = g.create_task(foo2()) NUM += 10
async def nested_runner():
    # Group 'g1' schedules a crashing task, then waits; when the crash
    # cancels the host coroutine it delays half a second before
    # re-raising, exercising slow cancellation handling.
    async with taskgroup.TaskGroup(name='g1') as g1:
        g1.create_task(crash_soon())
        try:
            await asyncio.sleep(10)
        except asyncio.CancelledError:
            await asyncio.sleep(0.5)
            raise
async def stop(self):
    """Close all backends concurrently, then stop the compiler manager.

    The compiler manager is stopped in a ``finally`` so it shuts down
    even if closing a backend fails.
    """
    # TODO: Make a graceful version of this.
    try:
        async with taskgroup.TaskGroup() as g:
            for backend in self._backends:
                g.create_task(backend.close())
    finally:
        await self._compiler_manager.stop()
async def runner(): nonlocal NUM async with taskgroup.TaskGroup(): try: await asyncio.sleep(10) except asyncio.CancelledError: NUM += 10 # This isn't a good idea, but we have to support # this weird case. raise MyExc
async def runner(): nonlocal NUM async with taskgroup.TaskGroup() as g: for _ in range(5): g.create_task(foo()) try: await asyncio.sleep(10) except asyncio.CancelledError: NUM += 10 raise
async def runner(): nonlocal NUM, runner_cancel async with taskgroup.TaskGroup() as g: g.create_task(foo1()) g.create_task(foo1()) g.create_task(foo1()) g.create_task(foo2()) try: await asyncio.sleep(10) except asyncio.CancelledError: runner_cancel = True raise NUM += 10
async def test_taskgroup_01(self):
    """Two tasks finish normally; results are readable after the group exits."""

    async def foo1():
        await asyncio.sleep(0.1)
        return 42

    async def foo2():
        await asyncio.sleep(0.2)
        return 11

    async with taskgroup.TaskGroup() as g:
        t1 = g.create_task(foo1())
        t2 = g.create_task(foo2())

    # Exiting the `async with` waits for both tasks, so results are set.
    self.assertEqual(t1.result(), 42)
    self.assertEqual(t2.result(), 11)
async def start(self):
    """Start the pool: spawn min-capacity workers, then the GC task.

    Raises RuntimeError if the manager is already running.  If any
    worker fails to spawn, the pool is stopped before re-raising.
    """
    if self._manager.is_running():
        raise RuntimeError('already running')
    await self._manager.start()
    try:
        async with taskgroup.TaskGroup(
                name=f'{self._name}-pool-spawn') as g:
            for i in range(self._min_capacity):
                g.create_task(self._spawn_worker())
    except taskgroup.TaskGroupError:
        # Roll back to a stopped state so the caller can retry.
        await self.stop()
        raise
    self._gc_task = asyncio.create_task(self._worker_gc())
async def test_procpool_9(self):
    """Flood a 1..10-capacity pool with 100 calls, then verify it still serves."""
    pool = await procpool.create_pool(
        max_capacity=10,
        min_capacity=1,
        gc_interval=0.01,
        runstate_dir=self.runstate_dir,
        worker_cls=Worker,
        worker_args=([123], ),
        name='test_procpool_9')
    try:
        async with taskgroup.TaskGroup() as g:
            for _ in range(100):
                g.create_task(pool.call('test1', 0.1))
        # Let the GC (interval 0.01) shrink the pool back down.
        await asyncio.sleep(1)
        await pool.call('test1', 0.1)
    finally:
        await pool.stop()
async def test_procpool_10(self):
    """Check spawn/kill stats under load, acquire(), and final shutdown."""
    pool = await procpool.create_pool(
        max_capacity=10,
        min_capacity=2,
        gc_interval=0.01,
        runstate_dir=self.runstate_dir,
        worker_cls=Worker,
        worker_args=([123], ),
        name='test_procpool_10')
    manager = pool.manager
    try:
        async with taskgroup.TaskGroup() as g:
            for _ in range(100):
                g.create_task(pool.call('test1', 0.1))
        # GC shrinks 10 workers back toward min_capacity=2.
        await asyncio.sleep(0.5)
        self.assertEqual(manager._stats_spawned, 10)
        self.assertEqual(manager._stats_killed, 8)

        # Acquiring three workers forces one extra spawn (2 idle + 1 new).
        w1 = await pool.acquire()
        w2 = await pool.acquire()
        w3 = await pool.acquire()
        await asyncio.sleep(0.5)
        self.assertEqual(manager._stats_spawned, 11)
        self.assertEqual(manager._stats_killed, 8)

        # Acquired workers are exempt from GC while in use.
        await w1.call('test1', 0.1)
        await w2.call('test1', 0.1)
        await w3.call('test1', 0.1)
        self.assertEqual(manager._stats_spawned, 11)
        self.assertEqual(manager._stats_killed, 8)
    finally:
        await pool.stop()

    # Placement reconstructed: final stats checked after stop() — every
    # spawned worker should have been killed.  Confirm against original.
    self.assertEqual(manager._stats_spawned, 11)
    self.assertEqual(manager._stats_killed, 11)
async def test_taskgroup_03(self):
    """Explicitly cancelling one task must not disturb its siblings."""

    async def foo1():
        await asyncio.sleep(1)
        return 42

    async def foo2():
        await asyncio.sleep(0.2)
        return 11

    async with taskgroup.TaskGroup() as g:
        t1 = g.create_task(foo1())
        await asyncio.sleep(0.15)
        # cancel t1 explicitly, i.e. everything should continue
        # working as expected.
        t1.cancel()
        t2 = g.create_task(foo2())

    self.assertTrue(t1.cancelled())
    self.assertEqual(t2.result(), 11)
async def new_backend(self, *, dbname: str, dbver: int):
    """Create a Backend: a pg connection plus a connected compiler worker.

    The pg connection and the compiler 'connect' call run concurrently
    in a TaskGroup.  On any failure, both half-built resources are torn
    down before the exception is re-raised.
    """
    try:
        compiler = None
        async with taskgroup.TaskGroup() as g:
            new_pgcon = g.create_task(pgcon.connect(self._pgaddr, dbname))
            compiler = await self._compiler_manager.spawn_worker()
            g.create_task(compiler.call('connect', dbname, dbver))
    except Exception:
        # Best-effort cleanup: close the compiler first, then abort the
        # pg connection if it finished successfully.
        # NOTE(review): if the failure happens before `new_pgcon` is
        # bound, the finally block would raise NameError — confirm
        # whether that path is reachable.
        try:
            if compiler is not None:
                compiler.close()
        finally:
            if (new_pgcon.done() and
                    not new_pgcon.cancelled() and
                    not new_pgcon.exception()):
                con = new_pgcon.result()
                con.abort()
        raise
    backend = Backend(new_pgcon.result(), compiler)
    self._backends.add(backend)
    return backend
async def runner():
    # Minimal host: run foo1 and foo2 to completion in one group.
    async with taskgroup.TaskGroup() as g:
        g.create_task(foo1())
        g.create_task(foo2())
async def runner():
    # Schedules a crashing task while the host body awaits nested() —
    # exercises crash-while-host-is-awaiting behavior.
    async with taskgroup.TaskGroup() as g:
        g.create_task(crash_soon())
        await nested()
async def runner():
    # Nested groups with staggered crashes: inner 'g2' crashes at 0.1s,
    # outer 'g1' at 0.2s — exercises inner-before-outer failure order.
    async with taskgroup.TaskGroup(name='g1') as g1:
        g1.create_task(crash_after(0.2))
        async with taskgroup.TaskGroup(name='g2') as g2:
            g2.create_task(crash_after(0.1))
async def runner():
    # Fan out five foo() tasks and wait for the group to drain them.
    async with taskgroup.TaskGroup() as g:
        for _ in range(5):
            g.create_task(foo())