def testSucceedNeverReraise(self):
    """With reraise=True, the final attempt's exception must surface
    directly on the future (not wrapped as a __cause__)."""
    loop = global_event_loop()
    always_fails = SucceedNever()
    attempt = functools.partial(loop.run_in_executor, None, always_fails)
    wrapped = retry(
        reraise=True,
        try_max=4,
        try_timeout=None,
        delay_func=RandomExponentialBackoff(multiplier=0.1, base=2),
    )(attempt)
    done, pending = loop.run_until_complete(wait([wrapped()]))
    self.assertEqual(len(done), 1)
    self.assertTrue(isinstance(done[0].exception(), SucceedNeverException))
def testHangForever(self):
    """A call that never returns must be cut off by try_timeout, and the
    resulting failure must chain a TimeoutError as its __cause__."""
    loop = global_event_loop()
    hanging = HangForever()
    attempt = functools.partial(loop.run_in_executor, None, hanging)
    wrapped = retry(
        try_max=2,
        try_timeout=0.1,
        delay_func=RandomExponentialBackoff(multiplier=0.1, base=2),
    )(attempt)
    done, pending = loop.run_until_complete(wait([wrapped()]))
    self.assertEqual(len(done), 1)
    self.assertTrue(isinstance(done[0].exception().__cause__, TimeoutError))
def testCancelRetry(self):
    """Cancelling the outer retry future while attempts are still being
    retried must leave the future in the cancelled state."""
    loop = global_event_loop()
    always_fails = SucceedNever()
    attempt = functools.partial(loop.run_in_executor, None, always_fails)
    wrapped = retry(
        try_timeout=0.1,
        delay_func=RandomExponentialBackoff(multiplier=0.1, base=2),
    )(attempt)
    retry_future = wrapped()
    # Cancel mid-flight, after at least one timed-out attempt.
    loop.call_later(0.3, retry_future.cancel)
    done, pending = loop.run_until_complete(wait([retry_future]))
    self.assertEqual(len(done), 1)
    self.assertTrue(done[0].cancelled())
def iter_completed(futures, max_jobs=None, max_load=None, loop=None):
	"""
	This is similar to asyncio.as_completed, but takes an iterator of
	futures as input, and includes support for max_jobs and max_load
	parameters.

	@param futures: iterator of asyncio.Future (or compatible)
	@type futures: iterator
	@param max_jobs: max number of futures to process concurrently (default
		is multiprocessing.cpu_count())
	@type max_jobs: int
	@param max_load: max load allowed when scheduling a new future,
		otherwise schedule no more than 1 future at a time (default
		is multiprocessing.cpu_count())
	@type max_load: int or float
	@param loop: event loop
	@type loop: EventLoop
	@return: iterator of futures that are done
	@rtype: iterator
	"""
	loop = loop or global_event_loop()
	max_jobs = max_jobs or multiprocessing.cpu_count()
	max_load = max_load or multiprocessing.cpu_count()

	future_map = {}
	def task_generator():
		for future in futures:
			future_map[id(future)] = future
			yield AsyncTaskFuture(future=future)

	scheduler = TaskScheduler(
		task_generator(),
		max_jobs=max_jobs,
		max_load=max_load,
		event_loop=loop)

	try:
		scheduler.start()

		# scheduler should ensure that future_map is non-empty until
		# task_generator is exhausted
		while future_map:
			# BUG FIX: wait() takes the futures as a single iterable
			# argument; the previous *-unpacking passed each future as a
			# separate positional argument and broke the call signature
			# (return_when would have collided with the extra positionals).
			done, pending = loop.run_until_complete(
				wait(list(future_map.values()), return_when=FIRST_COMPLETED))
			for future in done:
				del future_map[id(future)]
				yield future
	finally:
		# cleanup in case of interruption by SIGINT, etc
		scheduler.cancel()
		scheduler.wait()
def iter_completed(futures, max_jobs=None, max_load=None, loop=None):
	"""
	Yield futures as they finish, like asyncio.as_completed, while
	limiting concurrency via max_jobs and max_load.

	@param futures: iterator of asyncio.Future (or compatible)
	@type futures: iterator
	@param max_jobs: max number of futures to process concurrently (default
		is multiprocessing.cpu_count())
	@type max_jobs: int
	@param max_load: max load allowed when scheduling a new future,
		otherwise schedule no more than 1 future at a time (default
		is multiprocessing.cpu_count())
	@type max_load: int or float
	@param loop: event loop
	@type loop: EventLoop
	@return: iterator of futures that are done
	@rtype: iterator
	"""
	loop = loop or global_event_loop()
	max_jobs = max_jobs or multiprocessing.cpu_count()
	max_load = max_load or multiprocessing.cpu_count()

	# Maps id(future) -> future for every future handed to the scheduler
	# that has not yet been yielded back to the caller.
	pending_map = {}

	def produce_tasks():
		for fut in futures:
			pending_map[id(fut)] = fut
			yield AsyncTaskFuture(future=fut)

	scheduler = TaskScheduler(
		produce_tasks(),
		max_jobs=max_jobs,
		max_load=max_load,
		event_loop=loop)

	try:
		scheduler.start()

		# The scheduler keeps pending_map non-empty until produce_tasks
		# has been exhausted.
		while pending_map:
			done, pending = loop.run_until_complete(
				wait(list(pending_map.values()),
					return_when=FIRST_COMPLETED))
			for fut in done:
				del pending_map[id(fut)]
				yield fut
	finally:
		# cleanup in case of interruption by SIGINT, etc
		scheduler.cancel()
		scheduler.wait()