async def test_timer():
    """Verify that a timer fires on every interval and stops on cancel."""
    count = 0

    async def tick(interval):
        # Each firing receives the configured interval.
        assert interval == 0.1
        nonlocal count
        count += 1

    # Default policy: three intervals fit into 0.3 s.
    count = 0
    tmr = aiotools.create_timer(tick, 0.1)
    await asyncio.sleep(0.3)
    tmr.cancel()
    await tmr
    assert count == 3

    # CANCEL policy: the callback completes instantly, so the count matches.
    count = 0
    tmr = aiotools.create_timer(tick, 0.1, aiotools.TimerDelayPolicy.CANCEL)
    await asyncio.sleep(0.3)
    tmr.cancel()
    await tmr
    # should have same results
    assert count == 3
async def test_timer_leak_nocancel():
    '''
    Test the effect of TimerDelayPolicy.CANCEL which always cancels any
    pending previous tasks on each interval.

    The fired task finishes within the same tick (``sleep(0)``), so the
    CANCEL policy should never actually have to cancel anything.
    '''
    spawn_count = 0
    cancel_count = 0
    done_count = 0

    async def delayed(interval):
        nonlocal spawn_count, cancel_count, done_count
        spawn_count += 1
        try:
            await asyncio.sleep(0)
        except asyncio.CancelledError:
            cancel_count += 1
        else:
            done_count += 1

    # FIX: asyncio.Task.all_tasks() was deprecated in 3.7 and removed in
    # Python 3.9; asyncio.all_tasks() is the supported replacement.
    task_count = len(asyncio.all_tasks())
    timer = aiotools.create_timer(delayed, 0.01, aiotools.TimerDelayPolicy.CANCEL)
    await asyncio.sleep(0.1)
    timer.cancel()
    await timer
    # No task accumulation beyond the timer task itself.
    assert task_count + 1 >= len(asyncio.all_tasks())
    assert spawn_count == done_count
    assert cancel_count == 0
async def test_timer_leak_default():
    '''
    Test if the timer-fired tasks are cleaned up properly even when each
    timer-fired task takes longer than the timer interval.
    (In this case they will accumulate indefinitely!)
    '''
    spawn_count = 0
    cancel_count = 0
    done_count = 0

    async def delayed(interval):
        nonlocal spawn_count, cancel_count, done_count
        spawn_count += 1
        try:
            # Deliberately longer (0.05 s) than the timer interval (0.01 s).
            await asyncio.sleep(0.05)
            done_count += 1
        except asyncio.CancelledError:
            cancel_count += 1

    # FIX: asyncio.Task.all_tasks() was deprecated in 3.7 and removed in
    # Python 3.9; asyncio.all_tasks() is the supported replacement.
    task_count = len(asyncio.all_tasks())
    timer = aiotools.create_timer(delayed, 0.01)
    await asyncio.sleep(0.1)
    timer.cancel()
    await timer
    # No task accumulation beyond the timer task itself.
    assert task_count + 1 >= len(asyncio.all_tasks())
    assert spawn_count == done_count + cancel_count
    assert 9 <= spawn_count <= 10
    assert 4 <= cancel_count <= 5
async def init(app: web.Application):
    """Register event handlers and start the agent-liveness checker."""
    dispatcher = app['event_dispatcher']
    # Table-driven registration keeps event names next to their handlers.
    for event_name, handler in (
        ('kernel_terminated', kernel_terminated),
        ('instance_started', instance_started),
        ('instance_terminated', instance_terminated),
        ('instance_heartbeat', instance_heartbeat),
        ('instance_stats', instance_stats),
    ):
        dispatcher.add_handler(event_name, app, handler)
    # Scan ALIVE agents
    if app['pidx'] == 0:
        log.debug('initializing agent status checker at proc:{0}', app['pidx'])
        app['agent_lost_checker'] = aiotools.create_timer(
            functools.partial(check_agent_lost, app), 1.0)
async def init(app: web.Application) -> None:
    """Set up the distributed log-cleanup lock and its periodic task."""
    redis_config = app['config']['redis']
    app['log_cleanup_lock'] = aioredlock.Aioredlock([
        {
            'host': str(redis_config['addr'][0]),
            'port': redis_config['addr'][1],
            # Normalize an empty password to None.
            'password': redis_config['password'] or None,
            'db': REDIS_LIVE_DB,
        },
    ])
    app['log_cleanup_task'] = aiotools.create_timer(
        functools.partial(log_cleanup_task, app), 5.0)
async def init(app):
    """Wire up routes, event handlers, the instance registry, and the
    agent-liveness checker for the kernel gateway app."""
    add_route = app.router.add_route
    # Route table: (HTTP method, path pattern, handler).
    for method, path, handler in (
        ('POST', r'/v{version:\d+}/kernel/create', create),  # legacy
        ('POST', r'/v{version:\d+}/kernel/', create),
        ('GET', r'/v{version:\d+}/kernel/{sess_id}', get_info),
        ('PATCH', r'/v{version:\d+}/kernel/{sess_id}', restart),
        ('DELETE', r'/v{version:\d+}/kernel/{sess_id}', destroy),
        ('POST', r'/v{version:\d+}/kernel/{sess_id}', execute),
        ('POST', r'/v{version:\d+}/kernel/{sess_id}/interrupt', interrupt),
        ('POST', r'/v{version:\d+}/kernel/{sess_id}/complete', complete),
        ('GET', r'/v{version:\d+}/stream/kernel/{sess_id}/pty', stream_pty),
        ('GET', r'/v{version:\d+}/stream/kernel/{sess_id}/events', not_impl_stub),
        ('POST', r'/v{version:\d+}/kernel/{sess_id}/upload', upload_files),
        ('POST', r'/v{version:\d+}/folder/create', not_impl_stub),
        ('GET', r'/v{version:\d+}/folder/{folder_id}', not_impl_stub),
        ('POST', r'/v{version:\d+}/folder/{folder_id}', method_placeholder('DELETE')),
        ('DELETE', r'/v{version:\d+}/folder/{folder_id}', not_impl_stub),
    ):
        add_route(method, path, handler)
    dispatcher = app['event_dispatcher']
    for event_name, handler in (
        ('kernel_terminated', kernel_terminated),
        ('instance_started', instance_started),
        ('instance_terminated', instance_terminated),
        ('instance_heartbeat', instance_heartbeat),
        ('instance_stats', instance_stats),
    ):
        dispatcher.add_handler(event_name, handler)
    app['stream_pty_handlers'] = defaultdict(set)
    app['stream_stdin_socks'] = defaultdict(set)
    app['registry'] = InstanceRegistry(app['config_server'],
                                       app['dbpool'],
                                       app['redis_stat_pool'])
    await app['registry'].init()
    # Scan ALIVE agents
    if app['pidx'] == 0:
        log.debug(f'initializing agent status checker at proc:{app["pidx"]}')
        now = time.monotonic()
        async for inst in app['registry'].enumerate_instances():
            app['shared_states'].agent_last_seen[inst.id] = now
        app['agent_lost_checker'] = aiotools.create_timer(
            functools.partial(check_agent_lost, app), 1.0)
    # NOTE(review): barrier.wait() is called synchronously here — presumably a
    # multiprocessing-style barrier across worker procs; confirm it is not an
    # un-awaited coroutine.
    app['shared_states'].barrier.wait()
    app['status'] = GatewayStatus.RUNNING
async def start_timer():
    """Pause one second, then run a 10-second periodic timer until cancelled."""
    await asyncio.sleep(1.0)
    tick_timer = aiotools.create_timer(timer_tick, 10.0)
    await tick_timer
async def run():
    """Run a 1-second ticker for ~4 seconds, then shut it down cleanly."""
    ticker = aiotools.create_timer(mytick, 1.0)
    await asyncio.sleep(4)
    ticker.cancel()
    # Wait for the timer task to finish handling its cancellation.
    await ticker
async def init(app: web.Application) -> None:
    """Create the log-cleanup distributed lock and its 5-second timer task."""
    url = app['shared_config'].get_redis_url()
    # Aioredlock accepts a list of Redis connection URLs.
    app['log_cleanup_lock'] = aioredlock.Aioredlock([str(url)])
    app['log_cleanup_task'] = aiotools.create_timer(
        functools.partial(log_cleanup_task, app), 5.0)