async def request(self, srv, method, *params, req_id=None, timeout=DEFAULT_TIMEOUT_SECS):
    if not self.connected:
        raise ConnectionError('websocket closed')
    method = srv + '::' + method
    if not req_id:
        req_id = uuid.uuid4().hex
    payload = {
        'jsonrpc': '2.0',
        'id': req_id,
        'method': method,
        'params': params
    }
    channel = Channel(1)
    self.waiters[req_id] = channel
    try:
        await self.ws.send_json(payload)
        r = await asyncio.wait_for(channel.get(), timeout=timeout)
        return r
    except ChannelClosed:
        raise ConnectionError('websocket closed on sending req')
    finally:
        channel.close()
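# A minimal sketch of the receive side that would pair with request()
# above -- an assumption for illustration, not the original author's
# code. The _recv_loop name and the message shape are hypothetical; it
# simply resolves the per-request Channel stored in self.waiters.
async def _recv_loop(self):
    async for msg in self.ws:
        data = msg.json()
        channel = self.waiters.pop(data.get('id'), None)
        if channel is not None and not channel.closed():
            # Capacity-1 channel and one response per request id, so
            # put_nowait cannot raise ChannelFull here
            channel.put_nowait(data)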
def test_fifo_ordering(self):
    """ Test that items maintain order """
    channel = Channel(3, loop=self.loop)
    testitems = ["first", "second", "third"]

    @asyncio.coroutine
    def add_items():
        for item in testitems:
            yield from channel.put(item)

    @asyncio.coroutine
    def get_items():
        out = []
        while not channel.empty():
            item = yield from channel.get()
            out.append(item)
        return out

    # add and check for full
    self.ruc(add_items())
    self.assertTrue(channel.full())

    # retrieve and check that everything matches
    outitems = self.ruc(get_items())
    self.assertEqual(outitems, testitems)
def test_join(self):
    """ Test that a channel is joinable (when closed, and queue empty) """
    channel = Channel(1000, loop=self.loop)
    [channel.put_nowait(i) for i in range(1000)]
    self.assertTrue(channel.full())

    # create 1000 gets, which should drain the queue
    gets = [channel.get() for _ in range(1000)]

    @asyncio.coroutine
    def runner():
        # sleep a bit, then await the 1000 gets, calling channel.close() in the middle
        yield from asyncio.sleep(0.01, loop=self.loop)
        n = 0
        for c in gets:
            n += 1
            if n == 500:
                channel.close()
            yield from c

    @asyncio.coroutine
    def test():
        self.loop.create_task(runner())  # run the getters in the background
        yield from asyncio.wait_for(channel.join(), timeout=2, loop=self.loop)

    self.ruc(test())
def test_simple(self):
    loop = asyncio.get_event_loop()

    @asyncio.coroutine
    def producer(out):
        for i in range(NUMBER):
            yield from out.put(i)
        out.close()

    @asyncio.coroutine
    def consumer(inp):
        s = 0
        while not inp.closed():
            try:
                item = yield from inp.get()
            except ChannelClosed:
                break
            else:
                s += item
        return s

    @asyncio.coroutine
    def pump(inp, out):
        while not inp.closed() and not out.closed():
            try:
                item = yield from inp.get()
            except ChannelClosed:
                break
            else:
                yield from out.put(item)
        out.close()

    # NOTE that 1 is a HORRIBLE maxsize for real-world use.
    # Try changing it to something like 1000 and you
    # will see much better speeds.
    inp = Channel(1)
    out = Channel(1)

    # A producer that emits the integers from range(NUMBER)
    # into the inp channel
    loop.create_task(producer(inp))
    # A pump that just moves things from inp to out
    loop.create_task(pump(inp, out))
    # A consumer that sums all items on channel out
    consumer_task = loop.create_task(consumer(out))

    t = time()
    item_sum = loop.run_until_complete(consumer_task)
    loop.close()
    dt = time() - t

    message = """
    Example ran in {}, that is {} elements per second
    The result was {} which hopefully is the same as {}
    """.format(dt, NUMBER / dt, item_sum, sum(range(NUMBER)))
    print(message)
def __init__(self, conn, tm):
    self.conn = conn
    self.logger = logging.getLogger('universe')
    self.last_health_check = now()
    self.ticks = 0
    self.queue = Channel(128)
    self.tm = tm
    self.health = []
def test_getter_already_done(self):
    channel = Channel(2, loop=self.loop)

    @asyncio.coroutine
    def test_done_first_then_put():
        yield from asyncio.sleep(0.01, loop=self.loop)
        channel.put_nowait("foo")
        channel.put_nowait("foo")

    self.rucgather(channel.get(), channel.get(), test_done_first_then_put())
def test_double_close(self):
    channel = Channel(1, loop=self.loop)
    self.assertFalse(channel.closed())
    channel.close()
    self.assertTrue(channel.closed())
    channel.close()
    self.assertTrue(channel.closed())
async def daemon(config, *, loop=None):
    """Main app; it runs two tasks: one schedules backups, the other
    one executes them.
    """
    loop = loop or asyncio.get_event_loop()
    ctx = Context(config)

    # Using this channel, we can trigger a refresh of the list of
    # disk snapshots in the Google Cloud.
    snapshot_reload_trigger = Channel()

    # The backup task consumes this channel for the next backup task.
    scheduling_chan = Channel()

    schedule_task = asyncio.ensure_future(
        scheduler(ctx, scheduling_chan, snapshot_reload_trigger))
    backup_task = asyncio.ensure_future(
        backuper(ctx, scheduling_chan, snapshot_reload_trigger))
    tasks = [schedule_task, backup_task]
    _logger.debug('Gathering tasks', tasks=tasks)

    try:
        await asyncio.gather(*tasks)
    except asyncio.CancelledError:
        _logger.exception('Received CancelledError', tasks=tasks)
        for task in tasks:
            task.cancel()
            _logger.debug('daemon cancelled task', task=task)
        while True:
            finished, pending = await asyncio.wait(
                tasks, return_when=asyncio.FIRST_COMPLETED)
            _logger.debug('task completed', finished=finished, pending=pending)
            if not pending:
                _logger.debug('all tasks done')
                raise
def test_get_throws_channel_closed(self):
    """ Test that even though a blocking .get() is pending on an empty
    queue, a close() on that queue will make the .get() throw a
    ChannelClosed error """
    channel = Channel(1, loop=self.loop)

    @asyncio.coroutine
    def wait_close():
        yield from asyncio.sleep(0.01, loop=self.loop)
        channel.close()

    (get_return, _) = self.rucgather(channel.get(), wait_close(),
                                     return_exceptions=True)
    self.assertIsInstance(get_return, ChannelClosed)
def test_construct(self):
    """ Test that we can even construct a Channel """
    channel = Channel(loop=self.loop)
    self.assertEqual(channel.maxsize, 0)
    self.assertFalse(channel.full())
    self.assertTrue(channel.empty())

    channel = Channel(1, loop=self.loop)
    self.assertEqual(channel.maxsize, 1)

    channel = Channel(maxsize=1, loop=self.loop)
    self.assertEqual(channel.maxsize, 1)

    self.assertRaises(TypeError, lambda: Channel([], loop=self.loop))
    self.assertRaises(TypeError, lambda: Channel(1.0, loop=self.loop))
    self.assertRaises(TypeError, lambda: Channel(-1, loop=self.loop))
async def main():
    # Set up the MeiliSearch index
    index = meili.setup_index()

    # Create an asynchronous scheduler and channel
    scheduler = await aiojobs.create_scheduler()
    scheduler._limit = conf.SCHEDULER_MAX_TASKS
    channel = Channel(loop=asyncio.get_event_loop())

    # Get downloads data
    downloads_dict = bq.downloads_dict_from_file()
    sorted_dict = sorted(downloads_dict.items(), key=lambda t: t[1], reverse=True)
    fame_levels = {
        "top100": sorted_dict[100][1],
        "top500": sorted_dict[500][1],
        "top1K": sorted_dict[1000][1],
        "top5K": sorted_dict[5000][1],
        ">10Kmonthly": 10000
    }

    pkg_list = pypi.get_url_list()
    await scheduler.spawn(handle_package_loop(channel, len(pkg_list), index))
    for pkg_link in pkg_list:
        pkg = pypi.Package(pkg_link.get_text())
        pkg.update_package_downloads(downloads_dict, fame_levels)
        await scheduler.spawn(pkg.single_pkg_request(channel))

    await channel.join()
def test_async_iterator(self):
    """ Test that we can iterate a Channel with `async for` """
    channel = Channel(loop=self.loop)
    [channel.put_nowait(i) for i in range(10)]
    channel.close()

    async def test():
        s = 0
        async for item in channel:
            s += item
        return s

    result = self.ruc(test())
    self.assertEqual(result, sum(range(10)))
async def _watch_resources_thread_wrapper(
        client_factory: Callable[[], pykube.HTTPClient],
        resource_type: Type[Resource],
        *, loop=None) -> AsyncGenerator[_WatchEvent, None]:
    """ Async wrapper for pykube.watch().object_stream() """
    loop = loop or asyncio.get_event_loop()
    _log = _logger.bind(resource_type_name=resource_type.__name__)
    channel = Channel()

    def worker():
        try:
            _log.debug('watch-resources.worker.start')
            sync_iterator = watch_resources_sync(
                client_factory=client_factory, resource_type=resource_type)
            for event in sync_iterator:
                # only put_nowait seems to cause SIGSEGV
                loop.call_soon_threadsafe(channel.put_nowait, event)
        except:
            _log.exception('watch-resources.worker.error')
        finally:
            _log.debug('watch-resources.worker.finalized')
            channel.close()

    thread = threading.Thread(target=worker, daemon=True)
    thread.start()

    async for channel_event in channel:
        yield channel_event

    _log.debug('watch-resources.done')
def test_default_loop(self):
    new_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(new_loop)
    channel = Channel()
    asyncio.set_event_loop(None)
    self.assertEqual(channel._loop, new_loop)
    new_loop.close()
async def combine(**generators):
    """Given a bunch of async generators, merges the events from
    all of them. Each should have a name, i.e. `foo=gen, bar=gen`.
    """
    combined = Channel()

    async def listen_and_forward(name, generator):
        async for value in generator:
            await combined.put({name: value})

    tasks = []
    for name, generator in generators.items():
        task = asyncio.ensure_future(listen_and_forward(name, generator))

        # When a task fails, close the channel so that later our
        # iterator stops reading. Also close it once all tasks are
        # done, so the iteration ends when every generator is exhausted.
        def cb(task):
            if task.cancelled():
                return
            if task.exception() or all(t.done() for t in tasks):
                combined.close()
        task.add_done_callback(cb)
        tasks.append(task)

    # This one will stop when either all generators are exhausted,
    # or any one of them fails.
    async for item in combined:
        yield item

    # TODO: gather() can hang, and the task cancellation doesn't
    # really work. Happens if one of the generators has an error.
    # It seems that is because once we attach a done callback to
    # the task, gather() doesn't handle the exception anymore??

    # Any tasks that are still running at this point, cancel them.
    for task in tasks:
        task.cancel()

    # Will consume any task exceptions
    await asyncio.gather(*tasks)
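# A hypothetical usage sketch for combine() above. The numbers() and
# letters() sources and the sleep intervals are assumptions for
# illustration only; combine() and a running event loop must be in scope.
import asyncio

async def numbers():
    for i in range(3):
        await asyncio.sleep(0.01)
        yield i

async def letters():
    for c in 'abc':
        await asyncio.sleep(0.015)
        yield c

async def demo_combine():
    # Events arrive as single-key {name: value} dicts, interleaved in
    # whatever order the sources produce them
    async for event in combine(nums=numbers(), chars=letters()):
        print(event)  # e.g. {'nums': 0}, {'chars': 'a'}, ...

asyncio.get_event_loop().run_until_complete(demo_combine())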
async def debounce(stream, delay):
    debounced = Channel()
    loop = asyncio.get_event_loop()

    async def iterator():
        scheduled_call = None
        async for item in stream:
            if scheduled_call:
                scheduled_call.cancel()
            scheduled_call = loop.call_later(
                delay, lambda: asyncio.ensure_future(debounced.put(item)))

    # Read the incoming iterator in a task. If the task fails, close the
    # channel so the iterator below will stop reading.
    task = asyncio.ensure_future(iterator())

    def cb(task):
        if task.exception():
            debounced.close()
    task.add_done_callback(cb)

    async for item in debounced:
        yield item

    task.cancel()
    await asyncio.gather(task)
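# A hypothetical demo of debounce() above: bursts of items are collapsed
# so only the last item of each burst gets through. The bursty_source()
# shape, the 0.05s delay, and the explicit break are assumptions for
# illustration (debounce() never closes its channel on its own, so the
# consumer has to stop iterating itself).
import asyncio

async def bursty_source():
    for burst in range(3):
        for i in range(5):
            yield (burst, i)
            await asyncio.sleep(0.001)  # items arrive rapidly within a burst
        await asyncio.sleep(0.2)        # quiet period between bursts

async def demo_debounce():
    gen = debounce(bursty_source(), 0.05)
    got = []
    async for item in gen:
        got.append(item)
        if len(got) == 3:
            break
    await gen.aclose()
    print(got)  # expect the last item of each burst: (0, 4), (1, 4), (2, 4)

asyncio.get_event_loop().run_until_complete(demo_debounce())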
async def get_rules(ctx):
    channel = Channel()
    loop = asyncio.get_event_loop()

    _log = _logger.new()
    _log.debug('get-rules.start')

    def worker():
        # sys.settrace(TracePrinter())
        try:
            _log.debug('Iterating in thread')
            for value in sync_get_rules(ctx):
                asyncio.ensure_future(channel.put(value), loop=loop)
        except:
            _log.exception('rules.error')
        finally:
            _log.warning('Closing channel')
            channel.close()

    thread = threading.Thread(target=worker, name='get_rules', daemon=True)
    _log.debug('get-rules.thread.start')
    thread.start()

    async for item in channel:
        rules = ctx.config.get('rules') + item
        _log.debug('get-rules.rules.updated', rules=rules)
        yield rules

    _log.debug('get-rules.done')
def test_get_nowait_raises_closed(self):
    channel = Channel(1, loop=self.loop)
    channel.put_nowait("foo")
    channel.close()

    item = channel.get_nowait()
    self.assertEqual(item, "foo")
    self.assertRaises(ChannelClosed, lambda: channel.get_nowait())
def test_put_get(self):
    """ Simple put/get test """
    testitem = {"foo": "bar"}
    channel = Channel(1, loop=self.loop)

    self.ruc(channel.put(testitem))
    self.assertEqual(channel.qsize(), 1)
    self.assertTrue(channel.full())
    self.assertFalse(channel.empty())

    item = self.ruc(channel.get())
    self.assertEqual(item, testitem)
    self.assertEqual(channel.qsize(), 0)
    self.assertFalse(channel.full())
    self.assertTrue(channel.empty())
def test_putter_cancel(self):
    channel = Channel(1, loop=self.loop)
    self.ruc(channel.put("foo"))

    # next put will block as channel is full
    self.assertTrue(channel.full())

    @asyncio.coroutine
    def test_put():
        yield from channel.put("bar")

    @asyncio.coroutine
    def test_cancel():
        yield from asyncio.sleep(0.01, loop=self.loop)
        channel._putters[0].cancel()

    result = self.rucgather(test_put(), test_cancel(), return_exceptions=True)
    self.assertIsInstance(result[0], asyncio.CancelledError)
def test_putter_exception(self):
    channel = Channel(1, loop=self.loop)
    self.ruc(channel.put("foo"))

    # next put will block as channel is full
    self.assertTrue(channel.full())

    @asyncio.coroutine
    def test_put():
        yield from channel.put("bar")

    @asyncio.coroutine
    def test_cancel():
        yield from asyncio.sleep(0.01, loop=self.loop)
        channel._maxsize = 2  # For hitting a different code branch in Channel
        channel._putters[0].set_exception(TypeError('random type error'))

    result = self.rucgather(test_put(), test_cancel(), return_exceptions=True)
    self.assertIsInstance(result[0], TypeError)
def test_multiple_blocking_gets(self):
    """ Test that a channel with multiple running get() still works
    out fine when the channel is closed """
    channel = Channel(1, loop=self.loop)

    @asyncio.coroutine
    def wait_close():
        yield from asyncio.sleep(0.01, loop=self.loop)
        channel.close()

    futures = [channel.get() for _ in range(100)]
    futures.insert(50, wait_close())

    result = self.rucgather(*futures, return_exceptions=True)
    result.pop(50)  # pop the result for wait_close()

    for res in result:
        self.assertIsInstance(res, ChannelClosed)
async def daemon(config):
    """Main app; it runs two tasks: one schedules backups, the other
    one executes them.
    """
    ctx = Context(config)

    # Using this channel, we can trigger a refresh of the list of
    # disk snapshots in the Google Cloud.
    snapshot_reload_trigger = Channel()

    # The backup task consumes this channel for the next backup task.
    scheduling_chan = Channel()

    schedule_task = asyncio.ensure_future(
        scheduler(ctx, scheduling_chan, snapshot_reload_trigger))
    backup_task = asyncio.ensure_future(
        backuper(ctx, scheduling_chan, snapshot_reload_trigger))

    await asyncio.gather(schedule_task, backup_task)
async def request(self, srv, method, *params, req_id=None):
    if not self.connected:
        raise ConnectionError('websocket closed')
    url = urljoin(self.url_prefix, '/jsonrpc/2.0/api')
    method = srv + '::' + method
    if not req_id:
        req_id = uuid.uuid4().hex
    payload = {'id': req_id, 'method': method, 'params': params}
    channel = Channel(1)
    self.waiters[req_id] = channel
    try:
        await self.ws.send_json(payload)
        r = await channel.get()
        # del self.waiters[req_id]
        return r
    except ChannelClosed:
        raise ConnectionError('websocket closed on sending req')
    finally:
        channel.close()
async def main(loop):
    async def producer(ch):
        for i in range(20):
            await asyncio.sleep(0.1)
            await ch.put(i)
            print("produced %d" % i)
        ch.close()

    async def consumer(ch):
        async for i in ch:
            print("consumed %d" % i)

    channel = Channel(5)  # Note: Horrible buffer size. Example only
    await asyncio.gather(consumer(channel), producer(channel))
async def tweets(request):
    channel = Channel(10, loop=app.loop)
    app.channels.append(channel)
    resp = await sse_response(request,
                              headers={'Access-Control-Allow-Origin': '*'})
    async with resp:
        x = await channel.get()
        resp.send(x)
    app.channels.remove(channel)
    return resp
def test_async_iterator_aborts_not_raises(self):
    channel = Channel(loop=self.loop)

    async def test():
        s = 1
        async for item in channel:
            s += item
        return s

    async def abort():
        await self.sleep(0.01)
        channel.close()

    (result, _) = self.rucgather(test(), abort())
    self.assertEqual(result, 1)
def test_getter_cancel(self):
    channel = Channel(1, loop=self.loop)

    @asyncio.coroutine
    def test_get():
        yield from channel.get()

    @asyncio.coroutine
    def test_cancel():
        yield from asyncio.sleep(0.01, loop=self.loop)
        channel._getters[0].cancel()

    result = self.rucgather(test_get(), test_cancel(), return_exceptions=True)
    self.assertIsInstance(result[0], asyncio.CancelledError)
async def _watch_resources_thread_wrapper(
        client_factory: Callable[[], pykube.HTTPClient],
        resource_type: Type[Resource],
        allow_missing: bool = False,
        *, loop=None) -> AsyncGenerator[_WatchEvent, None]:
    """ Async wrapper for pykube.watch().object_stream() """
    loop = loop or asyncio.get_event_loop()
    _log = _logger.bind(resource_type_name=resource_type.__name__)
    channel = Channel()

    def worker():
        try:
            _log.debug('watch-resources.worker.start')
            while True:
                sync_iterator = watch_resources_sync(
                    client_factory=client_factory, resource_type=resource_type)
                _log.debug('watch-resources.worker.watch-opened')
                for event in sync_iterator:
                    # only put_nowait seems to cause SIGSEGV
                    loop.call_soon_threadsafe(channel.put_nowait, event)
                _log.debug('watch-resources.worker.watch-closed')
        except pykube.exceptions.HTTPError as e:
            # TODO: It's possible that the user creates the resource
            # while we are already running. We should pick this up
            # automatically, i.e. watch ThirdPartyResource, or just
            # check every couple of seconds.
            if e.code == 404 and allow_missing:
                _log.info('watch-resources.worker.skipped')
            else:
                _log.exception('watch-resources.worker.error')
        except:
            _log.exception('watch-resources.worker.error')
        finally:
            _log.debug('watch-resources.worker.finalized')
            channel.close()

    thread = threading.Thread(target=worker, daemon=True)
    thread.start()

    async for channel_event in channel:
        yield channel_event

    _log.debug('watch-resources.done')
def test_getter_exception(self):
    channel = Channel(1, loop=self.loop)

    @asyncio.coroutine
    def test_get():
        yield from channel.get()

    @asyncio.coroutine
    def test_cancel():
        yield from asyncio.sleep(0.01, loop=self.loop)
        channel.empty = lambda: False  # For hitting a different code branch in Channel
        channel._getters[0].set_exception(TypeError('random type error'))

    result = self.rucgather(test_get(), test_cancel(), return_exceptions=True)
    self.assertIsInstance(result[0], TypeError)
async def iterate_in_executor(sync_iter, *args):
    """run_in_executor returns a future. But what if the function we
    call is supposed to return values iteratively?
    """
    loop = asyncio.get_event_loop()
    channel = Channel()

    def forward_iter(*a):
        try:
            # TODO: We are looking for a solution to stop this
            # if the channel is closed. Should this thread use its
            # own event loop where we can use await?
            for value in sync_iter(*a):
                asyncio.ensure_future(channel.put(value), loop=loop)
        finally:
            channel.close()

    result = asyncio.get_event_loop().run_in_executor(None, forward_iter, *args)

    async for item in channel:
        yield item

    # Any exceptions would be retrieved here
    await result
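# A hedged variant sketch, not the original author's code: calling
# asyncio.ensure_future() and channel.close() from the executor thread,
# as iterate_in_executor() above does, touches the loop from a foreign
# thread. Routing the puts through loop.call_soon_threadsafe(), exactly
# as the watch-resources wrapper above does, sidesteps that. The
# function name iterate_in_executor_safe is an assumption.
import asyncio

async def iterate_in_executor_safe(sync_iter, *args):
    loop = asyncio.get_event_loop()
    channel = Channel()  # unbounded, so put_nowait cannot raise ChannelFull

    def forward_iter(*a):
        try:
            for value in sync_iter(*a):
                # Schedule the put on the loop's own thread
                loop.call_soon_threadsafe(channel.put_nowait, value)
        finally:
            loop.call_soon_threadsafe(channel.close)

    result = loop.run_in_executor(None, forward_iter, *args)

    async for item in channel:
        yield item

    # Re-raise any exception from the worker thread
    await result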
def test_put_throws_channel_closed(self):
    """ Test that when a put blocks, and the channel is closed, the
    put will throw a ChannelClosed instead of waiting to add to the
    channel """
    channel = Channel(1, loop=self.loop)

    channel.put_nowait("foo")
    self.assertTrue(channel.full())

    @asyncio.coroutine
    def wait_close():
        yield from asyncio.sleep(0.01, loop=self.loop)
        channel.close()

    (put_return, _) = self.rucgather(channel.put("bar"), wait_close(),
                                     return_exceptions=True)
    self.assertIsInstance(put_return, ChannelClosed)
    self.assertTrue(channel.closed())
def test_iter(self):
    channel = Channel(loop=self.loop)
    [channel.put_nowait(n) for n in range(5)]
    self.assertEqual(list(range(5)), list(channel))
def test_put_when_closed(self):
    channel = Channel(1, loop=self.loop)
    channel.close()
    self.assertRaises(ChannelClosed, lambda: self.ruc(channel.put("foo")))
def test_put_nowait_get_nowait(self):
    channel = Channel(1, loop=self.loop)
    channel.put_nowait("foo")
    self.assertRaises(ChannelFull, lambda: channel.put_nowait("bar"))
    self.assertEqual("foo", channel.get_nowait())
    self.assertRaises(ChannelEmpty, lambda: channel.get_nowait())