def test_ctor_loop(self):
    """The Queue stores whatever loop is passed via the ``loop`` keyword."""
    mock_loop = unittest.mock.Mock()
    self.assertIs(queues.Queue(loop=mock_loop)._loop, mock_loop)
    self.assertIs(queues.Queue(loop=self.loop)._loop, self.loop)
def test_full(self):
    """full() is False for an unbounded queue, True once maxsize is hit."""
    unbounded = queues.Queue(loop=self.loop)
    self.assertFalse(unbounded.full())
    bounded = queues.Queue(maxsize=1, loop=self.loop)
    bounded.put_nowait(1)
    self.assertTrue(bounded.full())
def _test_repr_or_str(self, fn, expect_id):
    """Test Queue's repr or str.

    fn is repr or str. expect_id is True if we expect the Queue's id to
    appear in fn(Queue()).
    """
    def gen():
        # Scripted TestLoop timings: two 0.1s sleeps run by the inner
        # coroutines below.
        when = yield
        self.assertAlmostEqual(0.1, when)
        when = yield 0.1
        self.assertAlmostEqual(0.2, when)
        yield 0.1

    loop = test_utils.TestLoop(gen)
    self.addCleanup(loop.close)

    q = queues.Queue(loop=loop)
    self.assertTrue(fn(q).startswith('<Queue'), fn(q))
    id_is_present = hex(id(q)) in fn(q)
    self.assertEqual(expect_id, id_is_present)

    @tasks.coroutine
    def add_getter():
        q = queues.Queue(loop=loop)
        # Start a task that waits to get.
        tasks.Task(q.get(), loop=loop)
        # Let it start waiting.
        yield from tasks.sleep(0.1, loop=loop)
        # A blocked getter must be reflected in the repr/str output.
        self.assertTrue('_getters[1]' in fn(q))
        # resume q.get coroutine to finish generator
        q.put_nowait(0)

    loop.run_until_complete(add_getter())

    @tasks.coroutine
    def add_putter():
        q = queues.Queue(maxsize=1, loop=loop)
        q.put_nowait(1)
        # Start a task that waits to put.
        tasks.Task(q.put(2), loop=loop)
        # Let it start waiting.
        yield from tasks.sleep(0.1, loop=loop)
        # A blocked putter must be reflected in the repr/str output.
        self.assertTrue('_putters[1]' in fn(q))
        # resume q.put coroutine to finish generator
        q.get_nowait()

    loop.run_until_complete(add_putter())

    # Finally, queued items themselves must show up.
    q = queues.Queue(loop=loop)
    q.put_nowait(1)
    self.assertTrue('_queue=[1]' in fn(q))
def test_get_with_waiting_putters(self):
    """get() wakes blocked putters on a full queue, preserving FIFO order."""
    queue = queues.Queue(loop=self.loop, maxsize=1)
    for item in ('a', 'b'):
        tasks.Task(queue.put(item), loop=self.loop)
    test_utils.run_briefly(self.loop)
    self.assertEqual('a', self.loop.run_until_complete(queue.get()))
    self.assertEqual('b', self.loop.run_until_complete(queue.get()))
def test_get_cancelled(self):
    """get() wrapped in wait_for() still yields the item put before the timeout."""
    def gen():
        # Scripted TestLoop timings: the 0.01 sleep in test(), then the
        # wait_for timeout due at 0.01 + 0.051 = 0.061.
        when = yield
        self.assertAlmostEqual(0.01, when)
        when = yield 0.01
        self.assertAlmostEqual(0.061, when)
        yield 0.05

    loop = test_utils.TestLoop(gen)
    self.addCleanup(loop.close)

    q = queues.Queue(loop=loop)

    @tasks.coroutine
    def queue_get():
        # 0.051s timeout -- long enough for the put_nowait below to land.
        return (yield from tasks.wait_for(q.get(), 0.051, loop=loop))

    @tasks.coroutine
    def test():
        get_task = tasks.Task(queue_get(), loop=loop)
        yield from tasks.sleep(0.01, loop=loop)  # let the task start
        q.put_nowait(1)
        return (yield from get_task)

    self.assertEqual(1, loop.run_until_complete(test()))
    self.assertAlmostEqual(0.06, loop.time())
def test_ctor_noloop(self):
    """Without an explicit loop, Queue picks up the current event loop."""
    try:
        events.set_event_loop(self.loop)
        self.assertIs(queues.Queue()._loop, self.loop)
    finally:
        # Always detach the global loop so other tests start clean.
        events.set_event_loop(None)
def test_empty(self):
    """empty() tracks every put_nowait/get_nowait transition."""
    queue = queues.Queue(loop=self.loop)
    self.assertTrue(queue.empty())
    queue.put_nowait(1)
    self.assertFalse(queue.empty())
    self.assertEqual(1, queue.get_nowait())
    self.assertTrue(queue.empty())
def test_order(self):
    """Items come out in exactly the order they went in (FIFO)."""
    queue = queues.Queue(loop=self.loop)
    for value in (1, 3, 2):
        queue.put_nowait(value)
    drained = [queue.get_nowait() for _ in range(3)]
    self.assertEqual([1, 3, 2], drained)
def test_blocking_put_wait(self):
    """put() on a full queue blocks until a consumer frees a slot."""
    def gen():
        # Scripted TestLoop timing: the single call_later(0.01) below.
        when = yield
        self.assertAlmostEqual(0.01, when)
        yield 0.01

    loop = test_utils.TestLoop(gen)
    self.addCleanup(loop.close)

    q = queues.Queue(maxsize=1, loop=loop)
    started = locks.Event(loop=loop)
    finished = False

    @tasks.coroutine
    def queue_put():
        nonlocal finished
        started.set()
        yield from q.put(1)
        # Second put blocks (maxsize=1) until q.get_nowait fires at 0.01.
        yield from q.put(2)
        finished = True

    @tasks.coroutine
    def queue_get():
        loop.call_later(0.01, q.get_nowait)
        queue_put_task = tasks.Task(queue_put(), loop=loop)
        yield from started.wait()
        # The putter has started but must still be blocked on put(2).
        self.assertFalse(finished)
        yield from queue_put_task
        self.assertTrue(finished)

    loop.run_until_complete(queue_get())
    self.assertAlmostEqual(0.01, loop.time())
def add_getter(): q = queues.Queue(loop=loop) # Start a task that waits to get. tasks.Task(q.get(), loop=loop) # Let it start waiting. yield from tasks.sleep(0.1, loop=loop) self.assertTrue('_getters[1]' in fn(q)) # resume q.get coroutine to finish generator q.put_nowait(0)
def test_blocking_put(self):
    """put() on an unbounded queue completes without blocking."""
    queue = queues.Queue(loop=self.loop)

    @tasks.coroutine
    def do_put():
        # No maxsize, won't block.
        yield from queue.put(1)

    self.loop.run_until_complete(do_put())
def test_blocking_get(self):
    """get() returns an item that is already queued."""
    queue = queues.Queue(loop=self.loop)
    queue.put_nowait(1)

    @tasks.coroutine
    def do_get():
        return (yield from queue.get())

    self.assertEqual(1, self.loop.run_until_complete(do_get()))
async def test_invalid_json(broker):
    """Feed a non-JSON payload through Kafka into broker.read().

    NOTE(review): this test makes no assertions -- it only exercises the
    path and checks nothing about the result; confirm that "broker.read()
    survives undecodable input" is the intended contract. AIOKafkaProducer
    is constructed with default bootstrap settings -- presumably the test
    broker runs on localhost; verify against the fixture. `job` is
    cancelled without being awaited, so a racing exception in
    broker.read() would go unnoticed.
    """
    mes = "somemessage"  # deliberately not valid JSON
    broker.results = queues.Queue()
    broker_producer = AIOKafkaProducer()
    await broker_producer.start()
    job = get_running_loop().create_task(broker.read())
    await broker_producer.send("test_topic", value=mes.encode())
    await broker_producer.stop()
    await broker.close()
    job.cancel()
async def test_send_ping(broker):
    """A JSON message sent through Kafka round-trips out of broker.read()."""
    payload = json.dumps({"Something": "good"})
    broker.results = queues.Queue()
    producer = AIOKafkaProducer()
    await producer.start()
    get_running_loop().create_task(broker.read())
    await producer.send("test_topic", value=payload.encode())
    decoded = await broker.results.get()
    await producer.stop()
    await broker.close()
    # The decoded object must re-serialize to the exact original payload.
    assert payload == json.dumps(decoded)
def test_get_with_putters(self):
    """get() releases a pending putter waiter registered in _putters."""
    queue = queues.Queue(1, loop=self.loop)
    queue.put_nowait(1)
    waiter = futures.Future(loop=self.loop)
    queue._putters.append((2, waiter))
    result = self.loop.run_until_complete(queue.get())
    self.assertEqual(1, result)
    self.assertTrue(waiter.done())
    self.assertIsNone(waiter.result())
def submit(self, flow, flow_id=0, **kwargs):
    """Execute a set of tasks with DAG-topology into consideration.

    :param flow: the Flow whose tasks form the DAG
    :param flow_id: identifier for this execution of the flow
    :param kwargs: extra keyword arguments forwarded to each task's execute()
    :return: the overall return code from execute_dag_ioloop()
    """
    assert isinstance(flow, Flow)
    self.executing_flow = flow
    self.executing_flow_id = flow_id
    self.kwargs = kwargs
    # Mark every task of the flow as pending before scheduling starts.
    for task_name in self.executing_flow.tasks:
        self.context.get_task(task_name).pending(self.context, flow_id, flow.name)
    io_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(io_loop)
    self.wait_queue = queues.Queue()
    self.exec_queue = queues.Queue()
    try:
        # BUG FIX: run_until_complete may raise; without this try/finally
        # the freshly created event loop (and its selector fd) leaked on
        # every failed submission.
        ret = io_loop.run_until_complete(self.execute_dag_ioloop())
    finally:
        io_loop.close()
    return ret
def test_get_cancelled_race(self):
    """Cancelling one blocked getter must not starve the next one in line."""
    queue = queues.Queue(loop=self.loop)
    first = tasks.Task(queue.get(), loop=self.loop)
    second = tasks.Task(queue.get(), loop=self.loop)
    test_utils.run_briefly(self.loop)
    first.cancel()
    test_utils.run_briefly(self.loop)
    self.assertTrue(first.done())
    queue.put_nowait('a')
    test_utils.run_briefly(self.loop)
    self.assertEqual('a', second.result())
def test_put_cancelled_race(self):
    """Cancelling a blocked putter must not drop the other pending items."""
    queue = queues.Queue(loop=self.loop, maxsize=1)
    tasks.Task(queue.put('a'), loop=self.loop)
    tasks.Task(queue.put('c'), loop=self.loop)
    doomed = tasks.Task(queue.put('b'), loop=self.loop)
    test_utils.run_briefly(self.loop)
    doomed.cancel()
    test_utils.run_briefly(self.loop)
    self.assertTrue(doomed.done())
    # 'a' and 'c' still arrive, in order; the cancelled 'b' is skipped.
    self.assertEqual('a', queue.get_nowait())
    self.assertEqual('c', queue.get_nowait())
def test_put_cancelled(self):
    """An unbounded put() finishes even when its consumer runs afterwards."""
    queue = queues.Queue(loop=self.loop)

    @tasks.coroutine
    def writer():
        yield from queue.put(1)
        return True

    @tasks.coroutine
    def reader():
        return (yield from queue.get())

    put_task = tasks.Task(writer(), loop=self.loop)
    self.assertEqual(1, self.loop.run_until_complete(reader()))
    self.assertTrue(put_task.done())
    self.assertTrue(put_task.result())
async def main():
    """Fan contacts from the database out to a pool of email-sender workers.

    Spawns 10 email_sender tasks sharing one bounded queue, feeds every
    contact row through the queue, then shuts the workers down.
    """
    work_queue = queues.Queue(maxsize=5)
    senders = [asyncio.create_task(email_sender(work_queue)) for _ in range(10)]
    db = await connect_db('contacts.db')
    for contact in db:
        await work_queue.put({
            'first_name': contact[1],
            'last_name': contact[2],
            'email': contact[3],
        })
    # BUG FIX: one shutdown sentinel PER worker. The original enqueued a
    # single None, so only one of the 10 senders could ever consume it and
    # the final `await sender` loop deadlocked on the other nine.
    # (Assumes email_sender exits when it dequeues None -- the same
    # contract the original single sentinel implied.)
    for _ in senders:
        await work_queue.put(None)
    for sender in senders:
        await sender
def test_maxsize(self):
    """A putter blocks at maxsize and resumes one item at a time as slots free up."""
    def gen():
        # Scripted TestLoop timings: the two 0.01s sleeps inside test().
        when = yield
        self.assertAlmostEqual(0.01, when)
        when = yield 0.01
        self.assertAlmostEqual(0.02, when)
        yield 0.01

    loop = test_utils.TestLoop(gen)
    self.addCleanup(loop.close)

    q = queues.Queue(maxsize=2, loop=loop)
    self.assertEqual(2, q.maxsize)
    have_been_put = []

    @tasks.coroutine
    def putter():
        for i in range(3):
            yield from q.put(i)
            have_been_put.append(i)
        return True

    @tasks.coroutine
    def test():
        t = tasks.Task(putter(), loop=loop)
        yield from tasks.sleep(0.01, loop=loop)

        # The putter is blocked after putting two items.
        self.assertEqual([0, 1], have_been_put)
        self.assertEqual(0, q.get_nowait())

        # Let the putter resume and put last item.
        yield from tasks.sleep(0.01, loop=loop)
        self.assertEqual([0, 1, 2], have_been_put)
        self.assertEqual(1, q.get_nowait())
        self.assertEqual(2, q.get_nowait())

        self.assertTrue(t.done())
        self.assertTrue(t.result())

    loop.run_until_complete(test())
    self.assertAlmostEqual(0.02, loop.time())
async def clear(self):
    """Reset self.queue to a brand-new, empty Queue."""
    # Replaces the queue wholesale instead of draining it.
    # NOTE(review): any consumer already awaiting the OLD queue object
    # keeps waiting on it -- confirm no get() can be in flight here.
    self.queue = queues.Queue()
def __init__(self):
    """Initialize with a single unbounded queue for pending items."""
    # queue: holds work items until they are consumed elsewhere.
    self.queue = queues.Queue()
def test_nonblocking_get_exception(self):
    """get_nowait() on an empty queue raises queues.Empty."""
    queue = queues.Queue(loop=self.loop)
    with self.assertRaises(queues.Empty):
        queue.get_nowait()
def test_put_with_waiting_getters(self):
    """put() hands the item straight to a getter that is already waiting."""
    queue = queues.Queue(loop=self.loop)
    getter = tasks.Task(queue.get(), loop=self.loop)
    test_utils.run_briefly(self.loop)
    self.loop.run_until_complete(queue.put('a'))
    self.assertEqual('a', self.loop.run_until_complete(getter))
def test_nonblocking_put_exception(self):
    """put_nowait() on a full queue raises queues.Full."""
    queue = queues.Queue(maxsize=1, loop=self.loop)
    queue.put_nowait(1)
    with self.assertRaises(queues.Full):
        queue.put_nowait(2)
async def execute_dag_ioloop(self):
    """
    the async process to execute task DAG
    :return:
    """
    self.wait_queue = queues.Queue()
    self.exec_queue = queues.Queue()
    # add to wait queue, waiting to execute
    await self.wait_queue.put(set(self.executing_flow.tasks))
    # Shared mutable state between the nested producer/consumer coroutines:
    # tasks currently running, finished OK, and finished with failure.
    executing, successed, failed = set(), set(), set()

    async def _produce_tasks():
        """
        the inner async procedure of task producer
        :return:
        """
        # wait until all tasks in executing queue are done
        await self.exec_queue.join()
        # reset the *executing* and *done* set
        executing.clear()
        successed.clear()
        failed.clear()
        # retrieve the task-DAG from wait-queue to exec-queue
        sched_task_names = await self.wait_queue.get()
        for sched_task_name in sched_task_names:
            await self.exec_queue.put(sched_task_name)
        self.wait_queue.task_done()

    async def _consume_task():
        """
        the inner async procedure of task consumers
        :return:
        """
        loop = asyncio.get_event_loop()
        try:
            next_task_name = self.exec_queue.get_nowait()
        # NOTE(review): bare except -- presumably meant to catch the
        # queue-empty exception only; it also hides any other error here.
        except:
            #logger(exec=self.executing_flow_id, flow=self.executing_flow.name).error("next task fetch failed")
            return
        logger(exec=self.executing_flow_id, flow=self.executing_flow.name).info(
            "pick up task [{}] ...".format(next_task_name))
        try:
            # Skip a task another consumer has already picked up.
            if next_task_name in executing:
                logger(exec=self.executing_flow_id, flow=self.executing_flow.name).info(
                    "task [{}] is executing, pass ...".format(
                        next_task_name))
                return
            next_task = self.context.get_task(next_task_name)
            task_deps = self.executing_flow.deps.get(next_task_name, set())
            done_deps = set(filter(lambda x: x in successed, task_deps))
            fail_deps = set(filter(lambda x: x in failed, task_deps))
            if len(task_deps) == len(done_deps):
                # all dependencies are done
                # submit the task to threading pool to execute
                if len(task_deps) > 0:
                    logger(exec=self.executing_flow_id,
                           flow=self.executing_flow.name).info(
                               "all dependant task(s) of task {} is done".
                               format(next_task_name))
                executing.add(next_task_name)
                logger(exec=self.executing_flow_id,
                       flow=self.executing_flow.name).info(
                           "task [{}] start executing ...".format(
                               next_task_name))
                # Run the (blocking) task in the worker pool so the event
                # loop stays responsive.
                await loop.run_in_executor(
                    self.task_workers,
                    partial(next_task.execute, **self.kwargs), self.context)
                if next_task.result_code == Task.RET_CODE_SUCCESS:
                    logger(exec=self.executing_flow_id,
                           flow=self.executing_flow.name).info(
                               "task [{}] executed successfully".format(
                                   next_task_name))
                    successed.add(next_task_name)
                else:
                    logger(exec=self.executing_flow_id,
                           flow=self.executing_flow.name).info(
                               "task [{}] executed failed".format(
                                   next_task_name))
                    failed.add(next_task_name)
                next_task.dispose()
            elif len(fail_deps) > 0:
                # A dependency failed: cancel this task and mark it failed
                # so downstream tasks cancel in turn.
                logger(
                    exec=self.executing_flow_id,
                    flow=self.executing_flow.name
                ).info(
                    "task [{}] canceled since its dependencies [{}] failed"
                    .format(next_task_name, fail_deps))
                executing.add(next_task_name)
                failed.add(next_task_name)
                next_task.cancel(self.context, fail_deps)
            else:
                # otherwise, re-put the task into the end of the queue
                # sleep for 1 second
                await self.exec_queue.put(next_task_name)
                await asyncio.sleep(1)
        except Exception as e:
            logger(exec=self.executing_flow_id,
                   flow=self.executing_flow.name).exception(str(e))
        finally:
            # Balance the get_nowait() above so exec_queue.join() can finish.
            self.exec_queue.task_done()

    async def consumer():
        # Keep spawning _consume_task coroutines until every flow task is
        # accounted for, bounded by self.concurrency in-flight at once.
        consumer_tasks = []
        while len(successed) + len(failed) < len(
                self.executing_flow.tasks):
            # wait all task to be done if too much is issued
            if len(consumer_tasks) >= min(len(self.executing_flow.tasks),
                                          self.concurrency):
                logger(
                    exec=self.executing_flow_id,
                    flow=self.executing_flow.name).info(
                        "Maybe too many tasks [{}], rest for a while ... ".
                        format(len(consumer_tasks)))
                for ct in consumer_tasks:
                    await ct
                consumer_tasks = []
            # create a new task
            consumer_tasks.append(asyncio.ensure_future(_consume_task()))
        if len(consumer_tasks) > 0:
            for ct in consumer_tasks:
                await ct

    async def producer():
        await _produce_tasks()

    # we use a single producer within the main-thread
    await producer()
    await consumer()
    await self.exec_queue.join()
    assert len(executing) == len(successed) + len(failed)
    retcode = Task.RET_CODE_SUCCESS if len(
        failed) == 0 else Task.RET_CODE_FAIL
    if retcode == Task.RET_CODE_SUCCESS:
        self.context.on_flow_success(self.executing_flow_id)
    else:
        self.context.on_flow_failed(self.executing_flow_id)
    return retcode
def __init__(self):
    """Set up empty task/bot registries and the shared work queue."""
    self._queue = queues.Queue()
    self._tasks = []
    self._bots = []
def test_nonblocking_put(self):
    """put_nowait() stores an item retrievable via get_nowait()."""
    queue = queues.Queue(loop=self.loop)
    queue.put_nowait(1)
    self.assertEqual(1, queue.get_nowait())