def wrapped(self, *args, **kwargs):
    """Run the wrapped coroutine test method on ``self.loop`` with a timeout.

    NOTE(review): this is a closure — ``f`` (the decorated coroutine function)
    and ``timeout`` come from the enclosing decorator scope, which is not
    visible in this chunk.
    """
    # A caller-supplied timeout is passed through to get_async_test_timeout,
    # which presumably applies env-based scaling; None means "use the default".
    if timeout is None:
        actual_timeout = get_async_test_timeout()
    else:
        actual_timeout = get_async_test_timeout(timeout)

    coro_exc = None

    def exc_handler(loop, context):
        # Record the first exception the loop reports so we can re-raise it
        # below with a clean traceback.
        nonlocal coro_exc
        # Exception is optional.
        coro_exc = context.get('exception', Exception(context))

        # Raise CancelledError from run_until_complete below.
        # ``task`` is assigned later in the enclosing function; the handler
        # only fires once the loop is running, so the name is bound by then.
        task.cancel()

    self.loop.set_exception_handler(exc_handler)
    coro = asyncio.wait_for(f(self, *args, **kwargs), actual_timeout)
    task = ensure_future(coro, loop=self.loop)
    try:
        self.loop.run_until_complete(task)
    except:
        # Bare except is deliberate: CancelledError derives from
        # BaseException in modern Python, so ``except Exception`` would miss
        # the cancellation triggered by exc_handler above.
        if coro_exc:
            # Raise the error thrown in on_timeout, with only the
            # traceback from the coroutine itself, not from
            # run_until_complete.
            raise coro_exc from None

        raise
def target():
    # Runs on a worker thread: wait (by polling) until the
    # MotorChangeStream has created its underlying ChangeStream, then
    # hand the inserts over to the event-loop thread.
    deadline = time.time() + get_async_test_timeout()
    while not change_stream.delegate:
        if time.time() > deadline:
            print("MotorChangeStream never created ChangeStream")
            return
        time.sleep(0.1)

    # ``n`` may be a document count or an explicit list of documents.
    if isinstance(n, int):
        docs = [{} for _ in range(n)]
    else:
        docs = n

    # insert_many must run on the loop thread, not this one.
    self.loop.call_soon_threadsafe(self.collection.insert_many, docs)
async def test_async_try_next_updates_resume_token(self):
    """try_next() must advance the resume token even with no matching change."""
    stream = self.collection.watch([{"$match": {"fullDocument.a": 10}}])

    # An empty try_next still yields a non-empty resume token.
    await stream.try_next()
    self.assertIsNotNone(stream.resume_token)

    # Insert documents that the $match stage filters out.
    self.wait_and_insert(stream, [{"a": 19}, {"a": 20}])

    # The token should still move forward although no change is surfaced.
    token_before = copy.copy(stream.resume_token)

    async def token_change():
        await stream.try_next()
        return stream.resume_token != token_before

    await wait_until(token_change, "see a new resume token", timeout=get_async_test_timeout())
async def test_exhaust(self):
    """Exercise CursorType.EXHAUST: option validation, pool socket accounting,
    and socket disposal when an exhaust cursor is abandoned mid-iteration."""
    # mongos does not support exhaust cursors; the driver must refuse up front.
    if await server_is_mongos(self.cx):
        self.assertRaises(InvalidOperation, self.db.test.find, cursor_type=CursorType.EXHAUST)
        return

    self.assertRaises(ValueError, self.db.test.find, cursor_type=5)

    # Exhaust cursors reject limit(), and limited cursors reject the
    # exhaust option (64) — in either order.
    cur = self.db.test.find(cursor_type=CursorType.EXHAUST)
    self.assertRaises(InvalidOperation, cur.limit, 5)
    cur = self.db.test.find(limit=5)
    self.assertRaises(InvalidOperation, cur.add_option, 64)
    cur = self.db.test.find()
    cur.add_option(64)
    self.assertRaises(InvalidOperation, cur.limit, 5)

    await self.db.drop_collection("test")
    # Insert enough documents to require more than one batch.
    await self.db.test.insert_many([{} for _ in range(150)])
    client = self.asyncio_client(maxPoolSize=1)
    # Ensure a pool.
    await client.db.collection.find_one()
    socks = get_primary_pool(client).sockets

    # Make sure the socket is returned after exhaustion.
    cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST)
    has_next = await cur.fetch_next
    self.assertTrue(has_next)
    # The exhaust socket is checked out of the (size-1) pool while iterating.
    self.assertEqual(0, len(socks))

    while await cur.fetch_next:
        cur.next_object()

    # Fully exhausted: the socket is back in the pool.
    self.assertEqual(1, len(socks))

    # Same as previous but with to_list instead of next_object.
    docs = await client[self.db.name
                       ].test.find(cursor_type=CursorType.EXHAUST
                                   ).to_list(None)
    self.assertEqual(1, len(socks))
    self.assertEqual(
        (await self.db.test.count_documents({})),
        len(docs))

    # If the Cursor instance is discarded before being
    # completely iterated we have to close and
    # discard the socket.
    sock = one(socks)
    cur = client[self.db.name].test.find(
        cursor_type=CursorType.EXHAUST).batch_size(1)
    await cur.fetch_next
    self.assertTrue(cur.next_object())
    # Run at least one getMore to initiate the OP_MSG exhaust protocol.
    if env.version.at_least(4, 2):
        await cur.fetch_next
        self.assertTrue(cur.next_object())
    self.assertEqual(0, len(socks))
    if "PyPy" in sys.version:
        # Don't wait for GC or use gc.collect(), it's unreliable.
        await cur.close()

    # On CPython, dropping the last reference presumably triggers the
    # cursor's cleanup path via refcounting GC.
    del cur

    async def sock_closed():
        return sock not in socks and sock.closed

    await wait_until(sock_closed, "close exhaust cursor socket",
                     timeout=get_async_test_timeout())

    # The exhaust cursor's socket was discarded, although another may
    # already have been opened to send OP_KILLCURSORS.
    self.assertNotIn(sock, socks)
    self.assertTrue(sock.closed)