def test_timeout_does_not_mark_member_down(self):
    # If a query times out, the client shouldn't mark the member "down".

    # Disable background refresh.
    with client_knobs(heartbeat_frequency=999999):
        c = rs_client(socketTimeoutMS=3000, w=self.w)
        collection = c.pymongo_test.test
        collection.insert_one({})

        # Query the primary.
        self.assertRaises(
            NetworkTimeout,
            collection.find_one,
            {'$where': delay(5)})

        self.assertTrue(c.primary)
        collection.find_one()  # No error.

        coll = collection.with_options(
            read_preference=ReadPreference.SECONDARY)

        # Query the secondary.
        self.assertRaises(
            NetworkTimeout,
            coll.find_one,
            {'$where': delay(5)})

        self.assertTrue(c.secondaries)
        coll.find_one()  # No error.
def test_timeout_does_not_mark_member_down(self):
    # If a query times out, the RS client shouldn't mark the member "down".
    c = self._get_client(socketTimeoutMS=3000)
    collection = c.pymongo_test.test
    collection.insert({}, w=self.w)

    # Query the primary.
    self.assertRaises(
        ConnectionFailure,
        collection.find_one,
        {'$where': delay(5)})

    # primary_member returns None if primary is marked "down".
    rs_state = c._MongoReplicaSetClient__rs_state
    self.assertTrue(rs_state.primary_member)
    collection.find_one()  # No error.

    # Query the secondary.
    self.assertRaises(
        ConnectionFailure,
        collection.find_one,
        {'$where': delay(5)},
        read_preference=SECONDARY)

    rs_state = c._MongoReplicaSetClient__rs_state
    secondary_host = one(rs_state.secondaries)
    self.assertTrue(rs_state.get(secondary_host))
    collection.find_one(read_preference=SECONDARY)  # No error.
def test_wait_queue_timeout(self):
    # Do a find_one that takes 1 second, and set waitQueueTimeoutMS to 500,
    # 5000, and None. Verify timeout iff max_wait_time < 1 sec.
    where_delay = 1
    for waitQueueTimeoutMS in (500, 5000, None):
        cx = yield self.motor_client(
            max_pool_size=1, waitQueueTimeoutMS=waitQueueTimeoutMS)

        pool = cx._get_pools()[0]
        if waitQueueTimeoutMS:
            self.assertEqual(
                waitQueueTimeoutMS, pool.wait_queue_timeout * 1000)
        else:
            self.assertTrue(pool.wait_queue_timeout is None)

        collection = cx.pymongo_test.test_collection
        cb = yield gen.Callback('find_one')
        collection.find_one({'$where': delay(where_delay)}, callback=cb)
        if waitQueueTimeoutMS and waitQueueTimeoutMS < where_delay * 1000:
            with assert_raises(pymongo.errors.ConnectionFailure):
                yield collection.find_one()
        else:
            # No error
            yield collection.find_one()

        yield gen.Wait('find_one')
        cx.close()
def test_find_is_async(self):
    # Confirm find() is async by launching two operations which will finish
    # out of order. Also test that MotorClient doesn't reuse sockets
    # incorrectly.

    # Launch find operations for _id's 1 and 2 which will finish in order
    # 2, then 1.
    results = []

    yield_points = [(yield gen.Callback(0)), (yield gen.Callback(1))]

    def callback(result, error):
        if result:
            results.append(result)
            yield_points.pop()()

    # This find() takes 0.5 seconds
    self.cx.pymongo_test.test_collection.find(
        {'_id': 1, '$where': delay(0.5)},
        fields={'s': True, '_id': False}
    ).limit(1).each(callback)

    # Very fast lookup
    self.cx.pymongo_test.test_collection.find(
        {'_id': 2},
        fields={'s': True, '_id': False}
    ).limit(1).each(callback)

    yield gen.WaitAll([0, 1])

    # Results were appended in order 2, 1
    self.assertEqual(
        [{'s': hex(s)} for s in (2, 1)],
        results)
def test_network_timeout(self):
    no_timeout = self._get_connection()
    timeout_sec = 1
    timeout = self._get_connection(socketTimeoutMS=timeout_sec * 1000)

    no_timeout.pymongo_test.drop_collection("test")
    no_timeout.pymongo_test.test.insert({"x": 1}, safe=True)

    # A $where clause that takes a second longer than the timeout
    where_func = delay(1 + timeout_sec)

    def get_x(db):
        doc = db.test.find().where(where_func).next()
        return doc["x"]

    self.assertEqual(1, get_x(no_timeout.pymongo_test))
    self.assertRaises(ConnectionFailure, get_x, timeout.pymongo_test)

    def get_x_timeout(db, t):
        doc = db.test.find(network_timeout=t).where(where_func).next()
        return doc["x"]

    self.assertEqual(1, get_x_timeout(timeout.pymongo_test, None))
    self.assertRaises(ConnectionFailure, get_x_timeout,
                      no_timeout.pymongo_test, 0.1)

    no_timeout.close()
    timeout.close()
def test_find_is_async(self):
    # Need parallel Javascript.
    if not (yield from at_least(self.cx, (3,))):
        raise SkipTest("Requires MongoDB >= 3.0")

    # Confirm find() is async by launching two operations which will finish
    # out of order. Also test that AsyncIOMotorClient doesn't reuse sockets
    # incorrectly.

    # Launch find operations for _id's 1 and 2 which will finish in order
    # 2, then 1.
    coll = self.collection
    yield from coll.insert_many([{'_id': 1}, {'_id': 2}])
    results = []

    futures = [asyncio.Future(loop=self.loop),
               asyncio.Future(loop=self.loop)]

    def callback(result, error):
        if result:
            results.append(result)
            futures.pop().set_result(None)

    # This find() takes 0.5 seconds.
    coll.find({'_id': 1, '$where': delay(0.5)}).limit(1).each(callback)

    # Very fast lookup.
    coll.find({'_id': 2}).limit(1).each(callback)

    yield from asyncio.gather(*futures, loop=self.loop)

    # Results were appended in order 2, 1.
    self.assertEqual([{'_id': 2}, {'_id': 1}], results)
def test_wait_queue_timeout(self):
    # Do a find_one that takes 1 second, and set waitQueueTimeoutMS to 500,
    # 5000, and None. Verify timeout iff max_wait_time < 1 sec.
    where_delay = 1
    yield from self.collection.insert({})
    for waitQueueTimeoutMS in (500, 5000, None):
        cx = self.asyncio_client(
            max_pool_size=1, waitQueueTimeoutMS=waitQueueTimeoutMS)

        yield from cx.open()
        pool = cx._get_primary_pool()
        if waitQueueTimeoutMS:
            self.assertEqual(
                waitQueueTimeoutMS, pool.wait_queue_timeout * 1000)
        else:
            self.assertTrue(pool.wait_queue_timeout is None)

        collection = cx.motor_test.test_collection
        future = collection.find_one({'$where': delay(where_delay)})
        if waitQueueTimeoutMS and waitQueueTimeoutMS < where_delay * 1000:
            with assert_raises(pymongo.errors.ConnectionFailure):
                yield from collection.find_one()
        else:
            # No error
            yield from collection.find_one()

        yield from future
        cx.close()
def test_find_is_async(self):
    # Confirm find() is async by launching two operations which will finish
    # out of order. Also test that MotorClient doesn't reuse sockets
    # incorrectly.

    # Launch find operations for _id's 1 and 2 which will finish in order
    # 2, then 1.
    coll = self.collection
    yield coll.insert([{'_id': 1}, {'_id': 2}])
    results = []

    futures = [Future(), Future()]

    def callback(result, error):
        if result:
            results.append(result)
            futures.pop().set_result(None)

    # This find() takes 0.5 seconds.
    coll.find({'_id': 1, '$where': delay(0.5)}).limit(1).each(callback)

    # Very fast lookup.
    coll.find({'_id': 2}).limit(1).each(callback)

    yield futures

    # Results were appended in order 2, 1.
    self.assertEqual([{'_id': 2}, {'_id': 1}], results)
def test_find_one_is_async(self):
    # Confirm find_one() is async by launching two operations which will
    # finish out of order.

    # Launch 2 find_one operations for _id's 1 and 2, which will finish in
    # order 2 then 1.
    coll = self.collection
    yield coll.insert([{'_id': 1}, {'_id': 2}])
    results = []

    futures = [Future(), Future()]

    def callback(result, error):
        if result:
            results.append(result)
            futures.pop().set_result(None)

    # This find_one() takes 3 seconds.
    coll.find_one({'_id': 1, '$where': delay(3)}, callback=callback)

    # Very fast lookup.
    coll.find_one({'_id': 2}, callback=callback)

    yield futures

    # Results were appended in order 2, 1.
    self.assertEqual([{'_id': 2}, {'_id': 1}], results)
def f():
    for _ in range(5):
        collection.find_one({'$where': delay(0.1)})
        assert len(cx_pool.sockets) <= max_pool_size

    with lock:
        self.n_passed += 1
def test_find_one_is_async(self):
    # Confirm find_one() is async by launching two operations which will
    # finish out of order.

    # Launch 2 find_one operations for _id's 1 and 2, which will finish in
    # order 2 then 1.
    results = []

    yield_points = [(yield gen.Callback(0)), (yield gen.Callback(1))]

    def callback(result, error):
        if result:
            results.append(result)
            yield_points.pop()()

    # This find_one() takes 3 seconds
    self.cx.pymongo_test.test_collection.find_one(
        {'_id': 1, '$where': delay(3)},
        fields={'s': True, '_id': False},
        callback=callback)

    # Very fast lookup
    self.cx.pymongo_test.test_collection.find_one(
        {'_id': 2},
        fields={'s': True, '_id': False},
        callback=callback)

    yield gen.WaitAll([0, 1])

    # Results were appended in order 2, 1
    self.assertEqual([{'s': hex(s)} for s in (2, 1)], results)
def test_max_concurrent(self, done):
    if not self.sync_cx.server_info().get('javascriptEngine') == 'V8':
        raise SkipTest("Need multithreaded Javascript in mongod for test")

    # Make sure we can override max_size and max_concurrent
    max_pool_size = 5
    max_concurrent = 20

    cx = motor.MotorClient(
        host, port,
        max_pool_size=max_pool_size,
        max_concurrent=max_concurrent
    ).open_sync()

    pool = cx._get_pools()[0]
    self.assertEqual(max_pool_size, pool.max_size)
    self.assertEqual(max_concurrent, pool.max_concurrent)

    # Start empty
    self.assertEqual(0, pool.motor_sock_counter.count())
    self.assertEqual(0, len(pool.sockets))

    # Grow to max_concurrent
    ops_completed = yield gen.Callback('ops_completed')
    nops = 100
    results = []

    def callback(i, result, error):
        self.assertFalse(error)
        results.append(i)
        if len(results) == nops:
            ops_completed()

    collection = cx.pymongo_test.test_collection
    for i in range(nops):
        # Introduce random delay, avg 5ms, just to make sure we're async
        collection.find_one(
            {'$where': delay(random.random() / 10)},
            callback=functools.partial(callback, i))

        # Active sockets tops out at max_concurrent, which defaults to 100
        expected_active_socks = min(max_concurrent, i + 1)
        self.assertEqual(
            expected_active_socks, pool.motor_sock_counter.count())

        self.assertEqual(0, len(pool.sockets))

    yield gen.Wait('ops_completed')

    # All ops completed, but not in order
    self.assertEqual(list(range(nops)), sorted(results))
    self.assertNotEqual(list(range(nops)), results)

    # Shrunk back to max_pool_size
    self.assertEqual(max_pool_size, pool.motor_sock_counter.count())
    self.assertEqual(max_pool_size, len(pool.sockets))

    done()
def run_mongo_thread(self):
    # Do an operation that requires a socket.
    # test_max_pool_size uses this to spin up lots of threads requiring
    # lots of simultaneous connections, to ensure that Pool obeys its
    # max_size configuration and closes extra sockets as they're returned.
    #
    # We need a delay here to ensure that more than max_size sockets are
    # needed at once.
    for i in range(self.start_request):
        self.connection.start_request()

    self.connection[DB].test.find_one({'$where': delay(0.1)})

    for i in range(self.end_request):
        self.connection.end_request()
def test_max_size(self):
    if not test.sync_cx.server_info().get('javascriptEngine') == 'V8':
        raise SkipTest("Need multithreaded Javascript in mongod for test")

    max_pool_size = 5
    cx = self.motor_client(max_pool_size=max_pool_size)

    # Lazy connection.
    self.assertEqual(None, cx._get_primary_pool())
    yield cx.db.collection.find_one()
    pool = cx._get_primary_pool()
    self.assertEqual(max_pool_size, pool.max_size)
    self.assertEqual(1, len(pool.sockets))
    self.assertEqual(1, pool.motor_sock_counter)

    # Grow to max_pool_size.
    ops_completed = Future()
    nops = 100
    results = []

    def callback(i, result, error):
        self.assertFalse(error)
        results.append(i)
        if len(results) == nops:
            ops_completed.set_result(None)

    collection = cx.motor_test.test_collection
    yield collection.insert({})  # Need a document.

    for i in range(nops):
        # Introduce random delay, avg 5ms, just to make sure we're async.
        collection.find_one(
            {'$where': delay(random.random() / 10)},
            callback=functools.partial(callback, i))

        # Active sockets tops out at max_pool_size.
        expected_active_socks = min(max_pool_size, i + 1)
        self.assertEqual(expected_active_socks, pool.motor_sock_counter)
        self.assertEqual(0, len(pool.sockets))

    yield ops_completed

    # All ops completed, but not in order.
    self.assertEqual(list(range(nops)), sorted(results))
    self.assertNotEqual(list(range(nops)), results)

    self.assertEqual(max_pool_size, len(pool.sockets))
    self.assertEqual(max_pool_size, pool.motor_sock_counter)

    cx.close()
def find_slow():
    if use_request:
        cx.start_request()

    history.append('find_slow start')

    # Javascript function that pauses 5 seconds.
    fn = delay(5)
    self.assertEqual(1, db.test.find({"$where": fn}).count())

    history.append('find_slow done')

    if use_request:
        cx.end_request()
def test_timeout(self):
    # Launch two slow find_ones. The one with a timeout should get an error
    no_timeout = yield self.motor_client()
    timeout = yield self.motor_client(host, port, socketTimeoutMS=100)
    query = {'$where': delay(0.5), '_id': 1}
    timeout_fut = timeout.pymongo_test.test_collection.find_one(query)
    notimeout_fut = no_timeout.pymongo_test.test_collection.find_one(query)

    error = None
    try:
        yield [timeout_fut, notimeout_fut]
    except pymongo.errors.AutoReconnect as e:
        error = e
def test_interrupt_signal(self):
    if sys.platform.startswith('java'):
        raise SkipTest("Can't test interrupts in Jython")

    # Test fix for PYTHON-294 -- make sure client closes its socket if it
    # gets an interrupt while waiting to recv() from it.
    c = self._get_client()
    db = c.pymongo_test

    # A $where clause which takes 1.5 sec to execute
    where = delay(1.5)

    # Need exactly 1 document so find() will execute its $where clause once
    db.drop_collection('foo')
    db.foo.insert({'_id': 1})

    old_signal_handler = None
    try:
        def interrupter():
            time.sleep(0.25)
            # Raises KeyboardInterrupt in the main thread
            thread.interrupt_main()

        thread.start_new_thread(interrupter, ())

        raised = False
        try:
            # Will be interrupted by a KeyboardInterrupt.
            db.foo.find({'$where': where}).next()
        except KeyboardInterrupt:
            raised = True

        # Can't use self.assertRaises() because it doesn't catch system
        # exceptions
        self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt")

        # Raises AssertionError due to PYTHON-294 -- Mongo's response to the
        # previous find() is still waiting to be read on the socket, so the
        # request id's don't match.
        self.assertEqual(
            {'_id': 1},
            db.foo.find().next()
        )
    finally:
        if old_signal_handler:
            signal.signal(signal.SIGALRM, old_signal_handler)
def test_network_timeout(self):
    no_timeout = self._get_client()
    timeout_sec = 1
    timeout = self._get_client(socketTimeoutMS=timeout_sec * 1000)

    no_timeout.pymongo_test.drop_collection("test")
    no_timeout.pymongo_test.test.insert({"x": 1})

    # A $where clause that takes a second longer than the timeout.
    query = {'$where': delay(1 + timeout_sec)}
    no_timeout.pymongo_test.test.find_one(query)  # No error.

    try:
        timeout.pymongo_test.test.find_one(query)
    except AutoReconnect as e:
        self.assertTrue('%d: timed out' % (port,) in e.args[0])
def test_socket_timeout(self):
    no_timeout = self.client
    timeout_sec = 1
    timeout = rs_or_single_client(socketTimeoutMS=1000 * timeout_sec)

    no_timeout.pymongo_test.drop_collection("test")
    no_timeout.pymongo_test.test.insert_one({"x": 1})

    # A $where clause that takes a second longer than the timeout
    where_func = delay(timeout_sec + 1)

    def get_x(db):
        doc = next(db.test.find().where(where_func))
        return doc["x"]

    self.assertEqual(1, get_x(no_timeout.pymongo_test))
    self.assertRaises(NetworkTimeout, get_x, timeout.pymongo_test)
def find_slow():
    if use_request:
        cx.start_request()

    history.append('find_slow start')

    # Javascript function that pauses for half a second
    where = delay(0.5)
    results['find_slow_result'] = list(db.test.find(
        {'$where': where}
    ))

    history.append('find_slow done')

    if use_request:
        cx.end_request()
def test_timeout(self):
    # Launch two slow find_ones. The one with a timeout should get an error
    no_timeout = self.motor_client()
    timeout = self.motor_client(host, port, socketTimeoutMS=100)
    query = {'$where': delay(0.5), '_id': 1}

    # Need a document, or the $where clause isn't executed.
    yield no_timeout.motor_test.test_collection.insert({'_id': 1})
    timeout_fut = timeout.motor_test.test_collection.find_one(query)
    notimeout_fut = no_timeout.motor_test.test_collection.find_one(query)

    error = None
    try:
        yield [timeout_fut, notimeout_fut]
    except pymongo.errors.AutoReconnect as e:
        error = e
def test_timeout(self, done):
    # Launch two slow find_ones. The one with a timeout should get an error
    no_timeout = self.motor_connection(host, port)
    timeout = self.motor_connection(host, port, socketTimeoutMS=100)
    query = {"$where": delay(0.5), "_id": 1}

    timeout.pymongo_test.test_collection.find_one(
        query, callback=(yield gen.Callback("timeout")))

    no_timeout.pymongo_test.test_collection.find_one(
        query, callback=(yield gen.Callback("no_timeout")))

    timeout_result, no_timeout_result = yield gen.WaitAll(
        ["timeout", "no_timeout"])

    self.assertEqual(str(timeout_result.args[1]), "timed out")
    self.assertTrue(
        isinstance(timeout_result.args[1], pymongo.errors.AutoReconnect))
    self.assertEqual({"_id": 1, "s": hex(1)}, no_timeout_result.args[0])

    done()
def find_slow():
    if use_request:
        cx.start_request()

    history.append('find_slow start')

    # Javascript function that pauses 5 sec. 'nolock' allows find_fast
    # to start and finish while we're waiting for this.
    fn = delay(5)
    self.assertEqual(
        {'ok': 1.0, 'retval': True},
        db.command('eval', fn, nolock=True))

    history.append('find_slow done')

    if use_request:
        cx.end_request()
def test_socket_timeout(self):
    no_timeout = MongoClient(host, port)
    timeout_sec = 1
    timeout = MongoClient(
        host, port, socketTimeoutMS=1000 * timeout_sec)

    no_timeout.pymongo_test.drop_collection("test")
    no_timeout.pymongo_test.test.insert({"x": 1})

    # A $where clause that takes a second longer than the timeout
    where_func = delay(timeout_sec + 1)

    def get_x(db):
        doc = db.test.find().where(where_func).next()
        return doc["x"]

    self.assertEqual(1, get_x(no_timeout.pymongo_test))
    self.assertRaises(ConnectionFailure, get_x, timeout.pymongo_test)
def test_interrupt_signal(self):
    if sys.platform.startswith('java'):
        # We can't figure out how to raise an exception on a thread that's
        # blocked on a socket, whether that's the main thread or a worker,
        # without simply killing the whole thread in Jython. This suggests
        # PYTHON-294 can't actually occur in Jython.
        raise SkipTest("Can't test interrupts in Jython")

    # Test fix for PYTHON-294 -- make sure Connection closes its
    # socket if it gets an interrupt while waiting to recv() from it.
    c = get_connection()
    db = c.pymongo_test

    # A $where clause which takes 1.5 sec to execute
    where = delay(1.5)

    # Need exactly 1 document so find() will execute its $where clause once
    db.drop_collection('foo')
    db.foo.insert({'_id': 1}, safe=True)

    def interrupter():
        # Raises KeyboardInterrupt in the main thread
        time.sleep(0.25)
        thread.interrupt_main()

    thread.start_new_thread(interrupter, ())

    raised = False
    try:
        # Will be interrupted by a KeyboardInterrupt.
        db.foo.find({'$where': where}).next()
    except KeyboardInterrupt:
        raised = True

    # Can't use self.assertRaises() because it doesn't catch system
    # exceptions
    self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt")

    # Raises AssertionError due to PYTHON-294 -- Mongo's response to the
    # previous find() is still waiting to be read on the socket, so the
    # request id's don't match.
    self.assertEqual(
        {'_id': 1},
        db.foo.find().next()
    )
def test_max_size(self):
    if not test.env.v8:
        raise SkipTest("Need multithreaded Javascript in mongod for test")

    max_pool_size = 5
    cx = self.asyncio_client(max_pool_size=max_pool_size)

    # Lazy connection.
    self.assertEqual(None, cx._get_primary_pool())
    yield from cx.motor_test.test_collection.remove()
    pool = cx._get_primary_pool()
    self.assertEqual(max_pool_size, pool.max_size)
    self.assertEqual(1, len(pool.sockets))
    self.assertEqual(1, pool.motor_sock_counter)

    # Grow to max_pool_size.
    ops_completed = asyncio.Future(loop=self.loop)
    nops = 100
    results = []

    def callback(i, result, error):
        self.assertFalse(error)
        self.assertFalse(pool.motor_sock_counter > max_pool_size)
        results.append(i)
        if len(results) == nops:
            ops_completed.set_result(None)

    collection = cx.motor_test.test_collection
    yield from collection.insert({})  # Need a document.

    for i in range(nops):
        # Introduce random delay, avg 5ms, just to make sure we're async.
        collection.find_one(
            {'$where': delay(random.random() / 10)},
            callback=functools.partial(callback, i))

    yield from ops_completed

    # All ops completed, but not in order.
    self.assertEqual(list(range(nops)), sorted(results))
    self.assertNotEqual(list(range(nops)), results)

    self.assertEqual(max_pool_size, len(pool.sockets))
    self.assertEqual(max_pool_size, pool.motor_sock_counter)

    cx.close()
def test_max_wait(self, done):
    where_delay = .4
    for max_wait_time in .2, .6, None:
        cx = motor.MotorClient(
            host, port,
            max_concurrent=1,
            max_wait_time=max_wait_time,
        ).open_sync()

        pool = cx._get_pools()[0]
        self.assertEqual(max_wait_time, pool.max_wait_time)
        collection = cx.pymongo_test.test_collection
        cb = yield gen.Callback('find_one')
        collection.find_one({'$where': delay(where_delay)}, callback=cb)
        if max_wait_time and max_wait_time < where_delay:
            yield AssertRaises(motor.MotorPoolTimeout, collection.find_one)
        else:
            # No error
            yield motor.Op(collection.find_one)

        yield gen.Wait('find_one')

    done()
def test_timeout(self):
    # Launch two slow find_ones. The one with a timeout should get an error
    no_timeout_client = self.asyncio_client()
    timeout = self.asyncio_client(socketTimeoutMS=100)
    query = {'$where': delay(0.4), '_id': 1}

    # Need a document, or the $where clause isn't executed.
    test_collection = no_timeout_client.motor_test.test_collection
    yield from test_collection.drop()
    yield from test_collection.insert({'_id': 1})
    timeout_fut = timeout.motor_test.test_collection.find_one(query)
    notimeout_fut = no_timeout_client.motor_test.test_collection.find_one(
        query)

    yield from asyncio.gather(timeout_fut, notimeout_fut,
                              return_exceptions=True, loop=self.loop)

    self.assertEqual(str(timeout_fut.exception()), 'timed out')
    self.assertEqual({'_id': 1}, notimeout_fut.result())
    no_timeout_client.close()
    timeout.close()
def find_slow():
    if use_request:
        cx.start_request()

    history.append("find_slow start")

    # Javascript function that pauses N seconds per document
    fn = delay(10)
    if is_mongos(db.connection) or not version.at_least(
            db.connection, (1, 7, 2)):
        # mongos doesn't support eval so we have to use $where
        # which is less reliable in this context.
        self.assertEqual(1, db.test.find({"$where": fn}).count())
    else:
        # 'nolock' allows find_fast to start and finish while we're
        # waiting for this to complete.
        self.assertEqual(
            {"ok": 1.0, "retval": True},
            db.command("eval", fn, nolock=True))

    history.append("find_slow done")

    if use_request:
        cx.end_request()
def test_max_wait(self):
    # Do a find_one that takes 1 second, and set max_wait_time to .5 sec,
    # 2 sec, and None. Verify timeout iff max_wait_time < 1 sec.
    where_delay = 1
    for max_wait_time in .5, 2, None:
        cx = yield self.motor_client(
            max_concurrent=1, max_wait_time=max_wait_time)

        pool = cx._get_pools()[0]
        self.assertEqual(max_wait_time, pool.max_wait_time)
        collection = cx.pymongo_test.test_collection
        cb = yield gen.Callback('find_one')
        collection.find_one({'$where': delay(where_delay)}, callback=cb)
        if max_wait_time and max_wait_time < where_delay:
            with assert_raises(motor.MotorPoolTimeout):
                yield collection.find_one()
        else:
            # No error
            yield collection.find_one()

        yield gen.Wait('find_one')
        cx.close()
def run(self):
    self.db.authenticate(self.username, self.password)
    assert self.db.test.find_one({'$where': delay(1)}) is not None
    self.success = True
def f():
    for _ in range(5):
        collection.find_one({'$where': delay(0.1)})

    with lock:
        self.n_passed += 1
def run(self):
    assert self.collection.find_one({'$where': delay(1)}) is not None
    self.success = True
def test_interrupt_signal(self):
    if sys.platform.startswith('java'):
        raise SkipTest("Can't test interrupts in Jython")

    # Test fix for PYTHON-294 -- make sure client closes its socket if it
    # gets an interrupt while waiting to recv() from it.
    c = self._get_client()
    db = c.pymongo_test

    # A $where clause which takes 1.5 sec to execute
    where = delay(1.5)

    # Need exactly 1 document so find() will execute its $where clause once
    db.drop_collection('foo')
    db.foo.insert({'_id': 1})

    old_signal_handler = None
    try:
        # Platform-specific hacks for raising a KeyboardInterrupt on the
        # main thread while find() is in-progress: On Windows, SIGALRM is
        # unavailable so we use a second thread. In our Bamboo setup on
        # Linux, the thread technique causes an error in the test at
        # sock.recv():
        #     TypeError: 'int' object is not callable
        # We don't know what causes this in Bamboo, so we hack around it.
        if sys.platform == 'win32':
            def interrupter():
                time.sleep(0.25)
                # Raises KeyboardInterrupt in the main thread
                thread.interrupt_main()

            thread.start_new_thread(interrupter, ())
        else:
            # Convert SIGALRM to SIGINT -- it's hard to schedule a SIGINT
            # for one second in the future, but easy to schedule SIGALRM.
            def sigalarm(num, frame):
                raise KeyboardInterrupt

            old_signal_handler = signal.signal(signal.SIGALRM, sigalarm)
            signal.alarm(1)

        raised = False
        try:
            # Will be interrupted by a KeyboardInterrupt.
            db.foo.find({'$where': where}).next()
        except KeyboardInterrupt:
            raised = True

        # Can't use self.assertRaises() because it doesn't catch system
        # exceptions
        self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt")

        # Raises AssertionError due to PYTHON-294 -- Mongo's response to the
        # previous find() is still waiting to be read on the socket, so the
        # request id's don't match.
        self.assertEqual({'_id': 1}, db.foo.find().next())
    finally:
        if old_signal_handler:
            signal.signal(signal.SIGALRM, old_signal_handler)
def find_one():
    docs.append(client.test.test.find_one({'$where': delay(0.001)}))
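# Note: every snippet above relies on a shared `delay()` test helper that
# builds a JavaScript `$where` predicate which sleeps on the server, so the
# client-side timeouts and concurrency checks have something slow to wait on.
# A minimal sketch of such a helper is below, assuming server-side JavaScript
# (and its sleep() function) is enabled in mongod; it illustrates the idea and
# is not necessarily the projects' actual test-utility implementation.
def delay(sec):
    # Each evaluation sleeps `sec` seconds; returning true keeps documents
    # matching so find()/find_one() still produce results after the pause.
    return 'function() { sleep(%f * 1000); return true; }' % sec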