def test_cursor_manager(self):
    self.close_was_called = False

    test_case = self

    class CM(CursorManager):
        def __init__(self, client):
            super(CM, self).__init__(client)

        def close(self, cursor_id, address):
            test_case.close_was_called = True
            super(CM, self).close(cursor_id, address)

    with client_knobs(kill_cursor_frequency=0.01):
        client = rs_or_single_client(maxPoolSize=1)
        client.set_cursor_manager(CM)

        # Create a cursor on the same client so we're certain the getMore
        # is sent after the killCursors message.
        cursor = client.pymongo_test.test.find().batch_size(1)
        next(cursor)
        client.close_cursor(
            cursor.cursor_id,
            _CursorAddress(self.client.address, self.collection.full_name))

        def raises_cursor_not_found():
            try:
                next(cursor)
                return False
            except CursorNotFound:
                return True

        wait_until(raises_cursor_not_found, 'close cursor')
        self.assertTrue(self.close_was_called)
def create_mock_monitor(self, responses, uri, expected_results):
    with client_knobs(heartbeat_frequency=0.1,
                      events_queue_frequency=0.1):
        class MockMonitor(Monitor):
            def _check_with_socket(self, sock_info):
                if isinstance(responses[1], Exception):
                    raise responses[1]
                return IsMaster(responses[1]), 99

        m = single_client(
            h=uri,
            event_listeners=(self.all_listener,),
            _monitor_class=MockMonitor,
            _pool_class=MockPool)

        expected_len = len(expected_results)
        wait_until(lambda: len(self.all_listener.results) == expected_len,
                   "publish all events", timeout=15)

        try:
            for i in range(len(expected_results)):
                result = (self.all_listener.results[i]
                          if len(self.all_listener.results) > i else None)
                self.assertEqual(expected_results[i],
                                 result.__class__.__name__)
                self.assertEqual(result.connection_id, responses[0])
                if expected_results[i] != 'ServerHeartbeatStartedEvent':
                    if isinstance(result.reply, IsMaster):
                        self.assertEqual(result.duration, 99)
                        self.assertEqual(result.reply._doc, responses[1])
                    else:
                        self.assertEqual(result.reply, responses[1])
        finally:
            m.close()
def test_wire_version(self):
    c = MockClient(
        standalones=[],
        members=['a:1', 'b:2', 'c:3'],
        mongoses=[],
        host='a:1',
        replicaSet='rs',
        connect=False)
    self.addCleanup(c.close)

    c.set_wire_version_range('a:1', 3, 7)
    c.set_wire_version_range('b:2', 2, 3)
    c.set_wire_version_range('c:3', 3, 4)
    c.db.command('ismaster')  # Connect.

    # A secondary doesn't overlap with us.
    c.set_wire_version_range('b:2', MAX_SUPPORTED_WIRE_VERSION + 1,
                             MAX_SUPPORTED_WIRE_VERSION + 2)

    def raises_configuration_error():
        try:
            c.db.collection.find_one()
            return False
        except ConfigurationError:
            return True

    wait_until(raises_configuration_error,
               'notice we are incompatible with server')

    self.assertRaises(ConfigurationError,
                      c.db.collection.insert_one, {})
def test_failover(self):
    nthreads = 10
    client = connected(self.mock_client(localThresholdMS=0.001))
    wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses')

    # Our chosen mongos goes down.
    client.kill_host('a:1')

    # Trigger failover to higher-latency nodes. AutoReconnect should be
    # raised at most once in each thread.
    passed = []

    def f():
        try:
            client.db.command('ping')
        except AutoReconnect:
            # Second attempt succeeds.
            client.db.command('ping')

        passed.append(True)

    threads = [threading.Thread(target=f) for _ in range(nthreads)]
    for t in threads:
        t.start()

    for t in threads:
        t.join()

    self.assertEqual(nthreads, len(passed))

    # Down host removed from list.
    self.assertEqual(2, len(client.nodes))
def test_client(self):
    c = MockClient(
        standalones=[],
        members=['a:1', 'b:2', 'c:3'],
        mongoses=[],
        host='a:1,b:2,c:3',
        replicaSet='rs',
        serverSelectionTimeoutMS=100)

    # MongoClient connects to primary by default.
    wait_until(lambda: c.address is not None, 'connect to primary')
    self.assertEqual(c.address, ('a', 1))

    # C is brought up as a standalone.
    c.mock_members.remove('c:3')
    c.mock_standalones.append('c:3')

    # Fail over.
    c.kill_host('a:1')
    c.kill_host('b:2')

    # Force reconnect.
    c.close()

    with self.assertRaises(AutoReconnect):
        c.db.command('ismaster')

    self.assertEqual(c.address, None)
def _test_network_error(self, operation_callback):
    # Verify only the disconnected server is reset by a network failure.

    # Disable background refresh.
    with client_knobs(heartbeat_frequency=999999):
        c = MockClient(
            standalones=[],
            members=["a:1", "b:2"],
            mongoses=[],
            host="a:1",
            replicaSet="rs",
            connect=False)

        # Set host-specific information so we can test whether it is reset.
        c.set_wire_version_range("a:1", 0, 1)
        c.set_wire_version_range("b:2", 0, 2)
        c._get_topology().select_servers(writable_server_selector)
        wait_until(lambda: len(c.nodes) == 2, "connect")

        c.kill_host("a:1")

        # MongoClient is disconnected from the primary.
        self.assertRaises(AutoReconnect, operation_callback, c)

        # The primary's description is reset.
        server_a = c._get_topology().get_server_by_address(("a", 1))
        sd_a = server_a.description
        self.assertEqual(SERVER_TYPE.Unknown, sd_a.server_type)
        self.assertEqual(0, sd_a.min_wire_version)
        self.assertEqual(0, sd_a.max_wire_version)

        # ...but not the secondary's.
        server_b = c._get_topology().get_server_by_address(("b", 2))
        sd_b = server_b.description
        self.assertEqual(SERVER_TYPE.RSSecondary, sd_b.server_type)
        self.assertEqual(0, sd_b.min_wire_version)
        self.assertEqual(2, sd_b.max_wire_version)
def test_discover_primary(self): # Disable background refresh. with client_knobs(heartbeat_frequency=999999): c = MockClient( standalones=[], members=["a:1", "b:2", "c:3"], mongoses=[], host="b:2", # Pass a secondary. replicaSet="rs", ) wait_until(lambda: len(c.nodes) == 3, "connect") self.assertEqual(c.address, ("a", 1)) # Fail over. c.kill_host("a:1") c.mock_primary = "b:2" c.close() self.assertEqual(0, len(c.nodes)) t = c._get_topology() t.select_servers(writable_server_selector) # Reconnect. self.assertEqual(c.address, ("b", 2)) # a:1 not longer in nodes. self.assertLess(len(c.nodes), 3) # c:3 is rediscovered. t.select_server_by_address(("c", 3))
def test_ipv6(self): c = MongoClient("mongodb://[::1]:%d" % (port,), replicaSet=self.name) # Client switches to IPv4 once it has first ismaster response. msg = 'discovered primary with IPv4 address "%r"' % (self.primary,) wait_until(lambda: c.primary == self.primary, msg) # Same outcome with both IPv4 and IPv6 seeds. c = MongoClient("[::1]:%d,localhost:%d" % (port, port), replicaSet=self.name) wait_until(lambda: c.primary == self.primary, msg) if client_context.auth_enabled: auth_str = "%s:%s@" % (db_user, db_pwd) else: auth_str = "" uri = "mongodb://%slocalhost:%d,[::1]:%d" % (auth_str, port, port) client = MongoClient(uri, replicaSet=self.name) client.pymongo_test.test.insert_one({"dummy": u"object"}) client.pymongo_test_bernie.test.insert_one({"dummy": u"object"}) dbs = client.database_names() self.assertTrue("pymongo_test" in dbs) self.assertTrue("pymongo_test_bernie" in dbs) client.close()
def test_ipv6(self):
    if client_context.ssl:
        # http://bugs.python.org/issue13034
        if sys.version_info[:2] == (2, 6):
            raise SkipTest("Python 2.6 can't parse SANs")

        if not HAVE_IPADDRESS:
            raise SkipTest("Need the ipaddress module to test with SSL")

    port = client_context.port
    c = rs_client("mongodb://[::1]:%d" % (port,))

    # Client switches to IPv4 once it has first ismaster response.
    msg = 'discovered primary with IPv4 address "%r"' % (self.primary,)
    wait_until(lambda: c.primary == self.primary, msg)

    # Same outcome with both IPv4 and IPv6 seeds.
    c = rs_client("mongodb://[::1]:%d,localhost:%d" % (port, port))
    wait_until(lambda: c.primary == self.primary, msg)

    if client_context.auth_enabled:
        auth_str = "%s:%s@" % (db_user, db_pwd)
    else:
        auth_str = ""

    uri = "mongodb://%slocalhost:%d,[::1]:%d" % (auth_str, port, port)
    client = rs_client(uri)
    client.pymongo_test.test.insert_one({"dummy": u"object"})
    client.pymongo_test_bernie.test.insert_one({"dummy": u"object"})

    dbs = client.database_names()
    self.assertTrue("pymongo_test" in dbs)
    self.assertTrue("pymongo_test_bernie" in dbs)
    client.close()
def test_session_gc(self):
    client = rs_client()
    self.addCleanup(client.close)
    pool = get_pool(client)
    session = client.start_session()
    session.start_transaction()
    client.test_session_gc.test.find_one({}, session=session)
    if client_context.load_balancer:
        self.assertEqual(pool.active_sockets, 1)  # Pinned.

    thread = PoolLocker(pool)
    thread.start()
    self.assertTrue(thread.locked.wait(5), 'timed out')
    # Garbage collect the session while the pool is locked to ensure we
    # don't deadlock.
    del session
    # On PyPy it can take a few rounds to collect the session.
    for _ in range(3):
        gc.collect()
    thread.unlock.set()
    thread.join(5)
    self.assertFalse(thread.is_alive())
    self.assertIsNone(thread.exc)

    wait_until(lambda: pool.active_sockets == 0, 'return socket')
    # Run another operation to ensure the socket still works.
    client[self.db.name].test.delete_many({})
def test_client(self):
    c = MockClient(
        standalones=[],
        members=['a:1', 'b:2'],
        mongoses=[],
        host='a:1',
        replicaSet='rs')
    self.addCleanup(c.close)

    wait_until(lambda: len(c.nodes) == 2, 'discover both nodes')

    # MongoClient connects to primary by default.
    self.assertEqual(c.address, ('a', 1))
    self.assertEqual(set([('a', 1), ('b', 2)]), c.nodes)

    # C is added.
    c.mock_members.append('c:3')
    c.mock_ismaster_hosts.append('c:3')

    c.close()
    c.db.command('ismaster')

    self.assertEqual(c.address, ('a', 1))

    wait_until(lambda: set([('a', 1), ('b', 2), ('c', 3)]) == c.nodes,
               'reconnect to both secondaries')
def test_client(self):
    c = MockClient(
        standalones=[],
        members=['a:1', 'b:2'],
        mongoses=[],
        host='a:1',
        replicaSet='rs')

    wait_until(lambda: len(c.nodes) == 2, 'discover both nodes')

    # MongoClient connects to primary by default.
    self.assertEqual(c.address, ('a', 1))
    self.assertEqual(set([('a', 1), ('b', 2)]), c.nodes)

    # C is added.
    c.mock_members.append('c:3')
    c.mock_ismaster_hosts.append('c:3')

    c.close()
    c.db.command('ismaster')

    self.assertEqual(c.address, ('a', 1))

    wait_until(lambda: set([('a', 1), ('b', 2), ('c', 3)]) == c.nodes,
               'reconnect to both secondaries')
def test_local_threshold(self):
    client = connected(self.mock_client(localThresholdMS=30))
    self.assertEqual(30, client.options.local_threshold_ms)
    wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses')
    topology = client._topology

    # All are within a 30-ms latency window, see self.mock_client().
    self.assertEqual(set([('a', 1), ('b', 2), ('c', 3)]),
                     writable_addresses(topology))

    # No error
    client.admin.command('ping')

    client = connected(self.mock_client(localThresholdMS=0))
    self.assertEqual(0, client.options.local_threshold_ms)

    # No error
    client.db.command('ping')

    # Our chosen mongos goes down.
    client.kill_host('%s:%s' % next(iter(client.nodes)))
    try:
        client.db.command('ping')
    except:
        pass

    # We eventually connect to a new mongos.
    def connect_to_new_mongos():
        try:
            return client.db.command('ping')
        except AutoReconnect:
            pass

    wait_until(connect_to_new_mongos, 'connect to a new mongos')
def test_local_threshold(self):
    client = connected(self.mock_client(localThresholdMS=30))
    self.assertEqual(30, client.local_threshold_ms)
    wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses')
    topology = client._topology

    # All are within a 30-ms latency window, see self.mock_client().
    self.assertEqual(set([('a', 1), ('b', 2), ('c', 3)]),
                     writable_addresses(topology))

    # No error
    client.admin.command('ismaster')

    client = connected(self.mock_client(localThresholdMS=0))
    self.assertEqual(0, client.local_threshold_ms)

    # No error
    client.db.command('ismaster')

    # Our chosen mongos goes down.
    client.kill_host('%s:%s' % next(iter(client.nodes)))
    try:
        client.db.command('ismaster')
    except:
        pass

    # We eventually connect to a new mongos.
    def connect_to_new_mongos():
        try:
            return client.db.command('ismaster')
        except AutoReconnect:
            pass

    wait_until(connect_to_new_mongos, 'connect to a new mongos')
def wait_for_event(self, op):
    """Run the 'waitForEvent' operation."""
    event = OBJECT_TYPES[op['event']]
    count = op['count']
    timeout = op.get('timeout', 10000) / 1000.0
    wait_until(lambda: self.listener.event_count(event) >= count,
               'find %s %s event(s)' % (count, event),
               timeout=timeout)
def run_test(self): if not _HAVE_DNSPYTHON: raise unittest.SkipTest("DNS tests require the dnspython module") uri = test_case['uri'] seeds = test_case['seeds'] hosts = test_case['hosts'] options = test_case.get('options') if seeds: seeds = split_hosts(','.join(seeds)) if hosts: hosts = frozenset(split_hosts(','.join(hosts))) if options: for key, value in options.items(): # Convert numbers to strings for comparison options[key] = str(value) if seeds: result = parse_uri(uri, validate=False) self.assertEqual(sorted(result['nodelist']), sorted(seeds)) if options: self.assertEqual(result['options'], options) hostname = next(iter(client_context.client.nodes))[0] # The replica set members must be configured as 'localhost'. if hostname == 'localhost': client = MongoClient(uri, **_SSL_OPTS) # Force server selection client.admin.command('ismaster') wait_until( lambda: hosts == client.nodes, 'match test hosts to client nodes') else: self.assertRaises( ConfigurationError, parse_uri, uri, validate=False)
def test_wire_version(self):
    c = MockClient(
        standalones=[],
        members=['a:1', 'b:2', 'c:3'],
        mongoses=[],
        host='a:1',
        replicaSet='rs',
        connect=False)

    c.set_wire_version_range('a:1', 1, 5)
    c.set_wire_version_range('b:2', 0, 1)
    c.set_wire_version_range('c:3', 1, 2)
    c.db.command('ismaster')  # Connect.

    c.set_wire_version_range('a:1', 2, 2)

    # A secondary doesn't overlap with us.
    c.set_wire_version_range('b:2', 5, 6)

    def raises_configuration_error():
        try:
            c.db.collection.find_one()
            return False
        except ConfigurationError:
            return True

    wait_until(raises_configuration_error,
               'notice we are incompatible with server')

    self.assertRaises(ConfigurationError,
                      c.db.collection.insert_one, {})
def test_read_with_failover(self):
    c = MongoClient(
        self.seed,
        replicaSet=self.name,
        serverSelectionTimeoutMS=self.server_selection_timeout)
    wait_until(lambda: c.primary, "discover primary")
    wait_until(lambda: len(c.secondaries) == 2, "discover secondaries")

    def iter_cursor(cursor):
        for _ in cursor:
            pass
        return True

    w = len(c.secondaries) + 1
    db = c.get_database("pymongo_test",
                        write_concern=WriteConcern(w=w))
    db.test.delete_many({})
    # Force replication
    db.test.insert_many([{'foo': i} for i in xrange(10)])
    self.assertEqual(10, db.test.count())

    db.read_preference = SECONDARY_PREFERRED
    cursor = db.test.find().batch_size(5)
    next(cursor)
    self.assertEqual(5, cursor._Cursor__retrieved)
    self.assertTrue(cursor.address in c.secondaries)
    ha_tools.kill_primary()

    # Primary failure shouldn't interrupt the cursor
    self.assertTrue(iter_cursor(cursor))
    self.assertEqual(10, cursor._Cursor__retrieved)
def test_kill_cursors_with_tuple(self): if (client_context.is_mongos and not client_context.version.at_least(2, 4, 7)): # Old mongos sends incorrectly formatted error response when # cursor isn't found, see SERVER-9738. raise SkipTest("Can't test kill_cursors against old mongos") self.collection = self.client.pymongo_test.test self.collection.drop() self.collection.insert_many([{'_id': i} for i in range(200)]) cursor = self.collection.find().batch_size(1) next(cursor) self.client.kill_cursors( [cursor.cursor_id], self.client.address) # Prevent killcursors from reaching the server while a getmore is in # progress -- the server logs "Assertion: 16089:Cannot kill active # cursor." time.sleep(2) def raises_cursor_not_found(): try: next(cursor) return False except CursorNotFound: return True wait_until(raises_cursor_not_found, 'close cursor')
def test_discover_primary(self):
    # Disable background refresh.
    with client_knobs(heartbeat_frequency=999999):
        c = MockClient(
            standalones=[],
            members=['a:1', 'b:2', 'c:3'],
            mongoses=[],
            host='b:2',  # Pass a secondary.
            replicaSet='rs')
        wait_until(lambda: len(c.nodes) == 3, 'connect')
        self.assertEqual(c.address, ('a', 1))

        # Fail over.
        c.kill_host('a:1')
        c.mock_primary = 'b:2'

        c.close()
        self.assertEqual(0, len(c.nodes))

        t = c._get_topology()
        t.select_servers(writable_server_selector)

        # Reconnect.
        self.assertEqual(c.address, ('b', 2))

        # a:1 is no longer in nodes.
        self.assertLess(len(c.nodes), 3)

        # c:3 is rediscovered.
        t.select_server_by_address(('c', 3))
def test_kill_cursors_with_tuple(self): if (client_context.is_mongos and not client_context.version.at_least(2, 4, 7)): # Old mongos sends incorrectly formatted error response when # cursor isn't found, see SERVER-9738. raise SkipTest("Can't test kill_cursors against old mongos") self.collection = self.client.pymongo_test.test self.collection.drop() self.collection.insert_many([{'_id': i} for i in range(200)]) cursor = self.collection.find().batch_size(1) next(cursor) self.client.kill_cursors([cursor.cursor_id], self.client.address) # Prevent killcursors from reaching the server while a getmore is in # progress -- the server logs "Assertion: 16089:Cannot kill active # cursor." time.sleep(2) def raises_cursor_not_found(): try: next(cursor) return False except CursorNotFound: return True wait_until(raises_cursor_not_found, 'close cursor')
def test_load_balancing(self):
    # Although the server selection JSON tests already prove that
    # select_servers works for sharded topologies, here we do an
    # end-to-end test of discovering servers' round trip times and
    # configuring localThresholdMS.
    client = connected(self.mock_client())
    wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses')

    # Prohibited for topology type Sharded.
    with self.assertRaises(InvalidOperation):
        client.address

    topology = client._topology
    self.assertEqual(TOPOLOGY_TYPE.Sharded,
                     topology.description.topology_type)

    # a and b are within the 15-ms latency window, see self.mock_client().
    self.assertEqual(set([('a', 1), ('b', 2)]),
                     writable_addresses(topology))

    client.mock_rtts['a:1'] = 0.040

    # Discover only b is within latency window.
    wait_until(lambda: set([('b', 2)]) == writable_addresses(topology),
               'discover server "a" is too far')
def test_ipv6(self):
    if client_context.ssl:
        if not HAVE_IPADDRESS:
            raise SkipTest("Need the ipaddress module to test with SSL")

    port = client_context.port
    c = rs_client("mongodb://[::1]:%d" % (port,))

    # Client switches to IPv4 once it has first ismaster response.
    msg = 'discovered primary with IPv4 address "%r"' % (self.primary,)
    wait_until(lambda: c.primary == self.primary, msg)

    # Same outcome with both IPv4 and IPv6 seeds.
    c = rs_client("mongodb://[::1]:%d,localhost:%d" % (port, port))
    wait_until(lambda: c.primary == self.primary, msg)

    if client_context.auth_enabled:
        auth_str = "%s:%s@" % (db_user, db_pwd)
    else:
        auth_str = ""

    uri = "mongodb://%slocalhost:%d,[::1]:%d" % (auth_str, port, port)
    client = rs_client(uri)
    client.pymongo_test.test.insert_one({"dummy": u"object"})
    client.pymongo_test_bernie.test.insert_one({"dummy": u"object"})

    dbs = client.list_database_names()
    self.assertTrue("pymongo_test" in dbs)
    self.assertTrue("pymongo_test_bernie" in dbs)
    client.close()
def test_local_threshold(self):
    client = connected(self.mock_client(localThresholdMS=30))
    self.assertEqual(30, client.local_threshold_ms)
    wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses')
    topology = client._topology

    # All are within a 30-ms latency window, see self.mock_client().
    self.assertEqual(set([('a', 1), ('b', 2), ('c', 3)]),
                     writable_addresses(topology))

    # No error
    client.db.collection.find_one()

    client = connected(self.mock_client(localThresholdMS=0))
    self.assertEqual(0, client.local_threshold_ms)

    # No error
    client.db.collection.find_one()

    # Our chosen mongos goes down.
    client.kill_host('%s:%s' % next(iter(client.nodes)))
    try:
        client.db.collection.find_one()
    except:
        pass

    # No error
    client.db.collection.find_one()
def test_unpin_for_non_transaction_operation(self):
    # Increase localThresholdMS and wait until both nodes are discovered
    # to avoid false positives.
    client = rs_client(client_context.mongos_seeds(),
                       localThresholdMS=1000)
    wait_until(lambda: len(client.nodes) > 1, "discover both mongoses")
    coll = client.test.test
    # Create the collection.
    coll.insert_one({})
    self.addCleanup(client.close)
    with client.start_session() as s:
        # Session is pinned to Mongos.
        with s.start_transaction():
            coll.insert_one({}, session=s)

        addresses = set()
        for _ in range(UNPIN_TEST_MAX_ATTEMPTS):
            cursor = coll.find({}, session=s)
            self.assertTrue(next(cursor))
            addresses.add(cursor.address)
            # Break early if we can.
            if len(addresses) > 1:
                break

    self.assertGreater(len(addresses), 1)
def test_load_balancing(self):
    listener = OvertCommandListener()
    # PYTHON-2584: Use a large localThresholdMS to avoid the impact of
    # varying RTTs.
    client = rs_client(client_context.mongos_seeds(),
                       appName='loadBalancingTest',
                       event_listeners=[listener],
                       localThresholdMS=10000)
    self.addCleanup(client.close)
    wait_until(lambda: len(client.nodes) == 2, 'discover both nodes')
    # Delay find commands on one mongos.
    delay_finds = {
        'configureFailPoint': 'failCommand',
        'mode': {'times': 10000},
        'data': {
            'failCommands': ['find'],
            'blockConnection': True,
            'blockTimeMS': 500,
            'appName': 'loadBalancingTest',
        },
    }
    with self.fail_point(delay_finds):
        nodes = client_context.client.nodes
        self.assertEqual(len(nodes), 1)
        delayed_server = next(iter(nodes))
        freqs = self.frequencies(client, listener)
        self.assertLessEqual(freqs[delayed_server], 0.25)
    listener.reset()
    freqs = self.frequencies(client, listener)
    self.assertAlmostEqual(freqs[delayed_server], 0.50, delta=0.15)
def test_load_balancing(self):
    # Although the server selection JSON tests already prove that
    # select_servers works for sharded topologies, here we do an
    # end-to-end test of discovering servers' round trip times and
    # configuring localThresholdMS.
    client = connected(self.mock_client())
    wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses')

    # Prohibited for topology type Sharded.
    with self.assertRaises(InvalidOperation):
        client.address

    topology = client._topology
    self.assertEqual(TOPOLOGY_TYPE.Sharded,
                     topology.description.topology_type)

    # a and b are within the 15-ms latency window, see self.mock_client().
    self.assertEqual(set([('a', 1), ('b', 2)]),
                     writable_addresses(topology))

    client.mock_rtts['a:1'] = 0.045

    # Discover only b is within latency window.
    wait_until(lambda: set([('b', 2)]) == writable_addresses(topology),
               'discover server "a" is too far')
def wait_for_event(self, event, count):
    """Run the waitForEvent test operation.

    Wait for a number of events to be published, or fail.
    """
    wait_until(lambda: self._event_count(event) >= count,
               'find %s %s event(s)' % (count, event))
def test_unacknowledged_writes(self):
    # Ensure the collection exists.
    self.client.pymongo_test.test_unacked_writes.insert_one({})
    client = rs_or_single_client(w=0, event_listeners=[self.listener])
    self.addCleanup(client.close)
    db = client.pymongo_test
    coll = db.test_unacked_writes
    ops = [
        (client.drop_database, [db.name], {}),
        (db.create_collection, ['collection'], {}),
        (db.drop_collection, ['collection'], {}),
    ]
    ops.extend(self.collection_write_ops(coll))
    self._test_unacknowledged_ops(client, *ops)

    def drop_db():
        try:
            self.client.drop_database(db.name)
            return True
        except OperationFailure as exc:
            # Try again on BackgroundOperationInProgressForDatabase and
            # BackgroundOperationInProgressForNamespace.
            if exc.code in (12586, 12587):
                return False
            raise

    wait_until(drop_db, 'dropped database after w=0 writes')
def test_failover(self):
    nthreads = 10
    client = connected(self.mock_client(localThresholdMS=0.001))
    wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses')

    # Our chosen mongos goes down.
    client.kill_host('a:1')

    # Trigger failover to higher-latency nodes. AutoReconnect should be
    # raised at most once in each thread.
    passed = []

    def f():
        try:
            client.db.command('ismaster')
        except AutoReconnect:
            # Second attempt succeeds.
            client.db.command('ismaster')

        passed.append(True)

    threads = [threading.Thread(target=f) for _ in range(nthreads)]
    for t in threads:
        t.start()

    for t in threads:
        t.join()

    self.assertEqual(nthreads, len(passed))

    # Down host removed from list.
    self.assertEqual(2, len(client.nodes))
def test_ipv6(self): c = MongoClient("mongodb://[::1]:%d" % (port,), replicaSet=self.name) # Client switches to IPv4 once it has first ismaster response. msg = 'discovered primary with IPv4 address "%r"' % (self.primary,) wait_until(lambda: c.primary == self.primary, msg) # Same outcome with both IPv4 and IPv6 seeds. c = MongoClient("[::1]:%d,localhost:%d" % (port, port), replicaSet=self.name) wait_until(lambda: c.primary == self.primary, msg) if client_context.auth_enabled: auth_str = "%s:%s@" % (db_user, db_pwd) else: auth_str = "" uri = "mongodb://%slocalhost:%d,[::1]:%d" % (auth_str, port, port) client = MongoClient(uri, replicaSet=self.name) client.pymongo_test.test.insert_one({"dummy": u("object")}) client.pymongo_test_bernie.test.insert_one({"dummy": u("object")}) dbs = client.database_names() self.assertTrue("pymongo_test" in dbs) self.assertTrue("pymongo_test_bernie" in dbs) client.close()
def test_client(self):
    c = MockClient(
        standalones=[],
        members=['a:1', 'b:2', 'c:3'],
        mongoses=[],
        host='a:1,b:2,c:3',
        replicaSet='rs',
        serverSelectionTimeoutMS=100)
    self.addCleanup(c.close)

    # MongoClient connects to primary by default.
    wait_until(lambda: c.address is not None, 'connect to primary')
    self.assertEqual(c.address, ('a', 1))

    # C is brought up as a standalone.
    c.mock_members.remove('c:3')
    c.mock_standalones.append('c:3')

    # Fail over.
    c.kill_host('a:1')
    c.kill_host('b:2')

    # Force reconnect.
    c.close()

    with self.assertRaises(AutoReconnect):
        c.db.command('ismaster')

    self.assertEqual(c.address, None)
def _test_auto_encrypt(self, opts):
    client = rs_or_single_client(auto_encryption_opts=opts)
    self.addCleanup(client.close)

    # Create the encrypted field's data key.
    key_vault = create_key_vault(
        self.client.admin.datakeys,
        json_data('custom', 'key-document-local.json'))
    self.addCleanup(key_vault.drop)

    # Collection.insert_one/insert_many auto encrypts.
    docs = [{'_id': 0, 'ssn': '000'},
            {'_id': 1, 'ssn': '111'},
            {'_id': 2, 'ssn': '222'},
            {'_id': 3, 'ssn': '333'},
            {'_id': 4, 'ssn': '444'},
            {'_id': 5, 'ssn': '555'}]
    encrypted_coll = client.pymongo_test.test
    encrypted_coll.insert_one(docs[0])
    encrypted_coll.insert_many(docs[1:3])
    unack = encrypted_coll.with_options(write_concern=WriteConcern(w=0))
    unack.insert_one(docs[3])
    unack.insert_many(docs[4:], ordered=False)
    wait_until(lambda: self.db.test.count_documents({}) == len(docs),
               'insert documents with w=0')

    # Database.command auto decrypts.
    res = client.pymongo_test.command(
        'find', 'test', filter={'ssn': '000'})
    decrypted_docs = res['cursor']['firstBatch']
    self.assertEqual(decrypted_docs, [{'_id': 0, 'ssn': '000'}])

    # Collection.find auto decrypts.
    decrypted_docs = list(encrypted_coll.find())
    self.assertEqual(decrypted_docs, docs)

    # Collection.find auto decrypts getMores.
    decrypted_docs = list(encrypted_coll.find(batch_size=1))
    self.assertEqual(decrypted_docs, docs)

    # Collection.aggregate auto decrypts.
    decrypted_docs = list(encrypted_coll.aggregate([]))
    self.assertEqual(decrypted_docs, docs)

    # Collection.aggregate auto decrypts getMores.
    decrypted_docs = list(encrypted_coll.aggregate([], batchSize=1))
    self.assertEqual(decrypted_docs, docs)

    # Collection.distinct auto decrypts.
    decrypted_ssns = encrypted_coll.distinct('ssn')
    self.assertEqual(set(decrypted_ssns), set(d['ssn'] for d in docs))

    # Make sure the field is actually encrypted.
    for encrypted_doc in self.db.test.find():
        self.assertIsInstance(encrypted_doc['_id'], int)
        self.assertEncrypted(encrypted_doc['ssn'])

    # Attempt to encrypt an unencodable object.
    with self.assertRaises(BSONError):
        encrypted_coll.insert_one({'unencodeable': object()})
def test_client(self):
    c = MockClient(
        standalones=[],
        members=['a:1', 'b:2', 'c:3'],
        mongoses=[],
        host='a:1,b:2,c:3',
        replicaSet='rs',
        serverSelectionTimeoutMS=100,
        connect=False)
    self.addCleanup(c.close)

    # C is brought up as a standalone.
    c.mock_members.remove('c:3')
    c.mock_standalones.append('c:3')

    # Fail over.
    c.kill_host('a:1')
    c.kill_host('b:2')

    with self.assertRaises(ServerSelectionTimeoutError):
        c.db.command('ping')
    self.assertEqual(c.address, None)

    # Client can still discover the primary node
    c.revive_host('a:1')
    wait_until(lambda: c.address is not None, 'connect to primary')
    self.assertEqual(c.address, ('a', 1))
def test_local_threshold(self):
    client = connected(self.mock_client(localThresholdMS=30))
    wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses')
    topology = client._topology

    # All are within a 30-ms latency window, see self.mock_client().
    self.assertEqual(set([('a', 1), ('b', 2), ('c', 3)]),
                     writable_addresses(topology))
def assertReadsFrom(self, expected, **kwargs): c = rs_client(**kwargs) wait_until(lambda: len(c.nodes - c.arbiters) == self.w, "discovered all nodes") used = self.read_from_which_kind(c) self.assertEqual(expected, used, 'Cursor used %s, expected %s' % (used, expected))
def test_bool(self):
    client = single_client()
    wait_until(lambda: client.address, "discover primary")
    selection = Selection.from_topology_description(
        client._topology.description)

    self.assertTrue(selection)
    self.assertFalse(selection.with_server_descriptions([]))
def run_scenario(self):
    responses = (r for r in scenario_def['phases'][0]['responses'])

    with client_knobs(events_queue_frequency=0.1,
                      heartbeat_frequency=0.1,
                      min_heartbeat_interval=0.1):
        class MockMonitor(Monitor):
            """Override the _run method"""
            def _run(self):
                try:
                    if self._server_description.address != ('a', 27017):
                        # Because PyMongo doesn't keep information about
                        # the order of addresses, we might accidentally
                        # start a MockMonitor on the wrong server first,
                        # so we need to only mock responses for the server
                        # the test's response is supposed to come from.
                        return
                    response = next(responses)[1]
                    isMaster = IsMaster(response)
                    self._server_description = ServerDescription(
                        address=self._server_description.address,
                        ismaster=isMaster)
                    self._topology.on_change(self._server_description)
                except (ReferenceError, StopIteration):
                    # Topology was garbage-collected.
                    self.close()

        m = single_client(h=scenario_def['uri'], p=27017,
                          event_listeners=(self.all_listener,),
                          _monitor_class=MockMonitor)

        expected_results = scenario_def['phases'][0]['outcome']['events']

        expected_len = len(expected_results)
        wait_until(lambda: len(self.all_listener.results) >= expected_len,
                   "publish all events", timeout=15)

        try:
            i = 0
            while i < expected_len:
                result = (self.all_listener.results[i]
                          if len(self.all_listener.results) > i else None)
                # The order of ServerOpening/ClosedEvents doesn't matter
                if (isinstance(result, monitoring.ServerOpeningEvent) or
                        isinstance(result, monitoring.ServerClosedEvent)):
                    i, passed, message = compare_multiple_events(
                        i, expected_results, self.all_listener.results)
                    self.assertTrue(passed, message)
                else:
                    self.assertTrue(
                        *compare_events(expected_results[i], result))
                    i += 1
        finally:
            m.close()
def test_no_results_unordered_success(self):
    batch = self.coll.initialize_unordered_bulk_op()
    batch.insert({'_id': 1})
    batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}})
    batch.insert({'_id': 2})
    batch.find({'_id': 1}).remove_one()
    self.assertTrue(batch.execute({'w': 0}) is None)
    wait_until(lambda: 2 == self.coll.count(),
               'insert 2 documents')
def test_round_trip_time(self):
    round_trip_time = 125
    available = True

    class TestMonitor(Monitor):
        def _check_with_socket(self, *args, **kwargs):
            if available:
                return (IsMaster({'ok': 1, 'maxWireVersion': 6}),
                        round_trip_time)
            else:
                raise AutoReconnect('mock monitor error')

    t = create_mock_topology(monitor_class=TestMonitor)
    self.addCleanup(t.close)
    s = t.select_server(writable_server_selector)
    self.assertEqual(125, s.description.round_trip_time)

    round_trip_time = 25
    t.request_check_all()

    # Exponential weighted average: .8 * 125 + .2 * 25 = 105.
    self.assertAlmostEqual(105, s.description.round_trip_time)

    # The server is temporarily down.
    available = False
    t.request_check_all()

    def raises_err():
        try:
            t.select_server(writable_server_selector,
                            server_selection_timeout=0.1)
        except ConnectionFailure:
            return True
        else:
            return False

    wait_until(raises_err, 'discover server is down')
    self.assertIsNone(s.description.round_trip_time)

    # Bring it back, RTT is now 20 milliseconds.
    available = True
    round_trip_time = 20

    def new_average():
        # We reset the average to the most recent measurement.
        description = s.description
        return (description.round_trip_time is not None
                and round(abs(20 - description.round_trip_time), 7) == 0)

    tries = 0
    while not new_average():
        t.request_check_all()
        tries += 1
        if tries > 10:
            self.fail("Didn't ever calculate correct new average")
def assertReadsFrom(self, expected, **kwargs):
    c = rs_client(**kwargs)
    wait_until(
        lambda: len(c.nodes - c.arbiters) == self.w,
        "discovered all nodes")
    used = self.read_from_which_kind(c)
    self.assertEqual(expected, used,
                     'Cursor used %s, expected %s' % (used, expected))
def test_insert_large_batch(self):
    # Tests legacy insert.
    db = self.client.test_insert_large_batch
    self.addCleanup(self.client.drop_database, 'test_insert_large_batch')
    max_bson_size = self.client.max_bson_size
    if client_context.version.at_least(2, 5, 4, -1):
        # Write commands are limited to 16MB + 16k per batch
        big_string = 'x' * int(max_bson_size / 2)
    else:
        big_string = 'x' * (max_bson_size - 100)

    # Batch insert that requires 2 batches.
    successful_insert = [{'x': big_string}, {'x': big_string},
                         {'x': big_string}, {'x': big_string}]
    db.collection_0.insert(successful_insert, w=1)
    self.assertEqual(4, db.collection_0.count())

    # Test that inserts fail after first error.
    insert_second_fails = [{'_id': 'id0', 'x': big_string},
                           {'_id': 'id0', 'x': big_string},
                           {'_id': 'id1', 'x': big_string},
                           {'_id': 'id2', 'x': big_string}]

    with self.assertRaises(DuplicateKeyError):
        db.collection_1.insert(insert_second_fails)

    self.assertEqual(1, db.collection_1.count())

    # 2 batches, 2nd insert fails, don't continue on error.
    self.assertTrue(db.collection_2.insert(insert_second_fails, w=0))
    wait_until(lambda: 1 == db.collection_2.count(),
               'insert 1 document', timeout=60)

    # 2 batches, ids of docs 0 and 1 are dupes, ids of docs 2 and 3 are
    # dupes. Acknowledged, continue on error.
    insert_two_failures = [{'_id': 'id0', 'x': big_string},
                           {'_id': 'id0', 'x': big_string},
                           {'_id': 'id1', 'x': big_string},
                           {'_id': 'id1', 'x': big_string}]

    with self.assertRaises(OperationFailure) as context:
        db.collection_3.insert(insert_two_failures,
                               continue_on_error=True, w=1)

    self.assertIn('id1', str(context.exception))

    # Only the first and third documents should be inserted.
    self.assertEqual(2, db.collection_3.count())

    # 2 batches, 2 errors, unacknowledged, continue on error.
    db.collection_4.insert(insert_two_failures,
                           continue_on_error=True, w=0)

    # Only the first and third documents are inserted.
    wait_until(lambda: 2 == db.collection_4.count(),
               'insert 2 documents', timeout=60)
def test_lazy_connect(self):
    # While connected() ensures we can trigger connection from the main
    # thread and wait for the monitors, this test triggers connection from
    # several threads at once to check for data races.
    nthreads = 10
    client = self.mock_client()
    self.assertEqual(0, len(client.nodes))

    # Trigger initial connection.
    do_simple_op(client, nthreads)
    wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses')
def test_round_trip_time(self):
    round_trip_time = 125
    available = True

    class TestMonitor(Monitor):
        def _check_with_socket(self, *args, **kwargs):
            if available:
                return (IsMaster({'ok': 1, 'maxWireVersion': 6}),
                        round_trip_time)
            else:
                raise AutoReconnect('mock monitor error')

    t = create_mock_topology(monitor_class=TestMonitor)
    s = t.select_server(writable_server_selector)
    self.assertEqual(125, s.description.round_trip_time)

    round_trip_time = 25
    t.request_check_all()

    # Exponential weighted average: .8 * 125 + .2 * 25 = 105.
    self.assertAlmostEqual(105, s.description.round_trip_time)

    # The server is temporarily down.
    available = False
    t.request_check_all()

    def raises_err():
        try:
            t.select_server(writable_server_selector,
                            server_selection_timeout=0.1)
        except ConnectionFailure:
            return True
        else:
            return False

    wait_until(raises_err, 'discover server is down')
    self.assertIsNone(s.description.round_trip_time)

    # Bring it back, RTT is now 20 milliseconds.
    available = True
    round_trip_time = 20

    def new_average():
        # We reset the average to the most recent measurement.
        description = s.description
        return (description.round_trip_time is not None
                and round(abs(20 - description.round_trip_time), 7) == 0)

    tries = 0
    while not new_average():
        t.request_check_all()
        tries += 1
        if tries > 10:
            self.fail("Didn't ever calculate correct new average")
def test_atexit_hook(self):
    client = single_client(client_context.host, client_context.port)
    executor = one(client._topology._servers.values())._monitor._executor
    connected(client)

    # The executor stores a weakref to itself in _EXECUTORS.
    ref = one([r for r in _EXECUTORS.copy() if r() is executor])

    del executor
    del client

    wait_until(partial(unregistered, ref), 'unregister executor',
               timeout=5)
def run_test(self): if not _HAVE_DNSPYTHON: raise unittest.SkipTest("DNS tests require the dnspython module") uri = test_case['uri'] seeds = test_case['seeds'] hosts = test_case['hosts'] options = test_case.get('options') if seeds: seeds = split_hosts(','.join(seeds)) if hosts: hosts = frozenset(split_hosts(','.join(hosts))) if options: for key, value in options.items(): # Convert numbers / booleans to strings for comparison if isinstance(value, bool): options[key] = 'true' if value else 'false' elif isinstance(value, (int, float)): options[key] = str(value) if seeds: result = parse_uri(uri, validate=False) self.assertEqual(sorted(result['nodelist']), sorted(seeds)) if options: opts = result['options'] if 'readpreferencetags' in opts: rpts = validate_read_preference_tags( 'readPreferenceTags', opts.pop('readpreferencetags')) opts['readPreferenceTags'] = rpts self.assertEqual(result['options'], options) hostname = next(iter(client_context.client.nodes))[0] # The replica set members must be configured as 'localhost'. if hostname == 'localhost': copts = client_context.default_client_options.copy() if client_context.ssl is True: # Our test certs don't support the SRV hosts used in these tests. copts['ssl_match_hostname'] = False client = MongoClient(uri, **copts) # Force server selection client.admin.command('ismaster') wait_until( lambda: hosts == client.nodes, 'match test hosts to client nodes') else: try: parse_uri(uri) except (ConfigurationError, ValueError): pass else: self.fail("failed to raise an exception")
def test_reconnect(self):
    nthreads = 10
    client = connected(self.mock_client())

    # connected() ensures we've contacted at least one mongos. Wait for
    # all of them.
    wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses')

    # Trigger reconnect.
    client.close()
    do_simple_op(client, nthreads)

    wait_until(lambda: len(client.nodes) == 3, 'reconnect to all mongoses')
def test_stepdown_triggers_refresh(self):
    c_find_one = MongoClient(
        self.seed,
        replicaSet=self.name,
        serverSelectionTimeoutMS=self.server_selection_timeout)
    c_count = MongoClient(
        self.seed,
        replicaSet=self.name,
        serverSelectionTimeoutMS=self.server_selection_timeout)

    # We've started the primary and one secondary
    wait_until(lambda: len(c_find_one.secondaries), "discover secondary")
    wait_until(lambda: len(c_count.secondaries), "discover secondary")

    ha_tools.stepdown_primary()

    # Trigger a refresh, both with a cursor and a command.
    self.assertRaises(AutoReconnect, c_find_one.test.test.find_one)
    self.assertRaises(AutoReconnect, c_count.test.command, 'count')

    # Both clients detect the stepdown *AND* re-check the server
    # immediately, they don't just mark it Unknown. Wait for the
    # immediate refresh to complete - we're not waiting for the
    # periodic refresh, which has been disabled
    wait_until(lambda: len(c_find_one.secondaries) == 2,
               "detect two secondaries")
    wait_until(lambda: len(c_count.secondaries) == 2,
               "detect two secondaries")
def test_repr(self):
    with ignore_deprecations():
        client = MongoReplicaSetClient(host, port,
                                       replicaSet=self.name)

    wait_until(lambda: client.primary == self.primary, "discover primary")
    wait_until(lambda: client.secondaries == self.secondaries,
               "discover secondaries")

    # repr should be something like
    # MongoReplicaSetClient(["localhost:27017", "localhost:27018"]).
    self.assertIn("MongoReplicaSetClient([", repr(client))
    for h in self.hosts:
        self.assertIn("%s:%d" % h, repr(client))
def f(pipe):
    try:
        kill_cursors_executor = self.client._kill_cursors_executor
        servers = self.client._topology.select_servers(
            any_server_selector)

        # In child, only the thread that called fork() is alive.
        # The first operation should revive the rest.
        db.test.find_one()
        wait_until(
            lambda: all(s._monitor._executor._thread.is_alive()
                        for s in servers),
            "restart monitor threads")

        wait_until(lambda: kill_cursors_executor._thread.is_alive(),
                   "restart kill-cursors executor")
    except:
        traceback.print_exc()  # Aid debugging.
        pipe.send(True)
def test_min_pool_size(self):
    with client_knobs(kill_cursor_frequency=.1):
        client = MongoClient(host, port)
        server = client._get_topology().select_server(any_server_selector)
        self.assertEqual(0, len(server._pool.sockets))

        # Assert that pool started up at minPoolSize
        client = MongoClient(host, port, minPoolSize=10)
        server = client._get_topology().select_server(any_server_selector)
        wait_until(lambda: 10 == len(server._pool.sockets),
                   "pool initialized with 10 sockets")

        # Assert that if a socket is closed, a new one takes its place
        with server._pool.get_socket({}) as sock_info:
            sock_info.close()
        wait_until(lambda: 10 == len(server._pool.sockets),
                   "a closed socket gets replaced from the pool")
        self.assertFalse(sock_info in server._pool.sockets)