def test_discover_primary(self):
    # Disable background refresh.
    with client_knobs(heartbeat_frequency=999999):
        c = MockClient(
            standalones=[],
            members=['a:1', 'b:2', 'c:3'],
            mongoses=[],
            host='b:2',  # Pass a secondary.
            replicaSet='rs')

        wait_until(lambda: len(c.nodes) == 3, 'connect')
        self.assertEqual(c.address, ('a', 1))

        # Fail over.
        c.kill_host('a:1')
        c.mock_primary = 'b:2'

        c.close()
        self.assertEqual(0, len(c.nodes))
        t = c._get_topology()
        t.select_servers(writable_server_selector)  # Reconnect.
        self.assertEqual(c.address, ('b', 2))

        # a:1 no longer in nodes.
        self.assertLess(len(c.nodes), 3)

        # c:3 is rediscovered.
        t.select_server_by_address(('c', 3))
def test_max_idle_time_reaper(self):
    with client_knobs(kill_cursor_frequency=0.1):
        # Assert reaper doesn't remove sockets when maxIdleTimeMS not set
        client = MongoClient(host, port)
        server = client._get_topology().select_server(any_server_selector)
        with server._pool.get_socket({}) as sock_info:
            pass
        self.assertEqual(1, len(server._pool.sockets))
        self.assertTrue(sock_info in server._pool.sockets)

        # Assert reaper removes idle socket and replaces it with a new one
        client = MongoClient(host, port, maxIdleTimeMS=.5, minPoolSize=1)
        server = client._get_topology().select_server(any_server_selector)
        with server._pool.get_socket({}) as sock_info:
            pass
        self.assertEqual(1, len(server._pool.sockets))
        wait_until(lambda: sock_info not in server._pool.sockets,
                   "reaper removes stale socket eventually")
        wait_until(lambda: 1 == len(server._pool.sockets),
                   "reaper replaces stale socket with new one")

        # Assert reaper has removed idle socket and NOT replaced it
        client = MongoClient(host, port, maxIdleTimeMS=.5)
        server = client._get_topology().select_server(any_server_selector)
        with server._pool.get_socket({}):
            pass
        wait_until(
            lambda: 0 == len(server._pool.sockets),
            "stale socket reaped and new one NOT added to the pool")
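# Many of these tests poll with wait_until instead of sleeping for a fixed
# interval, which keeps them fast and non-flaky. A minimal sketch of such a
# polling helper, assuming the real test-suite version differs in details
# (default timeout, error message wording):
import time


def wait_until(predicate, success_description, timeout=10):
    """Poll predicate() every 10ms until it returns a truthy value.

    Raise AssertionError with success_description if the timeout elapses
    first; otherwise return whatever predicate() returned.
    """
    start = time.time()
    while True:
        retval = predicate()
        if retval:
            return retval
        if time.time() - start > timeout:
            raise AssertionError("Didn't ever %s" % success_description)
        time.sleep(0.01)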
def test_timeout_does_not_mark_member_down(self):
    # If a query times out, the client shouldn't mark the member "down".

    # Disable background refresh.
    with client_knobs(heartbeat_frequency=999999):
        c = rs_client(socketTimeoutMS=3000, w=self.w)
        collection = c.pymongo_test.test
        collection.insert_one({})

        # Query the primary.
        self.assertRaises(NetworkTimeout,
                          collection.find_one,
                          {'$where': delay(5)})

        self.assertTrue(c.primary)
        collection.find_one()  # No error.

        coll = collection.with_options(
            read_preference=ReadPreference.SECONDARY)

        # Query the secondary.
        self.assertRaises(NetworkTimeout,
                          coll.find_one,
                          {'$where': delay(5)})

        self.assertTrue(c.secondaries)

        # No error.
        coll.find_one()
def setUpClass(cls):
    super(SpecRunner, cls).setUpClass()
    cls.mongos_clients = []

    # Speed up the tests by decreasing the heartbeat frequency.
    cls.knobs = client_knobs(min_heartbeat_interval=0.1)
    cls.knobs.enable()
def test_max_idle_time_checkout(self):
    # Use high frequency to test _get_socket_no_auth.
    with client_knobs(kill_cursor_frequency=99999999):
        client = MongoClient(host, port, maxIdleTimeMS=.5)
        server = client._get_topology().select_server(any_server_selector)
        with server._pool.get_socket({}) as sock_info:
            pass
        self.assertEqual(1, len(server._pool.sockets))
        time.sleep(1)  # Sleep so that the socket becomes stale.

        with server._pool.get_socket({}) as new_sock_info:
            self.assertNotEqual(sock_info, new_sock_info)
        self.assertEqual(1, len(server._pool.sockets))
        self.assertFalse(sock_info in server._pool.sockets)
        self.assertTrue(new_sock_info in server._pool.sockets)

        # Test that sockets are reused if maxIdleTimeMS is not set.
        client = MongoClient(host, port)
        server = client._get_topology().select_server(any_server_selector)
        with server._pool.get_socket({}) as sock_info:
            pass
        self.assertEqual(1, len(server._pool.sockets))
        time.sleep(1)
        with server._pool.get_socket({}) as new_sock_info:
            self.assertEqual(sock_info, new_sock_info)
        self.assertEqual(1, len(server._pool.sockets))
def test_discover_primary(self): # Disable background refresh. with client_knobs(heartbeat_frequency=999999): c = MockClient( standalones=[], members=["a:1", "b:2", "c:3"], mongoses=[], host="b:2", # Pass a secondary. replicaSet="rs", ) wait_until(lambda: len(c.nodes) == 3, "connect") self.assertEqual(c.address, ("a", 1)) # Fail over. c.kill_host("a:1") c.mock_primary = "b:2" c.close() self.assertEqual(0, len(c.nodes)) t = c._get_topology() t.select_servers(writable_server_selector) # Reconnect. self.assertEqual(c.address, ("b", 2)) # a:1 not longer in nodes. self.assertLess(len(c.nodes), 3) # c:3 is rediscovered. t.select_server_by_address(("c", 3))
def test_cursor_manager(self):
    self.close_was_called = False

    test_case = self

    class CM(CursorManager):
        def __init__(self, client):
            super(CM, self).__init__(client)

        def close(self, cursor_id, address):
            test_case.close_was_called = True
            super(CM, self).close(cursor_id, address)

    with client_knobs(kill_cursor_frequency=0.01):
        client = rs_or_single_client(maxPoolSize=1)
        client.set_cursor_manager(CM)

        # Create a cursor on the same client so we're certain the getMore
        # is sent after the killCursors message.
        cursor = client.pymongo_test.test.find().batch_size(1)
        next(cursor)
        client.close_cursor(
            cursor.cursor_id,
            _CursorAddress(self.client.address, self.collection.full_name))

        def raises_cursor_not_found():
            try:
                next(cursor)
                return False
            except CursorNotFound:
                return True

        wait_until(raises_cursor_not_found, 'close cursor')
        self.assertTrue(self.close_was_called)
def test_kill_cursors(self):
    with client_knobs(kill_cursor_frequency=0.01):
        self.client.pymongo_test.test.drop()
        self.client.pymongo_test.test.insert_many([{} for _ in range(10)])
        cursor = self.client.pymongo_test.test.find().batch_size(5)
        next(cursor)
        cursor_id = cursor.cursor_id

        self.listener.results = {}
        cursor.close()
        time.sleep(2)
        results = self.listener.results

        started = results.get('started')
        succeeded = results.get('succeeded')
        self.assertIsNone(results.get('failed'))
        self.assertTrue(
            isinstance(started, monitoring.CommandStartedEvent))
        # There could be more than one cursor_id here depending on
        # when the thread last ran.
        self.assertIn(cursor_id, started.command['cursors'])
        self.assertEqual('killCursors', started.command_name)
        self.assertEqual(cursor.address, started.connection_id)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertTrue(isinstance(started.request_id, int))
        self.assertTrue(
            isinstance(succeeded, monitoring.CommandSucceededEvent))
        self.assertTrue(isinstance(succeeded.duration_micros, int))
        self.assertEqual('killCursors', succeeded.command_name)
        self.assertTrue(isinstance(succeeded.request_id, int))
        self.assertEqual(cursor.address, succeeded.connection_id)
        # There could be more than one cursor_id here depending on
        # when the thread last ran.
        self.assertIn(cursor_id, succeeded.reply['cursorsUnknown'])
def test_max_idle_time_reaper(self):
    with client_knobs(kill_cursor_frequency=0.1):
        # Assert reaper doesn't remove sockets when maxIdleTimeMS not set
        client = MongoClient(host, port)
        server = client._get_topology().select_server(any_server_selector)
        with server._pool.get_socket({}) as sock_info:
            pass
        time.sleep(1)
        self.assertEqual(1, len(server._pool.sockets))
        self.assertTrue(sock_info in server._pool.sockets)

        # Assert reaper removes idle socket and replaces it with a new one
        client = MongoClient(host, port, maxIdleTimeMS=.5, minPoolSize=1)
        server = client._get_topology().select_server(any_server_selector)
        with server._pool.get_socket({}) as sock_info:
            pass
        time.sleep(2)
        self.assertEqual(1, len(server._pool.sockets))
        self.assertFalse(sock_info in server._pool.sockets)

        # Assert reaper has removed idle socket and NOT replaced it
        client = MongoClient(host, port, maxIdleTimeMS=.5)
        server = client._get_topology().select_server(any_server_selector)
        with server._pool.get_socket({}):
            pass
        time.sleep(1)
        self.assertEqual(0, len(server._pool.sockets))
def create_mock_monitor(self, responses, uri, expected_results):
    with client_knobs(heartbeat_frequency=0.1,
                      events_queue_frequency=0.1):
        class MockMonitor(Monitor):
            def _check_with_socket(self, sock_info):
                if isinstance(responses[1], Exception):
                    raise responses[1]
                return IsMaster(responses[1]), 99

        m = single_client(
            h=uri,
            event_listeners=(self.all_listener,),
            _monitor_class=MockMonitor,
            _pool_class=MockPool)

        expected_len = len(expected_results)
        wait_until(lambda: len(self.all_listener.results) == expected_len,
                   "publish all events", timeout=15)

        try:
            for i in range(len(expected_results)):
                result = self.all_listener.results[i] if len(
                    self.all_listener.results) > i else None
                self.assertEqual(expected_results[i],
                                 result.__class__.__name__)
                self.assertEqual(result.connection_id, responses[0])
                if expected_results[i] != 'ServerHeartbeatStartedEvent':
                    if isinstance(result.reply, IsMaster):
                        self.assertEqual(result.duration, 99)
                        self.assertEqual(result.reply._doc, responses[1])
                    else:
                        self.assertEqual(result.reply, responses[1])
        finally:
            m.close()
def _test_network_error(self, operation_callback): # Verify only the disconnected server is reset by a network failure. # Disable background refresh. with client_knobs(heartbeat_frequency=999999): c = MockClient( standalones=[], members=["a:1", "b:2"], mongoses=[], host="a:1", replicaSet="rs", connect=False ) # Set host-specific information so we can test whether it is reset. c.set_wire_version_range("a:1", 0, 1) c.set_wire_version_range("b:2", 0, 2) c._get_topology().select_servers(writable_server_selector) wait_until(lambda: len(c.nodes) == 2, "connect") c.kill_host("a:1") # MongoClient is disconnected from the primary. self.assertRaises(AutoReconnect, operation_callback, c) # The primary's description is reset. server_a = c._get_topology().get_server_by_address(("a", 1)) sd_a = server_a.description self.assertEqual(SERVER_TYPE.Unknown, sd_a.server_type) self.assertEqual(0, sd_a.min_wire_version) self.assertEqual(0, sd_a.max_wire_version) # ...but not the secondary's. server_b = c._get_topology().get_server_by_address(("b", 2)) sd_b = server_b.description self.assertEqual(SERVER_TYPE.RSSecondary, sd_b.server_type) self.assertEqual(0, sd_b.min_wire_version) self.assertEqual(2, sd_b.max_wire_version)
def test_timeout_does_not_mark_member_down(self): # If a query times out, the client shouldn't mark the member "down". # Disable background refresh. with client_knobs(heartbeat_frequency=999999): c = rs_client(socketTimeoutMS=3000, w=self.w) collection = c.pymongo_test.test collection.insert_one({}) # Query the primary. self.assertRaises( NetworkTimeout, collection.find_one, {'$where': delay(5)}) self.assertTrue(c.primary) collection.find_one() # No error. coll = collection.with_options( read_preference=ReadPreference.SECONDARY) # Query the secondary. self.assertRaises( NetworkTimeout, coll.find_one, {'$where': delay(5)}) self.assertTrue(c.secondaries) # No error. coll.find_one()
def setUpClass(cls):
    super(TestRetryableWritesMMAPv1, cls).setUpClass()
    # Speed up the tests by decreasing the heartbeat frequency.
    cls.knobs = client_knobs(heartbeat_frequency=0.1,
                             min_heartbeat_interval=0.1)
    cls.knobs.enable()
    cls.client = rs_or_single_client(retryWrites=True)
    cls.db = cls.client.pymongo_test
def setUpClass(cls):
    super(TestTransactions, cls).setUpClass()
    # Speed up tests by reducing SDAM waiting time after a network error.
    cls.knobs = client_knobs(min_heartbeat_interval=0.1)
    cls.knobs.enable()
    cls.mongos_clients = []
    if client_context.supports_transactions():
        for address in client_context.mongoses:
            cls.mongos_clients.append(single_client('%s:%s' % address))
def setUpClass(cls):
    super(TransactionsBase, cls).setUpClass()
    # Speed up tests by reducing SDAM waiting time after a network error.
    cls.knobs = client_knobs(min_heartbeat_interval=0.1)
    cls.knobs.enable()
    cls.mongos_clients = []
    if client_context.supports_transactions():
        for address in client_context.mongoses:
            cls.mongos_clients.append(single_client('%s:%s' % address))
def run_scenario(self):
    responses = (r for r in scenario_def['phases'][0]['responses'])

    with client_knobs(events_queue_frequency=0.1,
                      heartbeat_frequency=0.1,
                      min_heartbeat_interval=0.1):
        class MockMonitor(Monitor):
            """Override the _run method"""
            def _run(self):
                try:
                    if self._server_description.address != ('a', 27017):
                        # Because PyMongo doesn't keep information about
                        # the order of addresses, we might accidentally
                        # start a MockMonitor on the wrong server first,
                        # so we need to only mock responses for the server
                        # the test's response is supposed to come from.
                        return
                    response = next(responses)[1]
                    isMaster = IsMaster(response)
                    self._server_description = ServerDescription(
                        address=self._server_description.address,
                        ismaster=isMaster)
                    self._topology.on_change(self._server_description)
                except (ReferenceError, StopIteration):
                    # Topology was garbage-collected.
                    self.close()

        m = single_client(h=scenario_def['uri'], p=27017,
                          event_listeners=(self.all_listener,),
                          _monitor_class=MockMonitor)

        expected_results = scenario_def['phases'][0]['outcome']['events']
        expected_len = len(expected_results)
        wait_until(lambda: len(self.all_listener.results) >= expected_len,
                   "publish all events", timeout=15)

        try:
            i = 0
            while i < expected_len:
                result = self.all_listener.results[i] if len(
                    self.all_listener.results) > i else None
                # The order of ServerOpening/ClosedEvents doesn't matter
                if (isinstance(result, monitoring.ServerOpeningEvent) or
                        isinstance(result, monitoring.ServerClosedEvent)):
                    i, passed, message = compare_multiple_events(
                        i, expected_results, self.all_listener.results)
                    self.assertTrue(passed, message)
                else:
                    self.assertTrue(
                        *compare_events(expected_results[i], result))
                    i += 1
        finally:
            m.close()
def setUp(self):
    if not _HAVE_DNSPYTHON:
        raise unittest.SkipTest("SRV polling tests require the dnspython "
                                "module")
    # Patch timeouts to ensure short rescan SRV interval.
    self.client_knobs = client_knobs(heartbeat_frequency=WAIT_TIME,
                                     min_heartbeat_interval=WAIT_TIME,
                                     events_queue_frequency=WAIT_TIME)
    self.client_knobs.enable()
def setUp(self):
    if self.enable_heartbeat:
        heartbeat_frequency = self.heartbeat_frequency
    else:
        # Disable periodic monitoring.
        heartbeat_frequency = 1e6
    self.knobs = client_knobs(heartbeat_frequency=heartbeat_frequency)
    self.knobs.enable()
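# client_knobs appears throughout these tests, both as a context manager and
# via explicit enable()/disable() calls, to speed up or effectively disable
# PyMongo's background timers. A minimal single-knob sketch of the idea,
# assuming the timing constant lives at pymongo.common.HEARTBEAT_FREQUENCY
# (the real test helper patches several such knobs and restores them all):
from contextlib import contextmanager

from pymongo import common


@contextmanager
def patched_heartbeat_frequency(value):
    # Hypothetical simplification of client_knobs for one knob only.
    old = common.HEARTBEAT_FREQUENCY
    common.HEARTBEAT_FREQUENCY = value
    try:
        yield
    finally:
        # Always restore the original value, even if the test fails.
        common.HEARTBEAT_FREQUENCY = old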
def setUpClass(cls):
    super(TestRetryableWrites, cls).setUpClass()
    # Speed up the tests by decreasing the heartbeat frequency.
    cls.knobs = client_knobs(heartbeat_frequency=0.1,
                             min_heartbeat_interval=0.1)
    cls.knobs.enable()
    cls.listener = OvertCommandListener()
    cls.client = rs_or_single_client(
        retryWrites=True, event_listeners=[cls.listener])
    cls.db = cls.client.pymongo_test
def setUpClass(cls):
    super(TestRetryableWrites, cls).setUpClass()
    # Speed up the tests by decreasing the heartbeat frequency.
    cls.knobs = client_knobs(heartbeat_frequency=0.1,
                             min_heartbeat_interval=0.1)
    cls.knobs.enable()
    cls.listener = OvertCommandListener()
    cls.client = rs_or_single_client(retryWrites=True,
                                     event_listeners=[cls.listener])
    cls.db = cls.client.pymongo_test
def setUpClass(cls):
    super(TestSdamMonitoring, cls).setUpClass()
    # Speed up the tests by decreasing the event publish frequency.
    cls.knobs = client_knobs(events_queue_frequency=0.1)
    cls.knobs.enable()
    cls.listener = ServerAndTopologyEventListener()
    retry_writes = client_context.supports_transactions()
    cls.test_client = rs_or_single_client(
        event_listeners=[cls.listener], retryWrites=retry_writes)
    cls.coll = cls.test_client[cls.client.db.name].test
    cls.coll.insert_one({})
def test_kill_cursors_with_server_unavailable(self): with client_knobs(kill_cursor_frequency=9999999): client = MongoClient("doesnt exist", connect=False, serverSelectionTimeoutMS=0) # Wait for the first tick of the periodic kill-cursors to pass. time.sleep(1) # Enqueue a kill-cursors message. client.close_cursor(1234, ("doesnt-exist", 27017)) with warnings.catch_warnings(record=True) as user_warnings: client._process_kill_cursors_queue() self.assertIn("couldn't close cursor on ('doesnt-exist', 27017)", str(user_warnings[0].message))
def test_kill_cursors_with_server_unavailable(self):
    with client_knobs(kill_cursor_frequency=9999999):
        client = MongoClient('doesnt exist',
                             connect=False,
                             serverSelectionTimeoutMS=0)

        # Wait for the first tick of the periodic kill-cursors to pass.
        time.sleep(1)

        # Enqueue a kill-cursors message.
        client.close_cursor(1234, ('doesnt-exist', 27017))

        with warnings.catch_warnings(record=True) as user_warnings:
            client._process_kill_cursors_queue()

        self.assertIn("couldn't close cursor on ('doesnt-exist', 27017)",
                      str(user_warnings[0].message))
def test_min_pool_size(self):
    with client_knobs(kill_cursor_frequency=.1):
        client = MongoClient(host, port)
        server = client._get_topology().select_server(any_server_selector)
        self.assertEqual(0, len(server._pool.sockets))

        # Assert that pool started up at minPoolSize
        client = MongoClient(host, port, minPoolSize=10)
        server = client._get_topology().select_server(any_server_selector)
        wait_until(lambda: 10 == len(server._pool.sockets),
                   "pool initialized with 10 sockets")

        # Assert that if a socket is closed, a new one takes its place
        with server._pool.get_socket({}) as sock_info:
            sock_info.close()
        wait_until(lambda: 10 == len(server._pool.sockets),
                   "a closed socket gets replaced from the pool")
        self.assertFalse(sock_info in server._pool.sockets)
def test_min_pool_size(self):
    with client_knobs(kill_cursor_frequency=.1):
        client = MongoClient(host, port)
        server = client._get_topology().select_server(any_server_selector)
        time.sleep(1)
        self.assertEqual(0, len(server._pool.sockets))

        # Assert that pool started up at minPoolSize
        client = MongoClient(host, port, minPoolSize=10)
        server = client._get_topology().select_server(any_server_selector)
        time.sleep(1)
        self.assertEqual(10, len(server._pool.sockets))

        # Assert that if a socket is closed, a new one takes its place
        with server._pool.get_socket({}) as sock_info:
            sock_info.close()
        time.sleep(1)
        self.assertEqual(10, len(server._pool.sockets))
        self.assertFalse(sock_info in server._pool.sockets)
def test_socket_error_marks_member_down(self):
    # Disable background refresh.
    with client_knobs(heartbeat_frequency=999999):
        c = MockClient(standalones=[],
                       members=['a:1', 'b:2'],
                       mongoses=[],
                       host='a:1',
                       replicaSet='rs')
        self.addCleanup(c.close)

        wait_until(lambda: len(c.nodes) == 2, 'discover both nodes')

        # b now raises socket.error.
        c.mock_down_hosts.append('b:2')
        self.assertRaises(
            ConnectionFailure,
            c.db.collection.with_options(
                read_preference=ReadPreference.SECONDARY).find_one)

        self.assertEqual(1, len(c.nodes))
def test_socket_error_marks_member_down(self):
    # Disable background refresh.
    with client_knobs(heartbeat_frequency=999999):
        c = MockClient(
            standalones=[],
            members=['a:1', 'b:2'],
            mongoses=[],
            host='a:1',
            replicaSet='rs')

        wait_until(lambda: len(c.nodes) == 2, 'discover both nodes')

        # b now raises socket.error.
        c.mock_down_hosts.append('b:2')
        self.assertRaises(
            ConnectionFailure,
            c.db.collection.with_options(
                read_preference=ReadPreference.SECONDARY).find_one)

        self.assertEqual(1, len(c.nodes))
def create_mock_monitor(self, responses, uri, expected_results):
    listener = HeartbeatEventListener()
    with client_knobs(heartbeat_frequency=0.1,
                      min_heartbeat_interval=0.1,
                      events_queue_frequency=0.1):
        class MockMonitor(Monitor):
            def _check_with_socket(self, sock_info, metadata=None):
                if isinstance(responses[1], Exception):
                    raise responses[1]
                return IsMaster(responses[1]), 99

        m = single_client(
            h=uri,
            event_listeners=(listener,),
            _monitor_class=MockMonitor,
            _pool_class=MockPool)

        expected_len = len(expected_results)
        # Wait for *at least* expected_len number of results. The
        # monitor thread may run multiple times during the execution
        # of this test.
        wait_until(
            lambda: len(listener.results) >= expected_len,
            "publish all events")

        try:
            # zip gives us len(expected_results) pairs.
            for expected, actual in zip(expected_results,
                                        listener.results):
                self.assertEqual(expected, actual.__class__.__name__)
                self.assertEqual(actual.connection_id, responses[0])
                if expected != 'ServerHeartbeatStartedEvent':
                    if isinstance(actual.reply, IsMaster):
                        self.assertEqual(actual.duration, 99)
                        self.assertEqual(actual.reply._doc, responses[1])
                    else:
                        self.assertEqual(actual.reply, responses[1])
        finally:
            m.close()
def create_mock_monitor(self, responses, uri, expected_results):
    listener = HeartbeatEventListener()
    with client_knobs(heartbeat_frequency=0.1,
                      min_heartbeat_interval=0.1,
                      events_queue_frequency=0.1):
        class MockMonitor(Monitor):
            def _check_with_socket(self, *args, **kwargs):
                if isinstance(responses[1], Exception):
                    raise responses[1]
                return Hello(responses[1]), 99

        m = single_client(
            h=uri,
            event_listeners=(listener,),
            _monitor_class=MockMonitor,
            _pool_class=MockPool)

        expected_len = len(expected_results)
        # Wait for *at least* expected_len number of results. The
        # monitor thread may run multiple times during the execution
        # of this test.
        wait_until(
            lambda: len(listener.events) >= expected_len,
            "publish all events")

        try:
            # zip gives us len(expected_results) pairs.
            for expected, actual in zip(expected_results,
                                        listener.events):
                self.assertEqual(expected, actual.__class__.__name__)
                self.assertEqual(actual.connection_id, responses[0])
                if expected != 'ServerHeartbeatStartedEvent':
                    if isinstance(actual.reply, Hello):
                        self.assertEqual(actual.duration, 99)
                        self.assertEqual(actual.reply._doc, responses[1])
                    else:
                        self.assertEqual(actual.reply, responses[1])
        finally:
            m.close()
def test_cursor_manager(self):
    if (client_context.is_mongos
            and not client_context.version.at_least(2, 4, 7)):
        # Old mongos sends incorrectly formatted error response when
        # cursor isn't found, see SERVER-9738.
        raise SkipTest("Can't test kill_cursors against old mongos")

    self.close_was_called = False

    test_case = self

    class CM(CursorManager):
        def __init__(self, client):
            super(CM, self).__init__(client)

        def close(self, cursor_id, address):
            test_case.close_was_called = True
            super(CM, self).close(cursor_id, address)

    with client_knobs(kill_cursor_frequency=0.01):
        client = rs_or_single_client(maxPoolSize=1)
        client.set_cursor_manager(CM)

        # Create a cursor on the same client so we're certain the getMore
        # is sent after the killCursors message.
        cursor = client.pymongo_test.test.find().batch_size(1)
        next(cursor)
        client.close_cursor(
            cursor.cursor_id,
            _CursorAddress(self.client.address, self.collection.full_name))

        def raises_cursor_not_found():
            try:
                next(cursor)
                return False
            except CursorNotFound:
                return True

        wait_until(raises_cursor_not_found, 'close cursor')
        self.assertTrue(self.close_was_called)
def _test_kill_cursor_explicit(self, read_pref):
    with client_knobs(kill_cursor_frequency=0.01):
        c = rs_client(read_preference=read_pref, w=self.w)
        db = c.pymongo_test
        db.drop_collection("test")

        test = db.test
        test.insert_many([{"i": i} for i in range(20)])

        # Partially evaluate cursor so it's left alive, then kill it
        cursor = test.find().batch_size(10)
        next(cursor)
        self.assertNotEqual(0, cursor.cursor_id)

        if read_pref == ReadPreference.PRIMARY:
            msg = "Expected cursor's address to be %s, got %s" % (
                c.primary, cursor.address)
            self.assertEqual(cursor.address, c.primary, msg)
        else:
            self.assertNotEqual(
                cursor.address, c.primary,
                "Expected cursor's address not to be primary")

        cursor_id = cursor.cursor_id

        # Cursor dead on server - trigger a getMore on the same cursor_id
        # and check that the server returns an error.
        cursor2 = cursor.clone()
        cursor2._Cursor__id = cursor_id

        if sys.platform.startswith('java') or 'PyPy' in sys.version:
            # Explicitly kill cursor.
            cursor.close()
        else:
            # Implicitly kill it in CPython.
            del cursor

        time.sleep(5)
        self.assertRaises(OperationFailure, lambda: list(cursor2))
def test_max_idle_time_checkout(self):
    with client_knobs(kill_cursor_frequency=99999999):
        client = MongoClient(host, port, maxIdleTimeMS=.5)
        time.sleep(1)
        server = client._get_topology().select_server(any_server_selector)
        with server._pool.get_socket({}) as sock_info:
            pass
        time.sleep(1)
        with server._pool.get_socket({}) as new_sock_info:
            self.assertNotEqual(sock_info, new_sock_info)
        self.assertEqual(1, len(server._pool.sockets))
        self.assertFalse(sock_info in server._pool.sockets)
        self.assertTrue(new_sock_info in server._pool.sockets)

        client = MongoClient(host, port)
        server = client._get_topology().select_server(any_server_selector)
        with server._pool.get_socket({}) as sock_info:
            pass
        time.sleep(1)
        with server._pool.get_socket({}) as new_sock_info:
            self.assertEqual(sock_info, new_sock_info)
        self.assertEqual(1, len(server._pool.sockets))
def _test_network_error(self, operation_callback):
    # Verify only the disconnected server is reset by a network failure.

    # Disable background refresh.
    with client_knobs(heartbeat_frequency=999999):
        c = MockClient(
            standalones=[],
            members=['a:1', 'b:2'],
            mongoses=[],
            host='a:1',
            replicaSet='rs',
            connect=False)

        # Set host-specific information so we can test whether it is reset.
        c.set_wire_version_range('a:1', 0, 1)
        c.set_wire_version_range('b:2', 0, 2)
        c._get_topology().select_servers(writable_server_selector)
        wait_until(lambda: len(c.nodes) == 2, 'connect')

        c.kill_host('a:1')

        # MongoClient is disconnected from the primary.
        self.assertRaises(AutoReconnect, operation_callback, c)

        # The primary's description is reset.
        server_a = c._get_topology().get_server_by_address(('a', 1))
        sd_a = server_a.description
        self.assertEqual(SERVER_TYPE.Unknown, sd_a.server_type)
        self.assertEqual(0, sd_a.min_wire_version)
        self.assertEqual(0, sd_a.max_wire_version)

        # ...but not the secondary's.
        server_b = c._get_topology().get_server_by_address(('b', 2))
        sd_b = server_b.description
        self.assertEqual(SERVER_TYPE.RSSecondary, sd_b.server_type)
        self.assertEqual(0, sd_b.min_wire_version)
        self.assertEqual(2, sd_b.max_wire_version)
def run_scenario(self, scenario_def, test): """Run a CMAP spec test.""" self.logs = [] self.assertEqual(scenario_def['version'], 1) self.assertIn(scenario_def['style'], ['unit', 'integration']) self.listener = CMAPListener() self._ops = [] # Configure the fail point before creating the client. if 'failPoint' in test: fp = test['failPoint'] self.set_fail_point(fp) self.addCleanup(self.set_fail_point, { 'configureFailPoint': fp['configureFailPoint'], 'mode': 'off' }) opts = test['poolOptions'].copy() opts['event_listeners'] = [self.listener] opts['_monitor_class'] = DummyMonitor opts['connect'] = False with client_knobs(kill_cursor_frequency=.05, min_heartbeat_interval=.05): client = single_client(**opts) # Update the SD to a known type because the DummyMonitor will not. # Note we cannot simply call topology.on_change because that would # internally call pool.ready() which introduces unexpected # PoolReadyEvents. Instead, update the initial state before # opening the Topology. td = client_context.client._topology.description sd = td.server_descriptions()[(client_context.host, client_context.port)] client._topology._description = updated_topology_description( client._topology._description, sd) client._get_topology() self.addCleanup(client.close) self.pool = list(client._topology._servers.values())[0].pool # Map of target names to Thread objects. self.targets = dict() # Map of label names to Connection objects self.labels = dict() def cleanup(): for t in self.targets.values(): t.stop() for t in self.targets.values(): t.join(5) for conn in self.labels.values(): conn.close_socket(None) self.addCleanup(cleanup) try: if test['error']: with self.assertRaises(PyMongoError) as ctx: self.run_operations(test['operations']) self.check_error(ctx.exception, test['error']) else: self.run_operations(test['operations']) self.check_events(test['events'], test['ignore']) except Exception: # Print the events after a test failure. print('\nFailed test: %r' % (test['description'], )) print('Operations:') for op in self._ops: print(op) print('Threads:') print(self.targets) print('Connections:') print(self.labels) print('Events:') for event in self.listener.events: print(event) print('Log:') for log in self.logs: print(log) raise
def run_scenario(self, dns_response, expect_change):
    # Patch timeouts to ensure short rescan SRV interval.
    with client_knobs(heartbeat_frequency=WAIT_TIME,
                      min_heartbeat_interval=WAIT_TIME,
                      events_queue_frequency=WAIT_TIME):
        self._run_scenario(dns_response, expect_change)
def run_scenario(self, scenario_def, test): """Run a CMAP spec test.""" self.logs = [] self.assertEqual(scenario_def['version'], 1) self.assertIn(scenario_def['style'], ['unit', 'integration']) self.listener = CMAPListener() self._ops = [] # Configure the fail point before creating the client. if 'failPoint' in test: fp = test['failPoint'] self.set_fail_point(fp) self.addCleanup(self.set_fail_point, { 'configureFailPoint': fp['configureFailPoint'], 'mode': 'off'}) opts = test['poolOptions'].copy() opts['event_listeners'] = [self.listener] opts['_monitor_class'] = DummyMonitor with client_knobs(kill_cursor_frequency=.05, min_heartbeat_interval=.05): client = single_client(**opts) self.addCleanup(client.close) # self.pool = get_pools(client)[0] self.pool = list(client._get_topology()._servers.values())[0].pool # Map of target names to Thread objects. self.targets = dict() # Map of label names to Connection objects self.labels = dict() def cleanup(): for t in self.targets.values(): t.stop() for t in self.targets.values(): t.join(5) for conn in self.labels.values(): conn.close_socket(None) self.addCleanup(cleanup) try: if test['error']: with self.assertRaises(PyMongoError) as ctx: self.run_operations(test['operations']) self.check_error(ctx.exception, test['error']) else: self.run_operations(test['operations']) self.check_events(test['events'], test['ignore']) except Exception: # Print the events after a test failure. print('\nFailed test: %r' % (test['description'],)) print('Operations:') for op in self._ops: print(op) print('Threads:') print(self.targets) print('Connections:') print(self.labels) print('Events:') for event in self.listener.events: print(event) print('Log:') for log in self.logs: print(log) raise
def setUpClass(cls):
    super(TestAllScenarios, cls).setUpClass()
    # Speed up the tests by decreasing the heartbeat frequency.
    cls.knobs = client_knobs(heartbeat_frequency=0.1,
                             min_heartbeat_interval=0.1)
    cls.knobs.enable()
def run_scenario(self): responses = (r for r in scenario_def['phases'][0]['responses']) with client_knobs(events_queue_frequency=0.1): class MockMonitor(Monitor): def __init__(self, server_description, topology, pool, topology_settings): """Have to copy entire constructor from Monitor so that we can override _run and change the periodic executor's interval.""" self._server_description = server_description self._pool = pool self._settings = topology_settings self._avg_round_trip_time = MovingAverage() options = self._settings._pool_options self._listeners = options.event_listeners self._publish = self._listeners is not None def target(): monitor = self_ref() if monitor is None: return False MockMonitor._run(monitor) # Change target to subclass return True # Shorten interval executor = periodic_executor.PeriodicExecutor( interval=0.1, min_interval=0.1, target=target, name="pymongo_server_monitor_thread") self._executor = executor self_ref = weakref.ref(self, executor.close) self._topology = weakref.proxy(topology, executor.close) def _run(self): try: if self._server_description.address != ('a', 27017): # Because PyMongo doesn't keep information about # the order of addresses, we might accidentally # start a MockMonitor on the wrong server first, # so we need to only mock responses for the server # the test's response is supposed to come from. return response = next(responses)[1] isMaster = IsMaster(response) self._server_description = ServerDescription( address=self._server_description.address, ismaster=isMaster) self._topology.on_change(self._server_description) except (ReferenceError, StopIteration): # Topology was garbage-collected. self.close() m = single_client(h=scenario_def['uri'], p=27017, event_listeners=(self.all_listener,), _monitor_class=MockMonitor) expected_results = scenario_def['phases'][0]['outcome']['events'] expected_len = len(expected_results) wait_until(lambda: len(self.all_listener.results) >= expected_len, "publish all events", timeout=15) try: i = 0 while i < expected_len: result = self.all_listener.results[i] if len( self.all_listener.results) > i else None # The order of ServerOpening/ClosedEvents doesn't matter if (isinstance(result, monitoring.ServerOpeningEvent) or isinstance(result, monitoring.ServerClosedEvent)): i, passed, message = compare_multiple_events( i, expected_results, self.all_listener.results) self.assertTrue(passed, message) else: self.assertTrue( *compare_events(expected_results[i], result)) i += 1 finally: m.close()
def setUp(self):
    super(TopologyTest, self).setUp()
    self.client_knobs = client_knobs(heartbeat_frequency=999999)
    self.client_knobs.enable()
    self.addCleanup(self.client_knobs.disable)
def run_scenario(self):
    with client_knobs(events_queue_frequency=0.1):
        _run_scenario(self)
def run_scenario(self):
    responses = (r for r in scenario_def['phases'][0]['responses'])

    with client_knobs(events_queue_frequency=0.1):
        class MockMonitor(Monitor):
            def __init__(self, server_description, topology, pool,
                         topology_settings):
                """Have to copy entire constructor from Monitor so that we
                can override _run and change the periodic executor's
                interval."""
                self._server_description = server_description
                self._pool = pool
                self._settings = topology_settings
                self._avg_round_trip_time = MovingAverage()
                options = self._settings._pool_options
                self._listeners = options.event_listeners
                self._publish = self._listeners is not None

                def target():
                    monitor = self_ref()
                    if monitor is None:
                        return False
                    MockMonitor._run(monitor)  # Change target to subclass
                    return True

                # Shorten interval
                executor = periodic_executor.PeriodicExecutor(
                    interval=0.1,
                    min_interval=0.1,
                    target=target,
                    name="pymongo_server_monitor_thread")
                self._executor = executor
                self_ref = weakref.ref(self, executor.close)
                self._topology = weakref.proxy(topology, executor.close)

            def _run(self):
                try:
                    if self._server_description.address != ('a', 27017):
                        # Because PyMongo doesn't keep information about
                        # the order of addresses, we might accidentally
                        # start a MockMonitor on the wrong server first,
                        # so we need to only mock responses for the server
                        # the test's response is supposed to come from.
                        return
                    response = next(responses)[1]
                    isMaster = IsMaster(response)
                    self._server_description = ServerDescription(
                        address=self._server_description.address,
                        ismaster=isMaster)
                    self._topology.on_change(self._server_description)
                except (ReferenceError, StopIteration):
                    # Topology was garbage-collected.
                    self.close()

        m = single_client(h=scenario_def['uri'], p=27017,
                          event_listeners=(self.all_listener, ),
                          _monitor_class=MockMonitor)

        expected_results = scenario_def['phases'][0]['outcome']['events']
        expected_len = len(expected_results)
        wait_until(lambda: len(self.all_listener.results) >= expected_len,
                   "publish all events", timeout=15)

        try:
            i = 0
            while i < expected_len:
                result = self.all_listener.results[i] if len(
                    self.all_listener.results) > i else None
                # The order of ServerOpening/ClosedEvents doesn't matter
                if (isinstance(result, monitoring.ServerOpeningEvent) or
                        isinstance(result, monitoring.ServerClosedEvent)):
                    i, passed, message = compare_multiple_events(
                        i, expected_results, self.all_listener.results)
                    self.assertTrue(passed, message)
                else:
                    self.assertTrue(
                        *compare_events(expected_results[i], result))
                    i += 1
        finally:
            m.close()