def test_close_leaves_pool_unpaused(self):
    # Needed until we implement PYTHON-2463. This test is related to
    # test_threads.TestThreads.test_client_disconnect
    listener = CMAPListener()
    client = single_client(event_listeners=[listener])
    client.admin.command('ping')
    pool = get_pool(client)
    client.close()
    self.assertEqual(1, listener.event_count(PoolClearedEvent))
    self.assertEqual(PoolState.READY, pool.state)
    # Checking out a connection should succeed.
    with pool.get_socket({}):
        pass
def test_5_check_out_fails_connection_error(self):
    listener = CMAPListener()
    client = single_client(event_listeners=[listener])
    self.addCleanup(client.close)
    pool = get_pool(client)

    def mock_connect(*args, **kwargs):
        raise ConnectionFailure('connect failed')
    pool.connect = mock_connect
    # Un-patch Pool.connect to break the cyclic reference.
    self.addCleanup(delattr, pool, 'connect')

    # Attempt to create a new connection.
    with self.assertRaisesRegex(ConnectionFailure, 'connect failed'):
        client.admin.command('isMaster')

    self.assertIsInstance(listener.events[0], PoolCreatedEvent)
    self.assertIsInstance(listener.events[1], PoolReadyEvent)
    self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent)
    self.assertIsInstance(listener.events[3], ConnectionCheckOutFailedEvent)
    self.assertIsInstance(listener.events[4], PoolClearedEvent)

    failed_event = listener.events[3]
    self.assertEqual(failed_event.reason,
                     ConnectionCheckOutFailedReason.CONN_ERROR)
def test_pool_paused_error_is_retryable(self):
    cmap_listener = CMAPListener()
    cmd_listener = OvertCommandListener()
    client = rs_or_single_client(
        maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener])
    self.addCleanup(client.close)
    for _ in range(10):
        cmap_listener.reset()
        cmd_listener.reset()
        threads = [InsertThread(client.pymongo_test.test)
                   for _ in range(2)]
        fail_command = {
            'mode': {'times': 1},
            'data': {
                'failCommands': ['insert'],
                'blockConnection': True,
                'blockTimeMS': 1000,
                'errorCode': 91,
                'errorLabels': ['RetryableWriteError'],
            },
        }
        with self.fail_point(fail_command):
            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()
            for thread in threads:
                self.assertTrue(thread.passed)
        # It's possible that SDAM can rediscover the server and mark the
        # pool ready before the thread in the wait queue has a chance
        # to run. Repeat the test until the thread actually encounters
        # a PoolClearedError.
        if cmap_listener.event_count(ConnectionCheckOutFailedEvent):
            break

    # Via CMAP monitoring, assert that the first check out succeeds.
    cmap_events = cmap_listener.events_by_type((
        ConnectionCheckedOutEvent,
        ConnectionCheckOutFailedEvent,
        PoolClearedEvent))
    msg = pprint.pformat(cmap_listener.events)
    self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg)
    self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg)
    self.assertIsInstance(
        cmap_events[2], ConnectionCheckOutFailedEvent, msg)
    self.assertEqual(cmap_events[2].reason,
                     ConnectionCheckOutFailedReason.CONN_ERROR, msg)
    self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg)

    # Connection check out failures are not reflected in command
    # monitoring because we only publish command events _after_ checking
    # out a connection.
    started = cmd_listener.results['started']
    msg = pprint.pformat(cmd_listener.results)
    self.assertEqual(3, len(started), msg)
    succeeded = cmd_listener.results['succeeded']
    self.assertEqual(2, len(succeeded), msg)
    failed = cmd_listener.results['failed']
    self.assertEqual(1, len(failed), msg)
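# The InsertThread helper used above is not defined in this section. A
# minimal sketch of what it is assumed to look like: a daemon thread that
# performs a single insert_one() and records success in `passed` so the
# main thread can assert on it after join(). The exact definition in the
# test suite may differ.
import threading

class InsertThread(threading.Thread):
    def __init__(self, collection):
        super(InsertThread, self).__init__()
        self.daemon = True
        self.collection = collection
        self.passed = False

    def run(self):
        # Only reached without error when the insert (including any
        # retry) succeeds.
        self.collection.insert_one({})
        self.passed = True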
def run_scenario(self, scenario_def, test):
    """Run a CMAP spec test."""
    self.assertEqual(scenario_def['version'], 1)
    self.assertEqual(scenario_def['style'], 'unit')
    self.listener = CMAPListener()
    self._ops = []

    opts = test['poolOptions'].copy()
    opts['event_listeners'] = [self.listener]
    client = single_client(**opts)
    self.addCleanup(client.close)
    self.pool = get_pool(client)

    # Map of target names to Thread objects.
    self.targets = dict()
    # Map of label names to Connection objects.
    self.labels = dict()

    def cleanup():
        for t in self.targets.values():
            t.stop()
        for t in self.targets.values():
            t.join(5)
        for conn in self.labels.values():
            conn.close_socket(None)

    self.addCleanup(cleanup)

    try:
        if test['error']:
            with self.assertRaises(PyMongoError) as ctx:
                self.run_operations(test['operations'])
            self.check_error(ctx.exception, test['error'])
        else:
            self.run_operations(test['operations'])
        self.check_events(test['events'], test['ignore'])
    except Exception:
        # Print the events after a test failure.
        print()
        print('Failed test: %r' % (test['description'],))
        print('Operations:')
        for op in self._ops:
            print(op)
        print('Threads:')
        print(self.targets)
        print('Connections:')
        print(self.labels)
        print('Events:')
        for event in self.listener.events:
            print(event)
        raise
def test_pool_paused_error_is_retryable(self):
    cmap_listener = CMAPListener()
    cmd_listener = OvertCommandListener()
    client = rs_or_single_client(
        maxPoolSize=1, heartbeatFrequencyMS=500,
        event_listeners=[cmap_listener, cmd_listener])
    self.addCleanup(client.close)
    threads = [InsertThread(client.pymongo_test.test) for _ in range(3)]
    fail_command = {
        'mode': {'times': 1},
        'data': {
            'failCommands': ['insert'],
            'blockConnection': True,
            'blockTimeMS': 1000,
            'errorCode': 91,
        },
    }
    with self.fail_point(fail_command):
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        for thread in threads:
            self.assertTrue(thread.passed)

    # The two threads in the wait queue fail the initial connection check
    # out attempt and then succeed on retry.
    self.assertEqual(
        2, cmap_listener.event_count(ConnectionCheckOutFailedEvent))
    # Connection check out failures are not reflected in command
    # monitoring because we only publish command events _after_ checking
    # out a connection.
    self.assertEqual(4, len(cmd_listener.results['started']))
    self.assertEqual(3, len(cmd_listener.results['succeeded']))
    self.assertEqual(1, len(cmd_listener.results['failed']))
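# self.fail_point(...) is provided by the shared test harness rather than
# this section. A plausible sketch of that method, assuming a
# `client_context.client` admin handle as used elsewhere in this suite:
# enable the server's `failCommand` fail point on entry and always disable
# it on exit, even if the block raises.
import contextlib

@contextlib.contextmanager
def fail_point(self, command_args):
    # 'configureFailPoint' must be the first key of the command document.
    cmd_on = {'configureFailPoint': 'failCommand'}
    cmd_on.update(command_args)
    client_context.client.admin.command(cmd_on)
    try:
        yield
    finally:
        client_context.client.admin.command(
            'configureFailPoint', cmd_on['configureFailPoint'], mode='off')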
@classmethod
def setUpClass(cls):
    super(TestConnectionsSurvivePrimaryStepDown, cls).setUpClass()
    cls.listener = CMAPListener()
    cls.client = rs_or_single_client(event_listeners=[cls.listener],
                                     retryWrites=False)

    # Ensure connections to all servers in replica set. This is to test
    # that the is_writable flag is properly updated for sockets that
    # survive a replica set election.
    ensure_all_connected(cls.client)
    cls.listener.reset()

    cls.db = cls.client.get_database(
        "step-down", write_concern=WriteConcern("majority"))
    cls.coll = cls.db.get_collection(
        "step-down", write_concern=WriteConcern("majority"))
def test_5_check_out_fails_auth_error(self):
    listener = CMAPListener()
    client = single_client(username="******",
                           password="******",
                           event_listeners=[listener])
    self.addCleanup(client.close)

    # Attempt to create a new connection.
    with self.assertRaisesRegex(OperationFailure, 'failed'):
        client.admin.command('isMaster')

    self.assertIsInstance(listener.events[0], PoolCreatedEvent)
    self.assertIsInstance(listener.events[1], PoolReadyEvent)
    self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent)
    self.assertIsInstance(listener.events[3], ConnectionCreatedEvent)
    # Error happens here.
    self.assertIsInstance(listener.events[4], ConnectionClosedEvent)
    self.assertIsInstance(listener.events[5], ConnectionCheckOutFailedEvent)
    self.assertEqual(listener.events[5].reason,
                     ConnectionCheckOutFailedReason.CONN_ERROR)
def test_5_check_out_fails_auth_error(self):
    listener = CMAPListener()
    client = single_client(event_listeners=[listener])
    self.addCleanup(client.close)
    pool = get_pool(client)
    connect = pool.connect

    def mock_check_auth(self, *args, **kwargs):
        self.close_socket(ConnectionClosedReason.ERROR)
        raise ConnectionFailure('auth failed')

    def mock_connect(*args, **kwargs):
        sock_info = connect(*args, **kwargs)
        sock_info.check_auth = functools.partial(mock_check_auth, sock_info)
        # Un-patch to break the cyclic reference.
        self.addCleanup(delattr, sock_info, 'check_auth')
        return sock_info
    pool.connect = mock_connect
    # Un-patch Pool.connect to break the cyclic reference.
    self.addCleanup(delattr, pool, 'connect')

    # Attempt to create a new connection.
    with self.assertRaisesRegex(ConnectionFailure, 'auth failed'):
        client.admin.command('isMaster')

    self.assertIsInstance(listener.events[0], PoolCreatedEvent)
    self.assertIsInstance(listener.events[1], ConnectionCheckOutStartedEvent)
    self.assertIsInstance(listener.events[2], ConnectionCreatedEvent)
    # Error happens here.
    self.assertIsInstance(listener.events[3], ConnectionClosedEvent)
    self.assertIsInstance(listener.events[4], ConnectionCheckOutFailedEvent)
    self.assertIsInstance(listener.events[5], PoolClearedEvent)

    failed_event = listener.events[4]
    self.assertEqual(
        failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR)
def run_scenario(self, scenario_def, test):
    self.maybe_skip_scenario(test)

    # Kill all sessions before and after each test to prevent an open
    # transaction (from a test failure) from blocking collection/database
    # operations during test set up and tear down.
    self.kill_all_sessions()
    self.addCleanup(self.kill_all_sessions)
    self.setup_scenario(scenario_def)
    database_name = self.get_scenario_db_name(scenario_def)
    collection_name = self.get_scenario_coll_name(scenario_def)
    # SPEC-1245 workaround StaleDbVersion on distinct.
    for c in self.mongos_clients:
        c[database_name][collection_name].distinct("x")

    # Configure the fail point before creating the client.
    if 'failPoint' in test:
        fp = test['failPoint']
        self.set_fail_point(fp)
        self.addCleanup(self.set_fail_point, {
            'configureFailPoint': fp['configureFailPoint'],
            'mode': 'off'})

    listener = OvertCommandListener()
    pool_listener = CMAPListener()
    server_listener = ServerAndTopologyEventListener()
    # Create a new client, to avoid interference from pooled sessions.
    client_options = self.parse_client_options(test['clientOptions'])
    # MMAPv1 does not support retryable writes.
    if (client_options.get('retryWrites') is True and
            client_context.storage_engine == 'mmapv1'):
        self.skipTest("MMAPv1 does not support retryWrites=True")
    use_multi_mongos = test['useMultipleMongoses']
    if client_context.is_mongos and use_multi_mongos:
        client = rs_client(
            client_context.mongos_seeds(),
            event_listeners=[listener, pool_listener, server_listener],
            **client_options)
    else:
        client = rs_client(
            event_listeners=[listener, pool_listener, server_listener],
            **client_options)
    self.scenario_client = client
    self.listener = listener
    self.pool_listener = pool_listener
    self.server_listener = server_listener
    # Close the client explicitly to avoid having too many threads open.
    self.addCleanup(client.close)

    # Create session0 and session1.
    sessions = {}
    session_ids = {}
    for i in range(2):
        # Don't attempt to create sessions if they are not supported by
        # the running server version.
        if not client_context.sessions_enabled:
            break
        session_name = 'session%d' % i
        opts = camel_to_snake_args(test['sessionOptions'][session_name])
        if 'default_transaction_options' in opts:
            txn_opts = self.parse_options(
                opts['default_transaction_options'])
            txn_opts = client_session.TransactionOptions(**txn_opts)
            opts['default_transaction_options'] = txn_opts

        s = client.start_session(**dict(opts))
        sessions[session_name] = s
        # Store lsid so we can access it after end_session, in
        # check_events.
        session_ids[session_name] = s.session_id

    self.addCleanup(end_sessions, sessions)

    collection = client[database_name][collection_name]
    self.run_test_ops(sessions, collection, test)

    end_sessions(sessions)

    self.check_events(test, listener, session_ids)

    # Disable fail points.
    if 'failPoint' in test:
        fp = test['failPoint']
        self.set_fail_point({
            'configureFailPoint': fp['configureFailPoint'],
            'mode': 'off'})

    # Assert final state is expected.
    outcome = test['outcome']
    expected_c = outcome.get('collection')
    if expected_c is not None:
        outcome_coll_name = self.get_outcome_coll_name(outcome, collection)

        # Read from the primary with local read concern to ensure causal
        # consistency.
        outcome_coll = client_context.client[
            collection.database.name].get_collection(
                outcome_coll_name,
                read_preference=ReadPreference.PRIMARY,
                read_concern=ReadConcern('local'))
        actual_data = list(outcome_coll.find(sort=[('_id', 1)]))

        # The expected data needs to be the left hand side here otherwise
        # CompareType(Binary) doesn't work.
        self.assertEqual(wrap_types(expected_c['data']), actual_data)
def test_4_subscribe_to_events(self):
    listener = CMAPListener()
    client = single_client(event_listeners=[listener])
    self.addCleanup(client.close)
    self.assertEqual(listener.event_count(PoolCreatedEvent), 1)

    # Creates a new connection.
    client.admin.command('isMaster')
    self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 1)
    self.assertEqual(listener.event_count(ConnectionCreatedEvent), 1)
    self.assertEqual(listener.event_count(ConnectionReadyEvent), 1)
    self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 1)
    self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 1)

    # Uses the existing connection.
    client.admin.command('isMaster')
    self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 2)
    self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 2)
    self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 2)

    client.close()
    self.assertEqual(listener.event_count(PoolClearedEvent), 1)
    self.assertEqual(listener.event_count(ConnectionClosedEvent), 1)
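# CMAPListener is a test utility, not part of the public API. A minimal
# sketch consistent with how it is used in these tests: it extends
# pymongo.monitoring.ConnectionPoolListener, funnels every callback into a
# single ordered event list, and offers event_count()/events_by_type()
# helpers. Only two callbacks are shown; the real helper is assumed to
# override all of them the same way.
from pymongo import monitoring

class CMAPListener(monitoring.ConnectionPoolListener):
    def __init__(self):
        self.events = []

    def reset(self):
        self.events = []

    def add_event(self, event):
        self.events.append(event)

    def event_count(self, event_type):
        return len([e for e in self.events if isinstance(e, event_type)])

    def events_by_type(self, event_types):
        # Return the matching events in the order they occurred.
        return [e for e in self.events if isinstance(e, event_types)]

    # Every ConnectionPoolListener callback records its event, e.g.:
    def pool_created(self, event):
        self.add_event(event)

    def connection_checked_out(self, event):
        self.add_event(event)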
def run_scenario(self, scenario_def, test):
    """Run a CMAP spec test."""
    self.logs = []
    self.assertEqual(scenario_def['version'], 1)
    self.assertIn(scenario_def['style'], ['unit', 'integration'])
    self.listener = CMAPListener()
    self._ops = []

    # Configure the fail point before creating the client.
    if 'failPoint' in test:
        fp = test['failPoint']
        self.set_fail_point(fp)
        self.addCleanup(self.set_fail_point, {
            'configureFailPoint': fp['configureFailPoint'],
            'mode': 'off'})

    opts = test['poolOptions'].copy()
    opts['event_listeners'] = [self.listener]
    opts['_monitor_class'] = DummyMonitor
    opts['connect'] = False
    with client_knobs(kill_cursor_frequency=.05,
                      min_heartbeat_interval=.05):
        client = single_client(**opts)
        # Update the SD to a known type because the DummyMonitor will not.
        # Note we cannot simply call topology.on_change because that would
        # internally call pool.ready() which introduces unexpected
        # PoolReadyEvents. Instead, update the initial state before
        # opening the Topology.
        td = client_context.client._topology.description
        sd = td.server_descriptions()[
            (client_context.host, client_context.port)]
        client._topology._description = updated_topology_description(
            client._topology._description, sd)
        client._get_topology()
    self.addCleanup(client.close)
    self.pool = list(client._topology._servers.values())[0].pool

    # Map of target names to Thread objects.
    self.targets = dict()
    # Map of label names to Connection objects.
    self.labels = dict()

    def cleanup():
        for t in self.targets.values():
            t.stop()
        for t in self.targets.values():
            t.join(5)
        for conn in self.labels.values():
            conn.close_socket(None)

    self.addCleanup(cleanup)

    try:
        if test['error']:
            with self.assertRaises(PyMongoError) as ctx:
                self.run_operations(test['operations'])
            self.check_error(ctx.exception, test['error'])
        else:
            self.run_operations(test['operations'])
        self.check_events(test['events'], test['ignore'])
    except Exception:
        # Print the events after a test failure.
        print('\nFailed test: %r' % (test['description'],))
        print('Operations:')
        for op in self._ops:
            print(op)
        print('Threads:')
        print(self.targets)
        print('Connections:')
        print(self.labels)
        print('Events:')
        for event in self.listener.events:
            print(event)
        print('Log:')
        for log in self.logs:
            print(log)
        raise
def run_scenario(self, scenario_def, test):
    """Run a CMAP spec test."""
    self.logs = []
    self.assertEqual(scenario_def['version'], 1)
    self.assertIn(scenario_def['style'], ['unit', 'integration'])
    self.listener = CMAPListener()
    self._ops = []

    # Configure the fail point before creating the client.
    if 'failPoint' in test:
        fp = test['failPoint']
        self.set_fail_point(fp)
        self.addCleanup(self.set_fail_point, {
            'configureFailPoint': fp['configureFailPoint'],
            'mode': 'off'})

    opts = test['poolOptions'].copy()
    opts['event_listeners'] = [self.listener]
    opts['_monitor_class'] = DummyMonitor
    with client_knobs(kill_cursor_frequency=.05,
                      min_heartbeat_interval=.05):
        client = single_client(**opts)
    self.addCleanup(client.close)
    # self.pool = get_pools(client)[0]
    self.pool = list(client._get_topology()._servers.values())[0].pool

    # Map of target names to Thread objects.
    self.targets = dict()
    # Map of label names to Connection objects.
    self.labels = dict()

    def cleanup():
        for t in self.targets.values():
            t.stop()
        for t in self.targets.values():
            t.join(5)
        for conn in self.labels.values():
            conn.close_socket(None)

    self.addCleanup(cleanup)

    try:
        if test['error']:
            with self.assertRaises(PyMongoError) as ctx:
                self.run_operations(test['operations'])
            self.check_error(ctx.exception, test['error'])
        else:
            self.run_operations(test['operations'])
        self.check_events(test['events'], test['ignore'])
    except Exception:
        # Print the events after a test failure.
        print('\nFailed test: %r' % (test['description'],))
        print('Operations:')
        for op in self._ops:
            print(op)
        print('Threads:')
        print(self.targets)
        print('Connections:')
        print(self.labels)
        print('Events:')
        for event in self.listener.events:
            print(event)
        print('Log:')
        for log in self.logs:
            print(log)
        raise
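# DummyMonitor (passed via the private `_monitor_class` option above) lets
# the test, rather than SDAM, drive server-state changes. A sketch under
# the assumption that it only needs to satisfy the Monitor interface the
# Topology calls; the constructor signature mirrors pymongo's internal
# Monitor and every method is a no-op apart from tracking open state.
class DummyMonitor(object):
    def __init__(self, server_description, topology, pool,
                 topology_settings):
        self._server_description = server_description
        self.opened = False

    def cancel_check(self):
        pass

    def open(self):
        self.opened = True

    def request_check(self):
        pass

    def close(self):
        self.opened = False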