def test_mongo_client(self):
    pair = client_context.pair
    m = rs_or_single_client(w=0)
    coll = m.pymongo_test.write_concern_test
    coll.drop()
    doc = {"_id": ObjectId()}
    coll.insert_one(doc)
    self.assertTrue(coll.insert_one(doc))
    coll = coll.with_options(write_concern=WriteConcern(w=1))
    self.assertRaises(OperationFailure, coll.insert_one, doc)

    m = rs_or_single_client()
    coll = m.pymongo_test.write_concern_test
    new_coll = coll.with_options(write_concern=WriteConcern(w=0))
    self.assertTrue(new_coll.insert_one(doc))
    self.assertRaises(OperationFailure, coll.insert_one, doc)

    m = rs_or_single_client("mongodb://%s/" % (pair,),
                            replicaSet=client_context.replica_set_name)
    coll = m.pymongo_test.write_concern_test
    self.assertRaises(OperationFailure, coll.insert_one, doc)

    m = rs_or_single_client("mongodb://%s/?w=0" % (pair,),
                            replicaSet=client_context.replica_set_name)
    coll = m.pymongo_test.write_concern_test
    coll.insert_one(doc)

    # Equality tests
    direct = connected(single_client(w=0))
    direct2 = connected(
        single_client("mongodb://%s/?w=0" % (pair,), **self.credentials))
    self.assertEqual(direct, direct2)
    self.assertFalse(direct != direct2)
def test_mongos_max_staleness(self):
    # Sanity check that we're sending maxStalenessSeconds
    coll = client_context.client.pymongo_test.get_collection(
        "test", read_preference=SecondaryPreferred(max_staleness=120))
    # No error
    coll.find_one()

    coll = client_context.client.pymongo_test.get_collection(
        "test", read_preference=SecondaryPreferred(max_staleness=10))
    try:
        coll.find_one()
    except OperationFailure as exc:
        self.assertEqual(160, exc.code)
    else:
        self.fail("mongos accepted invalid staleness")

    coll = single_client(
        readPreference='secondaryPreferred',
        maxStalenessSeconds=120).pymongo_test.test
    # No error
    coll.find_one()

    coll = single_client(
        readPreference='secondaryPreferred',
        maxStalenessSeconds=10).pymongo_test.test
    try:
        coll.find_one()
    except OperationFailure as exc:
        self.assertEqual(160, exc.code)
    else:
        self.fail("mongos accepted invalid staleness")
def test_gridfs_secondary(self):
    primary_host, primary_port = self.primary
    primary_connection = single_client(primary_host, primary_port)

    secondary_host, secondary_port = one(self.secondaries)
    secondary_connection = single_client(
        secondary_host, secondary_port,
        read_preference=ReadPreference.SECONDARY)

    # Should detect it's connected to secondary and not attempt to
    # create index
    fs = gridfs.GridFS(secondary_connection.gfsreplica, 'gfssecondarytest')

    # This won't detect secondary, raises error
    self.assertRaises(ConnectionFailure, fs.put, b'foo')
def test_monitor_waits_after_server_check_error(self):
    # This test implements:
    # https://github.com/mongodb/specifications/blob/6c5b2ac/source/server-discovery-and-monitoring/server-discovery-and-monitoring-tests.rst#monitors-sleep-at-least-minheartbeatfreqencyms-between-checks
    fail_ismaster = {
        'mode': {'times': 5},
        'data': {
            'failCommands': ['isMaster'],
            'errorCode': 1234,
            'appName': 'SDAMMinHeartbeatFrequencyTest',
        },
    }
    with self.fail_point(fail_ismaster):
        start = time.time()
        client = single_client(appName='SDAMMinHeartbeatFrequencyTest',
                               serverSelectionTimeoutMS=5000)
        self.addCleanup(client.close)
        # Force a connection.
        client.admin.command('ping')
        duration = time.time() - start
    # Explanation of the expected events:
    # 0ms: run configureFailPoint
    # 1ms: create MongoClient
    # 2ms: failed monitor handshake, 1
    # 502ms: failed monitor handshake, 2
    # 1002ms: failed monitor handshake, 3
    # 1502ms: failed monitor handshake, 4
    # 2002ms: failed monitor handshake, 5
    # 2502ms: monitor handshake succeeds
    # 2503ms: run awaitable isMaster
    # 2504ms: application handshake succeeds
    # 2505ms: ping command succeeds
    self.assertGreaterEqual(duration, 2)
    self.assertLessEqual(duration, 3.5)
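# NOTE: the fail_point context manager used above is a test-suite helper that
# is not shown in this excerpt. A minimal sketch of what such a helper
# typically does (an assumption, not the suite's actual implementation): it
# enables the server's "failCommand" fail point for the duration of a block
# and always disables it again afterwards.
from contextlib import contextmanager


@contextmanager
def fail_point(client, command_args):
    # Enable the fail point with the caller's mode/data options.
    cmd = {'configureFailPoint': 'failCommand'}
    cmd.update(command_args)
    client.admin.command(cmd)
    try:
        yield
    finally:
        # Always turn the fail point off, even if the block raised.
        client.admin.command(
            {'configureFailPoint': 'failCommand', 'mode': 'off'})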
def create_mock_monitor(self, responses, uri, expected_results):
    with client_knobs(heartbeat_frequency=0.1, events_queue_frequency=0.1):
        class MockMonitor(Monitor):
            def _check_with_socket(self, sock_info):
                if isinstance(responses[1], Exception):
                    raise responses[1]
                return IsMaster(responses[1]), 99

        m = single_client(
            h=uri,
            event_listeners=(self.all_listener,),
            _monitor_class=MockMonitor,
            _pool_class=MockPool)

        expected_len = len(expected_results)
        wait_until(lambda: len(self.all_listener.results) == expected_len,
                   "publish all events", timeout=15)

        try:
            for i in range(len(expected_results)):
                result = (self.all_listener.results[i]
                          if len(self.all_listener.results) > i else None)
                self.assertEqual(expected_results[i],
                                 result.__class__.__name__)
                self.assertEqual(result.connection_id, responses[0])
                if expected_results[i] != 'ServerHeartbeatStartedEvent':
                    if isinstance(result.reply, IsMaster):
                        self.assertEqual(result.duration, 99)
                        self.assertEqual(result.reply._doc, responses[1])
                    else:
                        self.assertEqual(result.reply, responses[1])
        finally:
            m.close()
def test_reads_from_secondary(self):
    host, port = next(iter(self.client.secondaries))
    # Direct connection to a secondary.
    client = single_client(host, port)
    self.assertFalse(client.is_primary)

    # Regardless of read preference, we should be able to do
    # "reads" with a direct connection to a secondary.
    # See server-selection.rst#topology-type-single.
    self.assertEqual(client.read_preference, ReadPreference.PRIMARY)

    db = client.pymongo_test
    coll = db.test

    # Test find and find_one.
    self.assertIsNotNone(coll.find_one())
    self.assertEqual(10, len(list(coll.find())))

    # Test some database helpers.
    self.assertIsNotNone(db.collection_names())
    self.assertIsNotNone(db.validate_collection("test"))
    self.assertIsNotNone(db.command("count", "test"))

    # Test some collection helpers.
    self.assertEqual(10, coll.count())
    self.assertEqual(10, len(coll.distinct("_id")))
    self.assertIsNotNone(coll.aggregate([]))
    self.assertIsNotNone(coll.index_information())

    # Test some "magic" namespace helpers.
    self.assertIsNotNone(db.current_op())
def test_cache(self):
    client = single_client()
    # Force authentication.
    client.admin.command('ismaster')

    all_credentials = client._MongoClient__all_credentials
    credentials = all_credentials.get('admin')
    cache = credentials.cache
    self.assertIsNotNone(cache)

    data = cache.data
    self.assertIsNotNone(data)
    self.assertEqual(len(data), 4)

    ckey, skey, salt, iterations = data
    self.assertIsInstance(ckey, bytes)
    self.assertIsInstance(skey, bytes)
    self.assertIsInstance(salt, bytes)
    self.assertIsInstance(iterations, int)

    pool = next(iter(client._topology._servers.values()))._pool
    with pool.get_socket(all_credentials) as sock_info:
        authset = sock_info.authset
        cached = set(all_credentials.values())
        self.assertEqual(len(cached), 1)
        self.assertFalse(authset - cached)
        self.assertFalse(cached - authset)

        sock_credentials = next(iter(authset))
        sock_cache = sock_credentials.cache
        self.assertIsNotNone(sock_cache)
        self.assertEqual(sock_cache.data, data)
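# NOTE (illustration only, not part of the test above): the four cached
# values checked in test_cache are the SCRAM client key, server key, salt,
# and iteration count. A hedged sketch of how such keys are derived per
# RFC 5802, which is why caching them avoids repeating the expensive PBKDF2
# step on every reauthentication:
import hashlib
import hmac


def derive_scram_sha1_keys(password, salt, iterations):
    # PBKDF2 is the costly part; its output is what makes caching worthwhile.
    salted_password = hashlib.pbkdf2_hmac('sha1', password, salt, iterations)
    client_key = hmac.new(salted_password, b'Client Key',
                          hashlib.sha1).digest()
    server_key = hmac.new(salted_password, b'Server Key',
                          hashlib.sha1).digest()
    return client_key, server_key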
def test_not_master_error(self):
    address = next(iter(self.client.secondaries))
    client = single_client(*address)
    # Clear authentication command results from the listener.
    client.admin.command('ismaster')
    self.listener.results = {}

    error = None
    try:
        client.pymongo_test.test.find_one_and_delete({})
    except NotMasterError as exc:
        error = exc.errors

    results = self.listener.results
    started = results.get('started')
    failed = results.get('failed')
    self.assertIsNone(results.get('succeeded'))
    self.assertTrue(isinstance(started, monitoring.CommandStartedEvent))
    self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent))
    self.assertEqual('findAndModify', failed.command_name)
    self.assertEqual(address, failed.connection_id)
    self.assertEqual(0, failed.failure.get('ok'))
    self.assertTrue(isinstance(failed.request_id, int))
    self.assertTrue(isinstance(failed.duration_micros, int))
    self.assertEqual(error, failed.failure)
def test_gridfs_secondary(self):
    primary_host, primary_port = self.primary
    primary_connection = single_client(primary_host, primary_port)

    secondary_host, secondary_port = one(self.secondaries)
    secondary_connection = single_client(
        secondary_host, secondary_port,
        read_preference=ReadPreference.SECONDARY)

    primary_connection.pymongo_test.drop_collection("fs.files")
    primary_connection.pymongo_test.drop_collection("fs.chunks")

    # Should detect it's connected to secondary and not attempt to
    # create index
    fs = gridfs.GridFS(secondary_connection.pymongo_test)

    # This won't detect secondary, raises error
    self.assertRaises(ConnectionFailure, fs.put, b"foo")
def run_scenario(self, scenario_def, test):
    """Run a CMAP spec test."""
    self.assertEqual(scenario_def['version'], 1)
    self.assertEqual(scenario_def['style'], 'unit')
    self.listener = CMAPListener()
    opts = test['poolOptions'].copy()
    opts['event_listeners'] = [self.listener]
    client = single_client(**opts)
    self.addCleanup(client.close)
    self.pool = get_pool(client)

    # Map of target names to Thread objects.
    self.targets = dict()
    # Map of label names to Connection objects
    self.labels = dict()

    def cleanup():
        for t in self.targets.values():
            t.stop()
        for t in self.targets.values():
            t.join(5)
        for conn in self.labels.values():
            conn.close_socket(None)

    self.addCleanup(cleanup)

    if test['error']:
        with self.assertRaises(PyMongoError) as ctx:
            self.run_operations(test['operations'])
        self.check_error(ctx.exception, test['error'])
    else:
        self.run_operations(test['operations'])

    self.check_events(test['events'], test['ignore'])
def test_reads_from_secondary(self):
    host, port = next(iter(self.client.secondaries))
    # Direct connection to a secondary.
    client = single_client(host, port)
    self.assertFalse(client.is_primary)

    # Regardless of read preference, we should be able to do
    # "reads" with a direct connection to a secondary.
    # See server-selection.rst#topology-type-single.
    self.assertEqual(client.read_preference, ReadPreference.PRIMARY)

    db = client.pymongo_test
    coll = db.test

    # Test find and find_one.
    self.assertIsNotNone(coll.find_one())
    self.assertEqual(10, len(list(coll.find())))

    # Test some database helpers.
    self.assertIsNotNone(db.collection_names())
    self.assertIsNotNone(db.list_collection_names())
    self.assertIsNotNone(db.validate_collection("test"))
    self.assertIsNotNone(db.command("ping"))

    # Test some collection helpers.
    self.assertEqual(10, coll.count_documents({}))
    self.assertEqual(10, len(coll.distinct("_id")))
    self.assertIsNotNone(coll.aggregate([]))
    self.assertIsNotNone(coll.index_information())

    # Test some "magic" namespace helpers.
    self.assertIsNotNone(db.current_op())
def test_pool_unpause(self):
    # This test implements the prose test "Connection Pool Management"
    listener = CMAPHeartbeatListener()
    client = single_client(appName="SDAMPoolManagementTest",
                           heartbeatFrequencyMS=500,
                           event_listeners=[listener])
    self.addCleanup(client.close)
    # Assert that ConnectionPoolReadyEvent occurs after the first
    # ServerHeartbeatSucceededEvent.
    listener.wait_for_event(monitoring.PoolReadyEvent, 1)
    pool_ready = listener.events_by_type(monitoring.PoolReadyEvent)[0]
    hb_succeeded = listener.events_by_type(
        monitoring.ServerHeartbeatSucceededEvent)[0]
    self.assertGreater(listener.events.index(pool_ready),
                       listener.events.index(hb_succeeded))

    listener.reset()
    fail_hello = {
        'mode': {'times': 2},
        'data': {
            'failCommands': [HelloCompat.LEGACY_CMD, 'hello'],
            'errorCode': 1234,
            'appName': 'SDAMPoolManagementTest',
        },
    }
    with self.fail_point(fail_hello):
        listener.wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1)
        listener.wait_for_event(monitoring.PoolClearedEvent, 1)
        listener.wait_for_event(monitoring.ServerHeartbeatSucceededEvent, 1)
        listener.wait_for_event(monitoring.PoolReadyEvent, 1)
def test_4_subscribe_to_events(self):
    listener = CMAPListener()
    client = single_client(event_listeners=[listener])
    self.addCleanup(client.close)
    self.assertEqual(listener.event_count(PoolCreatedEvent), 1)

    # Creates a new connection.
    client.admin.command('isMaster')
    self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 1)
    self.assertEqual(listener.event_count(ConnectionCreatedEvent), 1)
    self.assertEqual(listener.event_count(ConnectionReadyEvent), 1)
    self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 1)
    self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 1)

    # Uses the existing connection.
    client.admin.command('isMaster')
    self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 2)
    self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 2)
    self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 2)

    client.close()
    self.assertEqual(listener.event_count(PoolClearedEvent), 1)
    self.assertEqual(listener.event_count(ConnectionClosedEvent), 1)
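# NOTE: CMAPListener above is a test helper; a rough sketch of the idea (an
# assumption about its shape, not the suite's real class) is a
# ConnectionPoolListener that records every CMAP event in order so tests can
# assert on counts and ordering:
from pymongo import monitoring


class RecordingCMAPListener(monitoring.ConnectionPoolListener):
    def __init__(self):
        self.events = []

    def event_count(self, event_type):
        return sum(1 for event in self.events
                   if isinstance(event, event_type))

    def _record(self, event):
        self.events.append(event)

    # Every CMAP callback simply records its event.
    pool_created = pool_ready = pool_cleared = pool_closed = _record
    connection_created = connection_ready = connection_closed = _record
    connection_check_out_started = connection_check_out_failed = _record
    connection_checked_out = connection_checked_in = _record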
def setUpClass(cls):
    cls.listener = EventListener()
    cls.saved_listeners = monitoring._LISTENERS
    # Don't use any global subscribers.
    monitoring._LISTENERS = monitoring._Listeners([], [], [], [])
    cls.client = single_client(event_listeners=[cls.listener])
    cls.db = cls.client.pymongo_test
def test_5_check_out_fails_connection_error(self):
    listener = CMAPListener()
    client = single_client(event_listeners=[listener])
    self.addCleanup(client.close)
    pool = get_pool(client)

    def mock_connect(*args, **kwargs):
        raise ConnectionFailure('connect failed')

    pool.connect = mock_connect
    # Un-patch Pool.connect to break the cyclic reference.
    self.addCleanup(delattr, pool, 'connect')

    # Attempt to create a new connection.
    with self.assertRaisesRegex(ConnectionFailure, 'connect failed'):
        client.admin.command('isMaster')

    self.assertIsInstance(listener.events[0], PoolCreatedEvent)
    self.assertIsInstance(listener.events[1], PoolReadyEvent)
    self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent)
    self.assertIsInstance(listener.events[3], ConnectionCheckOutFailedEvent)
    self.assertIsInstance(listener.events[4], PoolClearedEvent)

    failed_event = listener.events[3]
    self.assertEqual(failed_event.reason,
                     ConnectionCheckOutFailedReason.CONN_ERROR)
def test_bool(self):
    client = single_client()
    wait_until(lambda: client.address, "discover primary")
    selection = Selection.from_topology_description(
        client._topology.description)

    self.assertTrue(selection)
    self.assertFalse(selection.with_server_descriptions([]))
def _testOperation_targetedFailPoint(self, spec):
    session = self.entity_map[spec['session']]
    if not session._pinned_address:
        self.fail("Cannot use targetedFailPoint operation with unpinned "
                  "session %s" % (spec['session'],))

    client = single_client('%s:%s' % session._pinned_address)
    self.__set_fail_point(client=client, command_args=spec['failPoint'])
    self.addCleanup(client.close)
def setUpClass(cls):
    super(TestTransactions, cls).setUpClass()
    # Speed up tests by reducing SDAM waiting time after a network error.
    cls.knobs = client_knobs(min_heartbeat_interval=0.1)
    cls.knobs.enable()
    cls.mongos_clients = []
    if client_context.supports_transactions():
        for address in client_context.mongoses:
            cls.mongos_clients.append(single_client('%s:%s' % address))
def setUpClass(cls):
    super(TestBulkWriteConcern, cls).setUpClass()
    cls.w = client_context.w
    cls.secondary = None
    if cls.w > 1:
        for member in client_context.hello['hosts']:
            if member != client_context.hello['primary']:
                cls.secondary = single_client(*partition_node(member))
                break
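# NOTE: partition_node() above is a test utility not shown in this excerpt.
# A plausible sketch (an assumption about its behavior): it splits a
# "host:port" member string into a (host, port) pair, defaulting to 27017.
def partition_node(node):
    host, port = node, 27017
    idx = node.rfind(':')
    if idx != -1:
        host, port = node[:idx], int(node[idx + 1:])
    return host, port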
def run_scenario(self):
    responses = (r for r in scenario_def['phases'][0]['responses'])

    with client_knobs(events_queue_frequency=0.1,
                      heartbeat_frequency=0.1,
                      min_heartbeat_interval=0.1):
        class MockMonitor(Monitor):
            """Override the _run method"""
            def _run(self):
                try:
                    if self._server_description.address != ('a', 27017):
                        # Because PyMongo doesn't keep information about
                        # the order of addresses, we might accidentally
                        # start a MockMonitor on the wrong server first,
                        # so we need to only mock responses for the server
                        # the test's response is supposed to come from.
                        return
                    response = next(responses)[1]
                    isMaster = IsMaster(response)
                    self._server_description = ServerDescription(
                        address=self._server_description.address,
                        ismaster=isMaster)
                    self._topology.on_change(self._server_description)
                except (ReferenceError, StopIteration):
                    # Topology was garbage-collected.
                    self.close()

        m = single_client(h=scenario_def['uri'], p=27017,
                          event_listeners=(self.all_listener,),
                          _monitor_class=MockMonitor)

        expected_results = scenario_def['phases'][0]['outcome']['events']
        expected_len = len(expected_results)
        wait_until(lambda: len(self.all_listener.results) >= expected_len,
                   "publish all events", timeout=15)

        try:
            i = 0
            while i < expected_len:
                result = (self.all_listener.results[i]
                          if len(self.all_listener.results) > i else None)
                # The order of ServerOpening/ClosedEvents doesn't matter
                if (isinstance(result, monitoring.ServerOpeningEvent) or
                        isinstance(result, monitoring.ServerClosedEvent)):
                    i, passed, message = compare_multiple_events(
                        i, expected_results, self.all_listener.results)
                    self.assertTrue(passed, message)
                else:
                    self.assertTrue(
                        *compare_events(expected_results[i], result))
                    i += 1
        finally:
            m.close()
def setUpClass(cls):
    super(TransactionsBase, cls).setUpClass()
    # Speed up tests by reducing SDAM waiting time after a network error.
    cls.knobs = client_knobs(min_heartbeat_interval=0.1)
    cls.knobs.enable()
    cls.mongos_clients = []
    if client_context.supports_transactions():
        for address in client_context.mongoses:
            cls.mongos_clients.append(single_client('%s:%s' % address))
def _run_scenario(self):
    class NoopMonitor(Monitor):
        """Override the _run method to do nothing."""
        def _run(self):
            time.sleep(0.05)

    m = single_client(h=scenario_def['uri'], p=27017,
                      event_listeners=[self.all_listener],
                      _monitor_class=NoopMonitor)
    topology = m._get_topology()

    try:
        for phase in scenario_def['phases']:
            for (source, response) in phase['responses']:
                source_address = clean_node(source)
                topology.on_change(
                    ServerDescription(address=source_address,
                                      ismaster=IsMaster(response),
                                      round_trip_time=0))

            expected_results = phase['outcome']['events']
            expected_len = len(expected_results)
            wait_until(
                lambda: len(self.all_listener.results) >= expected_len,
                "publish all events", timeout=15)

            # Wait some time to catch possible lagging extra events.
            time.sleep(0.5)

            i = 0
            while i < expected_len:
                result = (self.all_listener.results[i]
                          if len(self.all_listener.results) > i else None)
                # The order of ServerOpening/ClosedEvents doesn't matter
                if isinstance(result, (monitoring.ServerOpeningEvent,
                                       monitoring.ServerClosedEvent)):
                    i, passed, message = compare_multiple_events(
                        i, expected_results, self.all_listener.results)
                    self.assertTrue(passed, message)
                else:
                    self.assertTrue(
                        *compare_events(expected_results[i], result))
                    i += 1

            # Assert no extra events.
            extra_events = self.all_listener.results[expected_len:]
            if extra_events:
                self.fail('Extra events %r' % (extra_events,))

            self.all_listener.reset()
    finally:
        m.close()
def test_not_master_error(self):
    secondary_address = one(self.secondaries)
    direct_client = single_client(*secondary_address)

    with self.assertRaises(NotMasterError):
        direct_client.pymongo_test.collection.insert_one({})

    db = direct_client.get_database(
        "pymongo_test", write_concern=WriteConcern(w=0))
    with self.assertRaises(NotMasterError):
        db.collection.insert_one({})
def test_gridfs_secondary_lazy(self):
    # Should detect it's connected to secondary and not attempt to
    # create index.
    secondary_host, secondary_port = one(self.secondaries)
    client = single_client(secondary_host, secondary_port,
                           read_preference=ReadPreference.SECONDARY,
                           connect=False)

    # Still no connection.
    fs = gridfs.GridFS(client.test_gridfs_secondary_lazy)

    # Connects, doesn't create index.
    self.assertRaises(NoFile, fs.get_last_version)
    self.assertRaises(ConnectionFailure, fs.put, "data")
def run_scenario(self, scenario_def, test):
    """Run a CMAP spec test."""
    self.assertEqual(scenario_def['version'], 1)
    self.assertEqual(scenario_def['style'], 'unit')
    self.listener = CMAPListener()
    self._ops = []

    opts = test['poolOptions'].copy()
    opts['event_listeners'] = [self.listener]
    client = single_client(**opts)
    self.addCleanup(client.close)
    self.pool = get_pool(client)

    # Map of target names to Thread objects.
    self.targets = dict()
    # Map of label names to Connection objects
    self.labels = dict()

    def cleanup():
        for t in self.targets.values():
            t.stop()
        for t in self.targets.values():
            t.join(5)
        for conn in self.labels.values():
            conn.close_socket(None)

    self.addCleanup(cleanup)

    try:
        if test['error']:
            with self.assertRaises(PyMongoError) as ctx:
                self.run_operations(test['operations'])
            self.check_error(ctx.exception, test['error'])
        else:
            self.run_operations(test['operations'])

        self.check_events(test['events'], test['ignore'])
    except Exception:
        # Print the events after a test failure.
        print()
        print('Failed test: %r' % (test['description'],))
        print('Operations:')
        for op in self._ops:
            print(op)
        print('Threads:')
        print(self.targets)
        print('Connections:')
        print(self.labels)
        print('Events:')
        for event in self.listener.events:
            print(event)
        raise
def test_close_leaves_pool_unpaused(self):
    # Needed until we implement PYTHON-2463. This test is related to
    # test_threads.TestThreads.test_client_disconnect
    listener = CMAPListener()
    client = single_client(event_listeners=[listener])
    client.admin.command('ping')
    pool = get_pool(client)
    client.close()
    self.assertEqual(1, listener.event_count(PoolClearedEvent))
    self.assertEqual(PoolState.READY, pool.state)
    # Checking out a connection should succeed
    with pool.get_socket({}):
        pass
def test_atexit_hook(self):
    client = single_client(client_context.host, client_context.port)
    executor = one(client._topology._servers.values())._monitor._executor
    connected(client)

    # The executor stores a weakref to itself in _EXECUTORS.
    ref = one([r for r in _EXECUTORS.copy() if r() is executor])

    del executor
    del client

    wait_until(partial(unregistered, ref), 'unregister executor', timeout=5)
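# NOTE: unregistered() above is a small test helper not shown in this excerpt.
# A plausible sketch (assumption): force a garbage-collection pass, then check
# that the monitor executor's weakref has dropped out of the periodic
# executor registry.
import gc

from pymongo.periodic_executor import _EXECUTORS


def unregistered(ref):
    gc.collect()
    return ref not in _EXECUTORS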
def test_gridfs_secondary(self):
    secondary_host, secondary_port = one(self.client.secondaries)
    secondary_connection = single_client(
        secondary_host, secondary_port,
        read_preference=ReadPreference.SECONDARY)

    # Should detect it's connected to secondary and not attempt to
    # create index
    gfs = gridfs.GridFSBucket(
        secondary_connection.gfsbucketreplica, 'gfsbucketsecondarytest')

    # This won't detect secondary, raises error
    self.assertRaises(NotPrimaryError, gfs.upload_from_stream,
                      "test_filename", b'foo')
def setUpClass(cls):
    super(TestBulkWriteConcern, cls).setUpClass()
    cls.w = client_context.w
    cls.secondary = None
    if cls.w > 1:
        for member in client_context.ismaster['hosts']:
            if member != client_context.ismaster['primary']:
                cls.secondary = single_client(*partition_node(member))
                break

    # We tested wtimeout errors by specifying a write concern greater than
    # the number of members, but in MongoDB 2.7.8+ this causes a different
    # sort of error, "Not enough data-bearing nodes". In recent servers we
    # use a failpoint to pause replication on a secondary.
    cls.need_replication_stopped = client_context.version.at_least(2, 7, 8)
def test_gridfs_secondary_lazy(self):
    # Should detect it's connected to secondary and not attempt to
    # create index.
    secondary_host, secondary_port = one(self.client.secondaries)
    client = single_client(secondary_host, secondary_port,
                           read_preference=ReadPreference.SECONDARY,
                           connect=False)

    # Still no connection.
    fs = gridfs.GridFS(client.gfsreplica, 'gfssecondarylazytest')

    # Connects, doesn't create index.
    self.assertRaises(NoFile, fs.get_last_version)
    self.assertRaises(NotMasterError, fs.put, 'data')
def test_heartbeat_awaited_flag(self):
    hb_listener = HeartbeatEventListener()
    client = single_client(event_listeners=[hb_listener],
                           heartbeatFrequencyMS=500,
                           appName='heartbeatEventAwaitedFlag')
    self.addCleanup(client.close)
    # Force a connection.
    client.admin.command('ping')

    def hb_succeeded(event):
        return isinstance(event, monitoring.ServerHeartbeatSucceededEvent)

    def hb_failed(event):
        return isinstance(event, monitoring.ServerHeartbeatFailedEvent)

    fail_heartbeat = {
        'mode': {'times': 2},
        'data': {
            'failCommands': ['isMaster'],
            'closeConnection': True,
            'appName': 'heartbeatEventAwaitedFlag',
        },
    }
    with self.fail_point(fail_heartbeat):
        wait_until(lambda: hb_listener.matching(hb_failed),
                   "published failed event")
    # Reconnect.
    client.admin.command('ping')

    hb_succeeded_events = hb_listener.matching(hb_succeeded)
    hb_failed_events = hb_listener.matching(hb_failed)
    self.assertFalse(hb_succeeded_events[0].awaited)
    self.assertTrue(hb_failed_events[0].awaited)

    # Depending on thread scheduling, the failed heartbeat could occur on
    # the second or third check.
    events = [type(e) for e in hb_listener.events[:4]]
    if events == [monitoring.ServerHeartbeatStartedEvent,
                  monitoring.ServerHeartbeatSucceededEvent,
                  monitoring.ServerHeartbeatStartedEvent,
                  monitoring.ServerHeartbeatFailedEvent]:
        self.assertFalse(hb_succeeded_events[1].awaited)
    else:
        self.assertTrue(hb_succeeded_events[1].awaited)
def test_gridfs_secondary_lazy(self):
    # Should detect it's connected to secondary and not attempt to
    # create index.
    secondary_host, secondary_port = one(self.secondaries)
    client = single_client(secondary_host, secondary_port,
                           read_preference=ReadPreference.SECONDARY,
                           connect=False)

    # Still no connection.
    gfs = gridfs.GridFSBucket(client.test_gridfs_secondary_lazy)

    # Connects, doesn't create index.
    self.assertRaises(NoFile, gfs.open_download_stream_by_name,
                      "test_filename")
    self.assertRaises(ConnectionFailure, gfs.upload_from_stream,
                      "test_filename", b'data')
def create_mock_monitor(self, responses, uri, expected_results):
    listener = HeartbeatEventListener()
    with client_knobs(heartbeat_frequency=0.1,
                      min_heartbeat_interval=0.1,
                      events_queue_frequency=0.1):
        class MockMonitor(Monitor):
            def _check_with_socket(self, sock_info, metadata=None):
                if isinstance(responses[1], Exception):
                    raise responses[1]
                return IsMaster(responses[1]), 99

        m = single_client(
            h=uri,
            event_listeners=(listener,),
            _monitor_class=MockMonitor,
            _pool_class=MockPool)

        expected_len = len(expected_results)
        # Wait for *at least* expected_len number of results. The
        # monitor thread may run multiple times during the execution
        # of this test.
        wait_until(lambda: len(listener.results) >= expected_len,
                   "publish all events")

        try:
            # zip gives us len(expected_results) pairs.
            for expected, actual in zip(expected_results, listener.results):
                self.assertEqual(expected, actual.__class__.__name__)
                self.assertEqual(actual.connection_id, responses[0])
                if expected != 'ServerHeartbeatStartedEvent':
                    if isinstance(actual.reply, IsMaster):
                        self.assertEqual(actual.duration, 99)
                        self.assertEqual(actual.reply._doc, responses[1])
                    else:
                        self.assertEqual(actual.reply, responses[1])
        finally:
            m.close()
def create_mock_monitor(self, responses, uri, expected_results):
    listener = HeartbeatEventListener()
    with client_knobs(heartbeat_frequency=0.1,
                      min_heartbeat_interval=0.1,
                      events_queue_frequency=0.1):
        class MockMonitor(Monitor):
            def _check_with_socket(self, *args, **kwargs):
                if isinstance(responses[1], Exception):
                    raise responses[1]
                return Hello(responses[1]), 99

        m = single_client(
            h=uri,
            event_listeners=(listener,),
            _monitor_class=MockMonitor,
            _pool_class=MockPool)

        expected_len = len(expected_results)
        # Wait for *at least* expected_len number of results. The
        # monitor thread may run multiple times during the execution
        # of this test.
        wait_until(lambda: len(listener.events) >= expected_len,
                   "publish all events")

        try:
            # zip gives us len(expected_results) pairs.
            for expected, actual in zip(expected_results, listener.events):
                self.assertEqual(expected, actual.__class__.__name__)
                self.assertEqual(actual.connection_id, responses[0])
                if expected != 'ServerHeartbeatStartedEvent':
                    if isinstance(actual.reply, Hello):
                        self.assertEqual(actual.duration, 99)
                        self.assertEqual(actual.reply._doc, responses[1])
                    else:
                        self.assertEqual(actual.reply, responses[1])
        finally:
            m.close()
def test_heartbeat_frequency_ms(self):
    class HeartbeatStartedListener(ServerHeartbeatListener):
        def __init__(self):
            self.results = []

        def started(self, event):
            self.results.append(event)

        def succeeded(self, event):
            pass

        def failed(self, event):
            pass

    old_init = ServerHeartbeatStartedEvent.__init__

    def init(self, *args):
        old_init(self, *args)
        self.time = time.time()

    try:
        ServerHeartbeatStartedEvent.__init__ = init
        listener = HeartbeatStartedListener()
        uri = "mongodb://%s:%d/?heartbeatFrequencyMS=500" % (host, port)
        client = single_client(uri, event_listeners=[listener])
        wait_until(lambda: len(listener.results) >= 2,
                   "record two ServerHeartbeatStartedEvents")

        events = listener.results
        # Default heartbeatFrequencyMS is 10 sec. Check the interval was
        # closer to 0.5 sec with heartbeatFrequencyMS configured.
        self.assertAlmostEqual(
            events[1].time - events[0].time, 0.5, delta=2)

        client.close()
    finally:
        ServerHeartbeatStartedEvent.__init__ = old_init
def run_scenario(self):
    responses = (r for r in scenario_def['phases'][0]['responses'])

    with client_knobs(events_queue_frequency=0.1):
        class MockMonitor(Monitor):
            def __init__(self, server_description, topology, pool,
                         topology_settings):
                """Have to copy entire constructor from Monitor so that we
                can override _run and change the periodic executor's
                interval."""
                self._server_description = server_description
                self._pool = pool
                self._settings = topology_settings
                self._avg_round_trip_time = MovingAverage()
                options = self._settings._pool_options
                self._listeners = options.event_listeners
                self._publish = self._listeners is not None

                def target():
                    monitor = self_ref()
                    if monitor is None:
                        return False
                    MockMonitor._run(monitor)  # Change target to subclass
                    return True

                # Shorten interval
                executor = periodic_executor.PeriodicExecutor(
                    interval=0.1,
                    min_interval=0.1,
                    target=target,
                    name="pymongo_server_monitor_thread")
                self._executor = executor
                self_ref = weakref.ref(self, executor.close)
                self._topology = weakref.proxy(topology, executor.close)

            def _run(self):
                try:
                    if self._server_description.address != ('a', 27017):
                        # Because PyMongo doesn't keep information about
                        # the order of addresses, we might accidentally
                        # start a MockMonitor on the wrong server first,
                        # so we need to only mock responses for the server
                        # the test's response is supposed to come from.
                        return
                    response = next(responses)[1]
                    isMaster = IsMaster(response)
                    self._server_description = ServerDescription(
                        address=self._server_description.address,
                        ismaster=isMaster)
                    self._topology.on_change(self._server_description)
                except (ReferenceError, StopIteration):
                    # Topology was garbage-collected.
                    self.close()

        m = single_client(h=scenario_def['uri'], p=27017,
                          event_listeners=(self.all_listener,),
                          _monitor_class=MockMonitor)

        expected_results = scenario_def['phases'][0]['outcome']['events']
        expected_len = len(expected_results)
        wait_until(lambda: len(self.all_listener.results) >= expected_len,
                   "publish all events", timeout=15)

        try:
            i = 0
            while i < expected_len:
                result = (self.all_listener.results[i]
                          if len(self.all_listener.results) > i else None)
                # The order of ServerOpening/ClosedEvents doesn't matter
                if (isinstance(result, monitoring.ServerOpeningEvent) or
                        isinstance(result, monitoring.ServerClosedEvent)):
                    i, passed, message = compare_multiple_events(
                        i, expected_results, self.all_listener.results)
                    self.assertTrue(passed, message)
                else:
                    self.assertTrue(
                        *compare_events(expected_results[i], result))
                    i += 1
        finally:
            m.close()
def setUpClass(cls):
    cls.listener = EventListener()
    cls.saved_listeners = monitoring._LISTENERS
    monitoring._LISTENERS = monitoring._Listeners([])
    cls.client = single_client(event_listeners=[cls.listener])
def setUpClass(cls):
    cls.listener = EventListener()
    cls.listener.add_command_filter('killCursors')
    cls.saved_listeners = monitoring._LISTENERS
    monitoring._LISTENERS = monitoring._Listeners([])
    cls.client = single_client(event_listeners=[cls.listener])
def test_max_await_time_ms(self):
    db = self.db
    db.pymongo_test.drop()
    coll = db.create_collection("pymongo_test", capped=True, size=4096)

    self.assertRaises(TypeError, coll.find().max_await_time_ms, 'foo')
    coll.insert_one({"amalia": 1})
    coll.insert_one({"amalia": 2})

    coll.find().max_await_time_ms(None)
    coll.find().max_await_time_ms(long(1))

    # When cursor is not tailable_await
    cursor = coll.find()
    self.assertEqual(None, cursor._Cursor__max_await_time_ms)
    cursor = coll.find().max_await_time_ms(99)
    self.assertEqual(None, cursor._Cursor__max_await_time_ms)

    # If cursor is tailable_await and timeout is unset
    cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT)
    self.assertEqual(None, cursor._Cursor__max_await_time_ms)

    # If cursor is tailable_await and timeout is set
    cursor = coll.find(
        cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99)
    self.assertEqual(99, cursor._Cursor__max_await_time_ms)

    cursor = coll.find(
        cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(
            10).max_await_time_ms(90)
    self.assertEqual(90, cursor._Cursor__max_await_time_ms)

    listener = EventListener()
    listener.add_command_filter('killCursors')
    saved_listeners = monitoring._LISTENERS
    monitoring._LISTENERS = monitoring._Listeners([])
    coll = single_client(
        event_listeners=[listener])[self.db.name].pymongo_test
    results = listener.results

    try:
        # Tailable_await defaults.
        list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT))
        # find
        self.assertFalse('maxTimeMS' in results['started'][0].command)
        # getMore
        self.assertFalse('maxTimeMS' in results['started'][1].command)
        results.clear()

        # Tailable_await with max_await_time_ms set.
        list(coll.find(
            cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99))
        # find
        self.assertEqual('find', results['started'][0].command_name)
        self.assertFalse('maxTimeMS' in results['started'][0].command)
        # getMore
        self.assertEqual('getMore', results['started'][1].command_name)
        self.assertTrue('maxTimeMS' in results['started'][1].command)
        self.assertEqual(99, results['started'][1].command['maxTimeMS'])
        results.clear()

        # Tailable_await with max_time_ms
        list(coll.find(
            cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(1))
        # find
        self.assertEqual('find', results['started'][0].command_name)
        self.assertTrue('maxTimeMS' in results['started'][0].command)
        self.assertEqual(1, results['started'][0].command['maxTimeMS'])
        # getMore
        self.assertEqual('getMore', results['started'][1].command_name)
        self.assertFalse('maxTimeMS' in results['started'][1].command)
        results.clear()

        # Tailable_await with both max_time_ms and max_await_time_ms
        list(coll.find(
            cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(
                1).max_await_time_ms(99))
        # find
        self.assertEqual('find', results['started'][0].command_name)
        self.assertTrue('maxTimeMS' in results['started'][0].command)
        self.assertEqual(1, results['started'][0].command['maxTimeMS'])
        # getMore
        self.assertEqual('getMore', results['started'][1].command_name)
        self.assertTrue('maxTimeMS' in results['started'][1].command)
        self.assertEqual(99, results['started'][1].command['maxTimeMS'])
        results.clear()

        # Non tailable_await with max_await_time_ms
        list(coll.find(batch_size=1).max_await_time_ms(99))
        # find
        self.assertEqual('find', results['started'][0].command_name)
        self.assertFalse('maxTimeMS' in results['started'][0].command)
        # getMore
        self.assertEqual('getMore', results['started'][1].command_name)
        self.assertFalse('maxTimeMS' in results['started'][1].command)
        results.clear()

        # Non tailable_await with max_time_ms
        list(coll.find(batch_size=1).max_time_ms(99))
        # find
        self.assertEqual('find', results['started'][0].command_name)
        self.assertTrue('maxTimeMS' in results['started'][0].command)
        self.assertEqual(99, results['started'][0].command['maxTimeMS'])
        # getMore
        self.assertEqual('getMore', results['started'][1].command_name)
        self.assertFalse('maxTimeMS' in results['started'][1].command)

        # Non tailable_await with both max_time_ms and max_await_time_ms
        list(coll.find(batch_size=1).max_time_ms(99).max_await_time_ms(88))
        # find
        self.assertEqual('find', results['started'][0].command_name)
        self.assertTrue('maxTimeMS' in results['started'][0].command)
        self.assertEqual(99, results['started'][0].command['maxTimeMS'])
        # getMore
        self.assertEqual('getMore', results['started'][1].command_name)
        self.assertFalse('maxTimeMS' in results['started'][1].command)
    finally:
        monitoring._LISTENERS = saved_listeners
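# NOTE (usage sketch, not part of the test above): on a capped collection, a
# TAILABLE_AWAIT cursor makes the server hold each getMore open waiting for
# new documents; max_await_time_ms() bounds only that server-side wait, while
# max_time_ms() bounds the initial find. The database/collection names below
# are illustrative assumptions.
from pymongo import MongoClient
from pymongo.cursor import CursorType

client = MongoClient()
capped = client.demo_db.capped_demo  # assumed to be a capped collection
cursor = capped.find(
    cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(500)
while cursor.alive:
    try:
        print(cursor.next())  # each getMore waits on the server up to 500ms
    except StopIteration:
        # No new documents arrived within the await window; poll again.
        pass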