def test_pool_paused_error_is_retryable(self):
    cmap_listener = CMAPListener()
    cmd_listener = OvertCommandListener()
    client = rs_or_single_client(
        maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener])
    self.addCleanup(client.close)
    for _ in range(10):
        cmap_listener.reset()
        cmd_listener.reset()
        threads = [InsertThread(client.pymongo_test.test)
                   for _ in range(2)]
        fail_command = {
            'mode': {'times': 1},
            'data': {
                'failCommands': ['insert'],
                'blockConnection': True,
                'blockTimeMS': 1000,
                'errorCode': 91,
                'errorLabels': ['RetryableWriteError'],
            },
        }
        with self.fail_point(fail_command):
            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()
            for thread in threads:
                self.assertTrue(thread.passed)
        # It's possible that SDAM can rediscover the server and mark the
        # pool ready before the thread in the wait queue has a chance
        # to run. Repeat the test until the thread actually encounters
        # a PoolClearedError.
        if cmap_listener.event_count(ConnectionCheckOutFailedEvent):
            break

    # Via CMAP monitoring, assert that the first check out succeeds.
    cmap_events = cmap_listener.events_by_type((
        ConnectionCheckedOutEvent,
        ConnectionCheckOutFailedEvent,
        PoolClearedEvent))
    msg = pprint.pformat(cmap_listener.events)
    self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg)
    self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg)
    self.assertIsInstance(
        cmap_events[2], ConnectionCheckOutFailedEvent, msg)
    self.assertEqual(cmap_events[2].reason,
                     ConnectionCheckOutFailedReason.CONN_ERROR, msg)
    self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg)

    # Connection check out failures are not reflected in command
    # monitoring because we only publish command events _after_ checking
    # out a connection.
    started = cmd_listener.results['started']
    msg = pprint.pformat(cmd_listener.results)
    self.assertEqual(3, len(started), msg)
    succeeded = cmd_listener.results['succeeded']
    self.assertEqual(2, len(succeeded), msg)
    failed = cmd_listener.results['failed']
    self.assertEqual(1, len(failed), msg)
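
# Note: InsertThread is a test helper that is not shown in this section.
# Below is a minimal sketch of what the test above assumes it does; the
# exact name, attributes, and behavior are assumptions, not the actual
# helper from the test suite.
import threading


class InsertThread(threading.Thread):
    """Run a single insert_one on a thread and record whether it passed."""

    def __init__(self, collection):
        super().__init__()
        self.daemon = True
        self.collection = collection
        self.passed = False

    def run(self):
        # If insert_one raises, self.passed stays False and the test's
        # assertTrue(thread.passed) fails.
        self.collection.insert_one({})
        self.passed = True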
def test_transaction_starts_with_batched_write(self):
    if 'PyPy' in sys.version and client_context.tls:
        self.skipTest('PYTHON-2937 PyPy is so slow sending large '
                      'messages over TLS that this test fails')
    # Start a transaction with a batch of operations that needs to be
    # split.
    listener = OvertCommandListener()
    client = rs_client(event_listeners=[listener])
    coll = client[self.db.name].test
    coll.delete_many({})
    listener.reset()
    self.addCleanup(client.close)
    self.addCleanup(coll.drop)
    large_str = '\0' * (10 * 1024 * 1024)
    ops = [InsertOne({'a': large_str}) for _ in range(10)]
    with client.start_session() as session:
        with session.start_transaction():
            coll.bulk_write(ops, session=session)

    # Assert commands were constructed properly.
    self.assertEqual(['insert', 'insert', 'insert', 'commitTransaction'],
                     listener.started_command_names())
    first_cmd = listener.results['started'][0].command
    self.assertTrue(first_cmd['startTransaction'])
    lsid = first_cmd['lsid']
    txn_number = first_cmd['txnNumber']
    for event in listener.results['started'][1:]:
        self.assertNotIn('startTransaction', event.command)
        self.assertEqual(lsid, event.command['lsid'])
        self.assertEqual(txn_number, event.command['txnNumber'])
    self.assertEqual(10, coll.count_documents({}))
def test_load_balancing(self):
    listener = OvertCommandListener()
    # PYTHON-2584: Use a large localThresholdMS to avoid the impact of
    # varying RTTs.
    client = rs_client(client_context.mongos_seeds(),
                       appName='loadBalancingTest',
                       event_listeners=[listener],
                       localThresholdMS=10000)
    self.addCleanup(client.close)
    wait_until(lambda: len(client.nodes) == 2, 'discover both nodes')
    # Delay find commands on one mongos.
    delay_finds = {
        'configureFailPoint': 'failCommand',
        'mode': {'times': 10000},
        'data': {
            'failCommands': ['find'],
            'blockConnection': True,
            'blockTimeMS': 500,
            'appName': 'loadBalancingTest',
        },
    }
    with self.fail_point(delay_finds):
        nodes = client_context.client.nodes
        self.assertEqual(len(nodes), 1)
        delayed_server = next(iter(nodes))
        freqs = self.frequencies(client, listener)
        self.assertLessEqual(freqs[delayed_server], 0.25)
    listener.reset()
    freqs = self.frequencies(client, listener)
    self.assertAlmostEqual(freqs[delayed_server], 0.50, delta=0.15)
def test_load_balancing(self):
    listener = OvertCommandListener()
    client = rs_client(client_context.mongos_seeds(),
                       appName='loadBalancingTest',
                       event_listeners=[listener])
    self.addCleanup(client.close)
    # Delay find commands on one mongos.
    delay_finds = {
        'configureFailPoint': 'failCommand',
        'mode': {'times': 10000},
        'data': {
            'failCommands': ['find'],
            'blockConnection': True,
            'blockTimeMS': 500,
            'appName': 'loadBalancingTest',
        },
    }
    with self.fail_point(delay_finds):
        nodes = client_context.client.nodes
        self.assertEqual(len(nodes), 1)
        delayed_server = next(iter(nodes))
        freqs = self.frequencies(client, listener)
        self.assertLessEqual(freqs[delayed_server], 0.25)
    listener.reset()
    freqs = self.frequencies(client, listener)
    self.assertAlmostEqual(freqs[delayed_server], 0.50, delta=0.15)
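
# Note: self.frequencies is a helper not shown in this section. The sketch
# below illustrates the behavior the two load-balancing tests above rely on:
# run a batch of find operations, then return the fraction of 'find' commands
# each server address received. The method name matches the tests, but the
# operation count and single-threaded loop are assumptions, not the actual
# helper.
def frequencies(self, client, listener, n_finds=100):
    coll = client.test.test
    for _ in range(n_finds):
        coll.find_one({})
    # CommandStartedEvent.connection_id is the (host, port) address of the
    # server the command was sent to.
    finds = [event for event in listener.results['started']
             if event.command_name == 'find']
    freqs = {address: 0.0 for address in client.nodes}
    for event in finds:
        freqs[event.connection_id] += 1
    for address in freqs:
        freqs[address] /= float(len(finds))
    return freqs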
def test_command_options_txn(self):
    listener = OvertCommandListener()
    client = rs_or_single_client(server_api=ServerApi('1'),
                                 event_listeners=[listener])
    self.addCleanup(client.close)
    coll = client.test.test
    coll.insert_many([{} for _ in range(100)])
    self.addCleanup(coll.delete_many, {})
    listener.reset()

    with client.start_session() as s, s.start_transaction():
        coll.insert_many([{} for _ in range(100)], session=s)
        list(coll.find(batch_size=25, session=s))
        client.test.command('find', 'test', session=s)
        self.assertServerApiInAllCommands(listener.results['started'])
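
# Note: assertServerApiInAllCommands is a helper not shown in this section.
# A minimal sketch of what the test above assumes it checks (the exact
# implementation is an assumption): every started command declares the
# versioned API requested via ServerApi('1').
def assertServerApiInAllCommands(self, events):
    for event in events:
        self.assertIn('apiVersion', event.command)
        self.assertEqual(event.command['apiVersion'], '1')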
def test_send_hedge(self):
    cases = {
        'primaryPreferred': PrimaryPreferred,
        'secondary': Secondary,
        'secondaryPreferred': SecondaryPreferred,
        'nearest': Nearest,
    }
    listener = OvertCommandListener()
    client = rs_client(event_listeners=[listener])
    self.addCleanup(client.close)
    client.admin.command('ping')
    for mode, cls in cases.items():
        pref = cls(hedge={'enabled': True})
        coll = client.test.get_collection('test', read_preference=pref)
        listener.reset()
        coll.find_one()
        started = listener.results['started']
        self.assertEqual(len(started), 1, started)
        cmd = started[0].command
        self.assertIn('$readPreference', cmd)
        self.assertEqual(cmd['$readPreference'], pref.document)
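
# For reference (not part of the test above): pref.document is the wire-format
# read preference, so the final assertEqual compares against a document of
# roughly this shape, e.g. for the 'secondary' case:
#     Secondary(hedge={'enabled': True}).document
#     == {'mode': 'secondary', 'hedge': {'enabled': True}}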
def test_data_key(self):
    listener = OvertCommandListener()
    client = rs_or_single_client(event_listeners=[listener])
    self.addCleanup(client.close)
    client.db.coll.drop()
    vault = create_key_vault(client.keyvault.datakeys)
    self.addCleanup(vault.drop)

    # Configure the encrypted field via the local schema_map option.
    schemas = {
        "db.coll": {
            "bsonType": "object",
            "properties": {
                "encrypted_placeholder": {
                    "encrypt": {
                        "keyId": "/placeholder",
                        "bsonType": "string",
                        "algorithm":
                            "AEAD_AES_256_CBC_HMAC_SHA_512-Random"
                    }
                }
            }
        }
    }
    opts = AutoEncryptionOpts(
        self.kms_providers(), 'keyvault.datakeys', schema_map=schemas)
    client_encrypted = rs_or_single_client(
        auto_encryption_opts=opts, uuidRepresentation='standard')
    self.addCleanup(client_encrypted.close)

    client_encryption = ClientEncryption(
        self.kms_providers(), 'keyvault.datakeys', client, OPTS)
    self.addCleanup(client_encryption.close)

    # Local create data key.
    listener.reset()
    local_datakey_id = client_encryption.create_data_key(
        'local', key_alt_names=['local_altname'])
    self.assertBinaryUUID(local_datakey_id)
    cmd = listener.results['started'][-1]
    self.assertEqual('insert', cmd.command_name)
    self.assertEqual({'w': 'majority'}, cmd.command.get('writeConcern'))
    docs = list(vault.find({'_id': local_datakey_id}))
    self.assertEqual(len(docs), 1)
    self.assertEqual(docs[0]['masterKey']['provider'], 'local')

    # Local encrypt by key_id.
    local_encrypted = client_encryption.encrypt(
        'hello local',
        Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
        key_id=local_datakey_id)
    self.assertEncrypted(local_encrypted)
    client_encrypted.db.coll.insert_one(
        {'_id': 'local', 'value': local_encrypted})
    doc_decrypted = client_encrypted.db.coll.find_one({'_id': 'local'})
    self.assertEqual(doc_decrypted['value'], 'hello local')

    # Local encrypt by key_alt_name.
    local_encrypted_altname = client_encryption.encrypt(
        'hello local',
        Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
        key_alt_name='local_altname')
    self.assertEqual(local_encrypted_altname, local_encrypted)

    # AWS create data key.
    listener.reset()
    master_key = {
        'region': 'us-east-1',
        'key': 'arn:aws:kms:us-east-1:579766882180:key/'
               '89fcc2c4-08b0-4bd9-9f25-e30687b580d0'
    }
    aws_datakey_id = client_encryption.create_data_key(
        'aws', master_key=master_key, key_alt_names=['aws_altname'])
    self.assertBinaryUUID(aws_datakey_id)
    cmd = listener.results['started'][-1]
    self.assertEqual('insert', cmd.command_name)
    self.assertEqual({'w': 'majority'}, cmd.command.get('writeConcern'))
    docs = list(vault.find({'_id': aws_datakey_id}))
    self.assertEqual(len(docs), 1)
    self.assertEqual(docs[0]['masterKey']['provider'], 'aws')

    # AWS encrypt by key_id.
    aws_encrypted = client_encryption.encrypt(
        'hello aws',
        Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
        key_id=aws_datakey_id)
    self.assertEncrypted(aws_encrypted)
    client_encrypted.db.coll.insert_one(
        {'_id': 'aws', 'value': aws_encrypted})
    doc_decrypted = client_encrypted.db.coll.find_one({'_id': 'aws'})
    self.assertEqual(doc_decrypted['value'], 'hello aws')

    # AWS encrypt by key_alt_name.
    aws_encrypted_altname = client_encryption.encrypt(
        'hello aws',
        Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
        key_alt_name='aws_altname')
    self.assertEqual(aws_encrypted_altname, aws_encrypted)

    # Explicitly encrypting an auto encrypted field.
    msg = (r'Cannot encrypt element of type binData because schema '
           r'requires that type is one of: \[ string \]')
    with self.assertRaisesRegex(EncryptionError, msg):
        client_encrypted.db.coll.insert_one(
            {'encrypted_placeholder': local_encrypted})
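
# Note: assertEncrypted and assertBinaryUUID are helpers not shown in this
# section. A minimal sketch of what the test above assumes they verify (the
# exact implementation is an assumption): ciphertext is BSON Binary subtype 6,
# and data key ids are BSON Binary UUIDs (subtype 4).
from bson.binary import Binary, UUID_SUBTYPE


def assertEncrypted(self, val):
    self.assertIsInstance(val, Binary)
    self.assertEqual(val.subtype, 6)


def assertBinaryUUID(self, val):
    self.assertIsInstance(val, Binary)
    self.assertEqual(val.subtype, UUID_SUBTYPE)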