def test_unpin_for_non_transaction_operation(self):
        """Verify the session is unpinned once its transaction commits.

        After the transaction, finds run with the same session should be
        load balanced across mongoses, so more than one server address
        must eventually be observed.
        """
        # Use a large localThresholdMS and wait for both mongoses to be
        # discovered so server selection may pick either of them.
        mongos_client = rs_client(client_context.mongos_seeds(),
                                  localThresholdMS=1000)
        wait_until(lambda: len(mongos_client.nodes) > 1,
                   "discover both mongoses")
        collection = mongos_client.test.test
        # Ensure the collection exists before starting the transaction.
        collection.insert_one({})
        self.addCleanup(mongos_client.close)
        with mongos_client.start_session() as session:
            # Running a transaction pins the session to one mongos.
            with session.start_transaction():
                collection.insert_one({}, session=session)

            seen_addresses = set()
            for _ in range(UNPIN_TEST_MAX_ATTEMPTS):
                cursor = collection.find({}, session=session)
                self.assertTrue(next(cursor))
                seen_addresses.add(cursor.address)
                # A second distinct address proves the session is unpinned.
                if len(seen_addresses) > 1:
                    break

            self.assertGreater(len(seen_addresses), 1)
    def test_unpin_for_non_transaction_operation(self):
        """After a transaction commits, the session must be unpinned:
        non-transaction finds using the session should reach more than
        one mongos.
        """
        # Increase localThresholdMS and wait until both nodes are discovered
        # to avoid false positives.
        client = rs_client(client_context.mongos_seeds(),
                           localThresholdMS=1000)
        wait_until(lambda: len(client.nodes) > 1, "discover both mongoses")
        coll = client.test.test
        # Create the collection.
        coll.insert_one({})
        self.addCleanup(client.close)
        with client.start_session() as s:
            # Session is pinned to Mongos.
            with s.start_transaction():
                coll.insert_one({}, session=s)

            addresses = set()
            for _ in range(UNPIN_TEST_MAX_ATTEMPTS):
                cursor = coll.find({}, session=s)
                self.assertTrue(next(cursor))
                addresses.add(cursor.address)
                # Break early if we can: a second address proves unpinning.
                if len(addresses) > 1:
                    break

            self.assertGreater(len(addresses), 1)
Example No. 3
0
 def test_load_balancing(self):
     """Finds should be distributed across both mongoses.

     While one mongos artificially delays find commands it must receive
     at most ~25% of the operations; with the delay removed, the split
     should return to roughly 50/50.
     """
     cmd_listener = OvertCommandListener()
     # PYTHON-2584: a large localThresholdMS keeps both servers selectable
     # regardless of their measured round-trip times.
     lb_client = rs_client(client_context.mongos_seeds(),
                           appName='loadBalancingTest',
                           event_listeners=[cmd_listener],
                           localThresholdMS=10000)
     self.addCleanup(lb_client.close)
     wait_until(lambda: len(lb_client.nodes) == 2, 'discover both nodes')
     # Block find commands for 500ms on connections from this appName.
     delay_find_cmds = {
         'configureFailPoint': 'failCommand',
         'mode': {'times': 10000},
         'data': {
             'failCommands': ['find'],
             'blockConnection': True,
             'blockTimeMS': 500,
             'appName': 'loadBalancingTest',
         },
     }
     with self.fail_point(delay_find_cmds):
         known_nodes = client_context.client.nodes
         self.assertEqual(len(known_nodes), 1)
         slow_server = next(iter(known_nodes))
         server_freqs = self.frequencies(lb_client, cmd_listener)
         self.assertLessEqual(server_freqs[slow_server], 0.25)
     cmd_listener.reset()
     server_freqs = self.frequencies(lb_client, cmd_listener)
     self.assertAlmostEqual(server_freqs[slow_server], 0.50, delta=0.15)
 def test_load_balancing(self):
     """A mongos whose find commands are delayed should receive a smaller
     share of the load, and roughly half once the delay is lifted.
     """
     listener = OvertCommandListener()
     client = rs_client(client_context.mongos_seeds(),
                        appName='loadBalancingTest',
                        event_listeners=[listener])
     self.addCleanup(client.close)
     # Delay find commands (blockTimeMS=500) from this client only,
     # matched via appName, using the failCommand fail point.
     delay_finds = {
         'configureFailPoint': 'failCommand',
         'mode': {'times': 10000},
         'data': {
             'failCommands': ['find'],
             'blockConnection': True,
             'blockTimeMS': 500,
             'appName': 'loadBalancingTest',
         },
     }
     with self.fail_point(delay_finds):
         nodes = client_context.client.nodes
         self.assertEqual(len(nodes), 1)
         delayed_server = next(iter(nodes))
         # The delayed server should see at most ~25% of the finds.
         freqs = self.frequencies(client, listener)
         self.assertLessEqual(freqs[delayed_server], 0.25)
     listener.reset()
     # With the fail point gone, load should rebalance to about 50/50.
     freqs = self.frequencies(client, listener)
     self.assertAlmostEqual(freqs[delayed_server], 0.50, delta=0.15)
Example No. 5
0
    def run_scenario(self, scenario_def, test):
        """Run a single transactions spec test.

        :Parameters:
          - `scenario_def`: the parsed scenario file (test data, database
            and collection names).
          - `test`: one test document from that scenario file.
        """
        self.maybe_skip_scenario(test)

        # Kill all sessions before and after each test to prevent an open
        # transaction (from a test failure) from blocking collection/database
        # operations during test set up and tear down.
        self.kill_all_sessions()
        self.addCleanup(self.kill_all_sessions)
        self.setup_scenario(scenario_def)
        database_name = self.get_scenario_db_name(scenario_def)
        collection_name = self.get_scenario_coll_name(scenario_def)
        # SPEC-1245 workaround StaleDbVersion on distinct
        for c in self.mongos_clients:
            c[database_name][collection_name].distinct("x")

        # Configure the fail point before creating the client.
        if 'failPoint' in test:
            fp = test['failPoint']
            self.set_fail_point(fp)
            # Ensure the fail point is disabled even if the test fails.
            self.addCleanup(self.set_fail_point, {
                'configureFailPoint': fp['configureFailPoint'],
                'mode': 'off'
            })

        listener = OvertCommandListener()
        pool_listener = CMAPListener()
        server_listener = ServerAndTopologyEventListener()
        # Create a new client, to avoid interference from pooled sessions.
        client_options = self.parse_client_options(test['clientOptions'])
        # MMAPv1 does not support retryable writes.
        if (client_options.get('retryWrites') is True
                and client_context.storage_engine == 'mmapv1'):
            self.skipTest("MMAPv1 does not support retryWrites=True")
        use_multi_mongos = test['useMultipleMongoses']
        if client_context.is_mongos and use_multi_mongos:
            client = rs_client(
                client_context.mongos_seeds(),
                event_listeners=[listener, pool_listener, server_listener],
                **client_options)
        else:
            client = rs_client(
                event_listeners=[listener, pool_listener, server_listener],
                **client_options)
        self.scenario_client = client
        self.listener = listener
        self.pool_listener = pool_listener
        self.server_listener = server_listener
        # Close the client explicitly to avoid having too many threads open.
        self.addCleanup(client.close)

        # Create session0 and session1.
        sessions = {}
        session_ids = {}
        for i in range(2):
            # Don't attempt to create sessions if they are not supported by
            # the running server version.
            if not client_context.sessions_enabled:
                break
            session_name = 'session%d' % i
            opts = camel_to_snake_args(test['sessionOptions'][session_name])
            if 'default_transaction_options' in opts:
                txn_opts = self.parse_options(
                    opts['default_transaction_options'])
                txn_opts = client_session.TransactionOptions(**txn_opts)
                opts['default_transaction_options'] = txn_opts

            s = client.start_session(**dict(opts))

            sessions[session_name] = s
            # Store lsid so we can access it after end_session, in check_events.
            session_ids[session_name] = s.session_id

        self.addCleanup(end_sessions, sessions)

        collection = client[database_name][collection_name]
        self.run_test_ops(sessions, collection, test)

        end_sessions(sessions)

        self.check_events(test, listener, session_ids)

        # Disable fail points.
        if 'failPoint' in test:
            fp = test['failPoint']
            self.set_fail_point({
                'configureFailPoint': fp['configureFailPoint'],
                'mode': 'off'
            })

        # Assert final state is expected.
        outcome = test['outcome']
        expected_c = outcome.get('collection')
        if expected_c is not None:
            outcome_coll_name = self.get_outcome_coll_name(outcome, collection)

            # Read from the primary with local read concern to ensure causal
            # consistency.
            outcome_coll = client_context.client[
                collection.database.name].get_collection(
                    outcome_coll_name,
                    read_preference=ReadPreference.PRIMARY,
                    read_concern=ReadConcern('local'))
            actual_data = list(outcome_coll.find(sort=[('_id', 1)]))

            # The expected data needs to be the left hand side here otherwise
            # CompareType(Binary) doesn't work.
            self.assertEqual(wrap_types(expected_c['data']), actual_data)
Example No. 6
0
    def run_scenario(self, scenario_def, test):
        """Run one spec test, including the retryable-reads GridFS case.

        :Parameters:
          - `scenario_def`: the parsed scenario file (test data, database
            name and collection/bucket name).
          - `test`: one test document from that scenario file.
        """
        self.maybe_skip_scenario(test)
        listener = OvertCommandListener()
        # Create a new client, to avoid interference from pooled sessions.
        # Convert test['clientOptions'] to dict to avoid a Jython bug using
        # "**" with ScenarioDict.
        client_options = dict(test['clientOptions'])
        use_multi_mongos = test['useMultipleMongoses']
        if client_context.is_mongos and use_multi_mongos:
            client = rs_client(client_context.mongos_seeds(),
                               event_listeners=[listener],
                               **client_options)
        else:
            client = rs_client(event_listeners=[listener], **client_options)
        # Close the client explicitly to avoid having too many threads open.
        self.addCleanup(client.close)

        # Kill all sessions before and after each test to prevent an open
        # transaction (from a test failure) from blocking collection/database
        # operations during test set up and tear down.
        self.kill_all_sessions()
        self.addCleanup(self.kill_all_sessions)

        database_name = scenario_def['database_name']
        # Load fixture data with w=majority so it is visible everywhere.
        write_concern_db = client_context.client.get_database(
            database_name, write_concern=WriteConcern(w='majority'))
        if 'bucket_name' in scenario_def:
            # Create a bucket for the retryable reads GridFS tests.
            collection_name = scenario_def['bucket_name']
            client_context.client.drop_database(database_name)
            if scenario_def['data']:
                data = scenario_def['data']
                # Load data.
                write_concern_db['fs.chunks'].insert_many(data['fs.chunks'])
                write_concern_db['fs.files'].insert_many(data['fs.files'])
        else:
            collection_name = scenario_def['collection_name']
            write_concern_coll = write_concern_db[collection_name]
            write_concern_coll.drop()
            write_concern_db.create_collection(collection_name)
            if scenario_def['data']:
                # Load data.
                write_concern_coll.insert_many(scenario_def['data'])

        # SPEC-1245 workaround StaleDbVersion on distinct
        for c in self.mongos_clients:
            c[database_name][collection_name].distinct("x")

        # Create session0 and session1.
        sessions = {}
        session_ids = {}
        for i in range(2):
            session_name = 'session%d' % i
            opts = camel_to_snake_args(test['sessionOptions'][session_name])
            if 'default_transaction_options' in opts:
                txn_opts = opts['default_transaction_options']
                if 'readConcern' in txn_opts:
                    read_concern = ReadConcern(**dict(txn_opts['readConcern']))
                else:
                    read_concern = None
                if 'writeConcern' in txn_opts:
                    write_concern = WriteConcern(
                        **dict(txn_opts['writeConcern']))
                else:
                    write_concern = None

                if 'readPreference' in txn_opts:
                    read_pref = parse_read_preference(
                        txn_opts['readPreference'])
                else:
                    read_pref = None

                txn_opts = client_session.TransactionOptions(
                    read_concern=read_concern,
                    write_concern=write_concern,
                    read_preference=read_pref,
                )
                opts['default_transaction_options'] = txn_opts

            s = client.start_session(**dict(opts))

            sessions[session_name] = s
            # Store lsid so we can access it after end_session, in check_events.
            session_ids[session_name] = s.session_id

        self.addCleanup(end_sessions, sessions)

        if 'failPoint' in test:
            self.set_fail_point(test['failPoint'])
            # Ensure the fail point is disabled even if the test fails.
            self.addCleanup(self.set_fail_point, {
                'configureFailPoint': 'failCommand',
                'mode': 'off'
            })

        # Discard events generated during setup before running operations.
        listener.results.clear()

        collection = client[database_name][collection_name]
        self.run_operations(sessions, collection, test['operations'])

        end_sessions(sessions)

        self.check_events(test, listener, session_ids)

        # Disable fail points.
        if 'failPoint' in test:
            self.set_fail_point({
                'configureFailPoint': 'failCommand',
                'mode': 'off'
            })

        # Assert final state is expected.
        expected_c = test['outcome'].get('collection')
        if expected_c is not None:
            # Read from the primary with local read concern to ensure causal
            # consistency.
            primary_coll = collection.with_options(
                read_preference=ReadPreference.PRIMARY,
                read_concern=ReadConcern('local'))
            self.assertEqual(list(primary_coll.find()), expected_c['data'])
    def run_scenario(self):
        """Run one spec test, checking each operation's expected result
        or expected error in-line.

        NOTE(review): ``test`` and ``scenario_def`` are neither parameters
        nor locals here; presumably they are captured from an enclosing
        test-generation scope -- confirm against the surrounding file.
        """
        if test.get('skipReason'):
            raise unittest.SkipTest(test.get('skipReason'))

        listener = OvertCommandListener()
        # Create a new client, to avoid interference from pooled sessions.
        # Convert test['clientOptions'] to dict to avoid a Jython bug using
        # "**" with ScenarioDict.
        client_options = dict(test['clientOptions'])
        use_multi_mongos = test['useMultipleMongoses']
        if client_context.is_mongos and use_multi_mongos:
            client = rs_client(client_context.mongos_seeds(),
                               event_listeners=[listener],
                               **client_options)
        else:
            client = rs_client(event_listeners=[listener], **client_options)
        # Close the client explicitly to avoid having too many threads open.
        self.addCleanup(client.close)

        # Kill all sessions before and after each test to prevent an open
        # transaction (from a test failure) from blocking collection/database
        # operations during test set up and tear down.
        self.kill_all_sessions()
        self.addCleanup(self.kill_all_sessions)

        database_name = scenario_def['database_name']
        collection_name = scenario_def['collection_name']
        # Load fixture data with w=majority so it is visible everywhere.
        write_concern_db = client.get_database(
            database_name, write_concern=WriteConcern(w='majority'))
        write_concern_coll = write_concern_db[collection_name]
        write_concern_coll.drop()
        write_concern_db.create_collection(collection_name)
        if scenario_def['data']:
            # Load data.
            write_concern_coll.insert_many(scenario_def['data'])

        # Create session0 and session1.
        sessions = {}
        session_ids = {}
        for i in range(2):
            session_name = 'session%d' % i
            opts = camel_to_snake_args(test['sessionOptions'][session_name])
            if 'default_transaction_options' in opts:
                txn_opts = opts['default_transaction_options']
                if 'readConcern' in txn_opts:
                    read_concern = ReadConcern(**dict(txn_opts['readConcern']))
                else:
                    read_concern = None
                if 'writeConcern' in txn_opts:
                    write_concern = WriteConcern(
                        **dict(txn_opts['writeConcern']))
                else:
                    write_concern = None

                if 'readPreference' in txn_opts:
                    read_pref = parse_read_preference(
                        txn_opts['readPreference'])
                else:
                    read_pref = None

                txn_opts = client_session.TransactionOptions(
                    read_concern=read_concern,
                    write_concern=write_concern,
                    read_preference=read_pref,
                )
                opts['default_transaction_options'] = txn_opts

            s = client.start_session(**dict(opts))

            sessions[session_name] = s
            # Store lsid so we can access it after end_session, in check_events.
            session_ids[session_name] = s.session_id

        self.addCleanup(end_sessions, sessions)

        if 'failPoint' in test:
            self.set_fail_point(test['failPoint'])
            # Ensure the fail point is disabled even if the test fails.
            self.addCleanup(self.set_fail_point, {
                'configureFailPoint': 'failCommand',
                'mode': 'off'
            })

        # Discard events generated during setup before running operations.
        listener.results.clear()
        collection = client[database_name][collection_name]

        for op in test['operations']:
            expected_result = op.get('result')
            if expect_error(expected_result):
                # The operation is expected to raise; verify the error's
                # message, code name, and labels as requested by the test.
                with self.assertRaises(PyMongoError,
                                       msg=op['name']) as context:
                    self.run_operation(sessions, collection, op.copy())

                if expect_error_message(expected_result):
                    self.assertIn(expected_result['errorContains'].lower(),
                                  str(context.exception).lower())
                if expect_error_code(expected_result):
                    self.assertEqual(expected_result['errorCodeName'],
                                     context.exception.details.get('codeName'))
                if expect_error_labels_contain(expected_result):
                    self.assertErrorLabelsContain(
                        context.exception,
                        expected_result['errorLabelsContain'])
                if expect_error_labels_omit(expected_result):
                    self.assertErrorLabelsOmit(
                        context.exception, expected_result['errorLabelsOmit'])
            else:
                result = self.run_operation(sessions, collection, op.copy())
                if 'result' in op:
                    if op['name'] == 'runCommand':
                        self.check_command_result(expected_result, result)
                    else:
                        self.check_result(expected_result, result)

        for s in sessions.values():
            s.end_session()

        self.check_events(test, listener, session_ids)

        # Disable fail points.
        self.set_fail_point({
            'configureFailPoint': 'failCommand',
            'mode': 'off'
        })

        # Assert final state is expected.
        expected_c = test['outcome'].get('collection')
        if expected_c is not None:
            # Read from the primary with local read concern to ensure causal
            # consistency.
            primary_coll = collection.with_options(
                read_preference=ReadPreference.PRIMARY,
                read_concern=ReadConcern('local'))
            self.assertEqual(list(primary_coll.find()), expected_c['data'])
    def run_scenario(self):
        """Run one spec test, delegating operation checks to
        ``run_operations``.

        NOTE(review): ``test`` and ``scenario_def`` are neither parameters
        nor locals here; presumably they are captured from an enclosing
        test-generation scope -- confirm against the surrounding file.
        """
        if test.get('skipReason'):
            raise unittest.SkipTest(test.get('skipReason'))

        listener = OvertCommandListener()
        # Create a new client, to avoid interference from pooled sessions.
        # Convert test['clientOptions'] to dict to avoid a Jython bug using
        # "**" with ScenarioDict.
        client_options = dict(test['clientOptions'])
        use_multi_mongos = test['useMultipleMongoses']
        if client_context.is_mongos and use_multi_mongos:
            client = rs_client(client_context.mongos_seeds(),
                               event_listeners=[listener], **client_options)
        else:
            client = rs_client(event_listeners=[listener], **client_options)
        # Close the client explicitly to avoid having too many threads open.
        self.addCleanup(client.close)

        # Kill all sessions before and after each test to prevent an open
        # transaction (from a test failure) from blocking collection/database
        # operations during test set up and tear down.
        self.kill_all_sessions()
        self.addCleanup(self.kill_all_sessions)

        database_name = scenario_def['database_name']
        collection_name = scenario_def['collection_name']
        # Don't use the test client to load data.
        write_concern_db = client_context.client.get_database(
            database_name, write_concern=WriteConcern(w='majority'))
        write_concern_coll = write_concern_db[collection_name]
        write_concern_coll.drop()
        write_concern_db.create_collection(collection_name)
        if scenario_def['data']:
            # Load data.
            write_concern_coll.insert_many(scenario_def['data'])

        # SPEC-1245 workaround StaleDbVersion on distinct
        for c in self.mongos_clients:
            c[database_name][collection_name].distinct("x")

        # Create session0 and session1.
        sessions = {}
        session_ids = {}
        for i in range(2):
            session_name = 'session%d' % i
            opts = camel_to_snake_args(test['sessionOptions'][session_name])
            if 'default_transaction_options' in opts:
                txn_opts = opts['default_transaction_options']
                if 'readConcern' in txn_opts:
                    read_concern = ReadConcern(
                        **dict(txn_opts['readConcern']))
                else:
                    read_concern = None
                if 'writeConcern' in txn_opts:
                    write_concern = WriteConcern(
                        **dict(txn_opts['writeConcern']))
                else:
                    write_concern = None

                if 'readPreference' in txn_opts:
                    read_pref = parse_read_preference(
                        txn_opts['readPreference'])
                else:
                    read_pref = None

                txn_opts = client_session.TransactionOptions(
                    read_concern=read_concern,
                    write_concern=write_concern,
                    read_preference=read_pref,
                )
                opts['default_transaction_options'] = txn_opts

            s = client.start_session(**dict(opts))

            sessions[session_name] = s
            # Store lsid so we can access it after end_session, in check_events.
            session_ids[session_name] = s.session_id

        self.addCleanup(end_sessions, sessions)

        if 'failPoint' in test:
            self.set_fail_point(test['failPoint'])
            # Ensure the fail point is disabled even if the test fails.
            self.addCleanup(self.set_fail_point, {
                'configureFailPoint': 'failCommand', 'mode': 'off'})

        # Discard events generated during setup before running operations.
        listener.results.clear()
        collection = client[database_name][collection_name]

        self.run_operations(sessions, collection, test['operations'])

        for s in sessions.values():
            s.end_session()

        self.check_events(test, listener, session_ids)

        # Disable fail points.
        self.set_fail_point({
            'configureFailPoint': 'failCommand', 'mode': 'off'})

        # Assert final state is expected.
        expected_c = test['outcome'].get('collection')
        if expected_c is not None:
            # Read from the primary with local read concern to ensure causal
            # consistency.
            primary_coll = collection.with_options(
                read_preference=ReadPreference.PRIMARY,
                read_concern=ReadConcern('local'))
            self.assertEqual(list(primary_coll.find()), expected_c['data'])
Example No. 9
0
    def _create_entity(self, entity_spec):
        """Create the entity described by ``entity_spec`` and register it.

        The spec must contain exactly one top-level key naming the entity
        type ('client', 'database', 'collection', 'session', or 'bucket');
        its value is that entity's configuration document.
        """
        if len(entity_spec) != 1:
            self._test_class.fail(
                "Entity spec %s did not contain exactly one top-level key" %
                (entity_spec, ))

        kind, config = next(iteritems(entity_spec))
        if kind == 'client':
            kwargs = {}
            observe_events = config.get('observeEvents', [])
            ignore_commands = config.get('ignoreCommandMonitoringEvents', [])
            if observe_events or ignore_commands:
                # Command names are matched case-insensitively.
                ignore_commands = [cmd.lower() for cmd in ignore_commands]
                listener = EventListenerUtil(observe_events, ignore_commands)
                self._listeners[config['id']] = listener
                kwargs['event_listeners'] = [listener]
            if client_context.is_mongos and config.get('useMultipleMongoses'):
                kwargs['h'] = client_context.mongos_seeds()
            kwargs.update(config.get('uriOptions', {}))
            server_api = config.get('serverApi')
            if server_api:
                kwargs['server_api'] = ServerApi(
                    server_api['version'],
                    strict=server_api.get('strict'),
                    deprecation_errors=server_api.get('deprecationErrors'))
            client = rs_or_single_client(**kwargs)
            self[config['id']] = client
            self._test_class.addCleanup(client.close)
            return
        if kind == 'database':
            client = self[config['client']]
            if not isinstance(client, MongoClient):
                self._test_class.fail(
                    'Expected entity %s to be of type MongoClient, got %s' %
                    (config['client'], type(client)))
            options = parse_collection_or_database_options(
                config.get('databaseOptions', {}))
            self[config['id']] = client.get_database(
                config['databaseName'], **options)
            return
        if kind == 'collection':
            database = self[config['database']]
            if not isinstance(database, Database):
                self._test_class.fail(
                    'Expected entity %s to be of type Database, got %s' %
                    (config['database'], type(database)))
            options = parse_collection_or_database_options(
                config.get('collectionOptions', {}))
            self[config['id']] = database.get_collection(
                config['collectionName'], **options)
            return
        if kind == 'session':
            client = self[config['client']]
            if not isinstance(client, MongoClient):
                self._test_class.fail(
                    'Expected entity %s to be of type MongoClient, got %s' %
                    (config['client'], type(client)))
            opts = camel_to_snake_args(config.get('sessionOptions', {}))
            if 'default_transaction_options' in opts:
                txn_opts = parse_spec_options(
                    opts['default_transaction_options'])
                txn_opts = TransactionOptions(**txn_opts)
                opts = copy.deepcopy(opts)
                opts['default_transaction_options'] = txn_opts
            session = client.start_session(**dict(opts))
            self[config['id']] = session
            # Keep a copy of the lsid; it is still needed after end_session.
            self._session_lsids[config['id']] = copy.deepcopy(
                session.session_id)
            self._test_class.addCleanup(session.end_session)
            return
        if kind == 'bucket':
            # TODO: implement the 'bucket' entity type
            self._test_class.skipTest(
                'GridFS is not currently supported (PYTHON-2459)')
        self._test_class.fail('Unable to create entity of unknown type %s' %
                              (kind, ))