def test_tag_sets_validation(self):
        # Can't use tags with PRIMARY
        self.assertRaises(ConfigurationError, _ServerMode,
                          0, tag_sets=[{'k': 'v'}])

        # ... and empty tag sets are rejected with PRIMARY as well
        self.assertRaises(ConfigurationError, _ServerMode,
                          0, tag_sets=[{}])

        S = Secondary(tag_sets=[{}])
        self.assertEqual(
            [{}],
            rs_client(read_preference=S).read_preference.tag_sets)

        S = Secondary(tag_sets=[{'k': 'v'}])
        self.assertEqual(
            [{'k': 'v'}],
            rs_client(read_preference=S).read_preference.tag_sets)

        S = Secondary(tag_sets=[{'k': 'v'}, {}])
        self.assertEqual(
            [{'k': 'v'}, {}],
            rs_client(read_preference=S).read_preference.tag_sets)

        self.assertRaises(ValueError, Secondary, tag_sets=[])

        # One dict not ok, must be a list of dicts
        self.assertRaises(TypeError, Secondary, tag_sets={'k': 'v'})

        self.assertRaises(TypeError, Secondary, tag_sets='foo')

        self.assertRaises(TypeError, Secondary, tag_sets=['foo'])
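
# Note: rs_client() is the test-suite factory used throughout these examples.
# A hedged sketch of roughly what it does (an assumption, not the actual
# helper): build a MongoClient against the test harness's replica-set seeds,
# merging in any per-test keyword overrides. `client_context` and its
# attributes are assumed harness names.
from pymongo import MongoClient


def rs_client(h=None, p=None, **kwargs):
    kwargs.setdefault('replicaSet', client_context.replica_set_name)
    return MongoClient(h or client_context.host,
                       p or client_context.port,
                       **kwargs)
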
    def test_ipv6(self):
        if client_context.ssl:
            # http://bugs.python.org/issue13034
            if sys.version_info[:2] == (2, 6):
                raise SkipTest("Python 2.6 can't parse SANs")
            if not HAVE_IPADDRESS:
                raise SkipTest("Need the ipaddress module to test with SSL")

        port = client_context.port
        c = rs_client("mongodb://[::1]:%d" % (port,))

        # Client switches to IPv4 once it has first ismaster response.
        msg = 'discovered primary with IPv4 address "%r"' % (self.primary,)
        wait_until(lambda: c.primary == self.primary, msg)

        # Same outcome with both IPv4 and IPv6 seeds.
        c = rs_client("mongodb://[::1]:%d,localhost:%d" % (port, port))

        wait_until(lambda: c.primary == self.primary, msg)

        if client_context.auth_enabled:
            auth_str = "%s:%s@" % (db_user, db_pwd)
        else:
            auth_str = ""

        uri = "mongodb://%slocalhost:%d,[::1]:%d" % (auth_str, port, port)
        client = rs_client(uri)
        client.pymongo_test.test.insert_one({"dummy": u"object"})
        client.pymongo_test_bernie.test.insert_one({"dummy": u"object"})

        dbs = client.database_names()
        self.assertTrue("pymongo_test" in dbs)
        self.assertTrue("pymongo_test_bernie" in dbs)
        client.close()
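
# Hedged sketch of the wait_until() helper used throughout these examples (an
# assumption, not the actual test-suite implementation): poll a predicate
# until it returns a truthy value, or fail with the given description after a
# timeout.
import time


def wait_until(predicate, success_description, timeout=10):
    start = time.time()
    while True:
        retval = predicate()
        if retval:
            return retval
        if time.time() - start > timeout:
            raise AssertionError("Didn't ever %s" % success_description)
        time.sleep(0.1)
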
    def test_ipv6(self):
        port = client_context.port
        c = rs_client("mongodb://[::1]:%d" % (port,))

        # Client switches to IPv4 once it has first ismaster response.
        msg = 'discovered primary with IPv4 address "%r"' % (self.primary,)
        wait_until(lambda: c.primary == self.primary, msg)

        # Same outcome with both IPv4 and IPv6 seeds.
        c = rs_client("[::1]:%d,localhost:%d" % (port, port))

        wait_until(lambda: c.primary == self.primary, msg)

        if client_context.auth_enabled:
            auth_str = "%s:%s@" % (db_user, db_pwd)
        else:
            auth_str = ""

        uri = "mongodb://%slocalhost:%d,[::1]:%d" % (auth_str, port, port)
        client = rs_client(uri)
        client.pymongo_test.test.insert_one({"dummy": u"object"})
        client.pymongo_test_bernie.test.insert_one({"dummy": u"object"})

        dbs = client.database_names()
        self.assertTrue("pymongo_test" in dbs)
        self.assertTrue("pymongo_test_bernie" in dbs)
        client.close()
    def test_threshold_validation(self):
        self.assertEqual(17, rs_client(
            localThresholdMS=17
        ).local_threshold_ms)

        self.assertEqual(42, rs_client(
            localThresholdMS=42
        ).local_threshold_ms)

        self.assertEqual(666, rs_client(
            localthresholdms=666
        ).local_threshold_ms)
    def test_unpin_for_non_transaction_operation(self):
        # Increase localThresholdMS and wait until both nodes are discovered
        # to avoid false positives.
        client = rs_client(client_context.mongos_seeds(),
                           localThresholdMS=1000)
        wait_until(lambda: len(client.nodes) > 1, "discover both mongoses")
        coll = client.test.test
        # Create the collection.
        coll.insert_one({})
        self.addCleanup(client.close)
        with client.start_session() as s:
            # Session is pinned to Mongos.
            with s.start_transaction():
                coll.insert_one({}, session=s)

            addresses = set()
            for _ in range(UNPIN_TEST_MAX_ATTEMPTS):
                cursor = coll.find({}, session=s)
                self.assertTrue(next(cursor))
                addresses.add(cursor.address)
                # Break early if we can.
                if len(addresses) > 1:
                    break

            self.assertGreater(len(addresses), 1)
    def test_session_gc(self):
        client = rs_client()
        self.addCleanup(client.close)
        pool = get_pool(client)
        session = client.start_session()
        session.start_transaction()
        client.test_session_gc.test.find_one({}, session=session)
        if client_context.load_balancer:
            self.assertEqual(pool.active_sockets, 1)  # Pinned.

        thread = PoolLocker(pool)
        thread.start()
        self.assertTrue(thread.locked.wait(5), 'timed out')
        # Garbage collect the session while the pool is locked to ensure we
        # don't deadlock.
        del session
        # On PyPy it can take a few rounds to collect the session.
        for _ in range(3):
            gc.collect()
        thread.unlock.set()
        thread.join(5)
        self.assertFalse(thread.is_alive())
        self.assertIsNone(thread.exc)

        wait_until(lambda: pool.active_sockets == 0, 'return socket')
        # Run another operation to ensure the socket still works.
        client[self.db.name].test.delete_many({})
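
# A minimal, hedged sketch of the PoolLocker helper used in the GC tests here
# (an assumption about its behavior, not the actual PyMongo test helper): a
# daemon thread that grabs the pool's lock, signals `locked`, and holds the
# lock until the test sets `unlock`; `exc` records any exception raised in
# the thread.
import threading


class PoolLocker(threading.Thread):
    def __init__(self, pool):
        super(PoolLocker, self).__init__()
        self.daemon = True
        self.pool = pool
        self.locked = threading.Event()  # Set once the pool lock is held.
        self.unlock = threading.Event()  # Set by the test to release it.
        self.exc = None

    def run(self):
        try:
            # Assumption: the pool exposes its internal `lock`.
            with self.pool.lock:
                self.locked.set()
                if not self.unlock.wait(10):
                    raise Exception('timed out waiting for unlock signal')
        except Exception as exc:
            self.exc = exc
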
    def test_nearest(self):
        # With high localThresholdMS, expect to read from any
        # member
        c = rs_client(
            read_preference=ReadPreference.NEAREST,
            localThresholdMS=10000)  # 10 seconds

        data_members = set(self.hosts).difference(set(self.arbiters))

        # This is a probabilistic test; track which members we've read from so
        # far, and keep reading until we've used all the members or give up.
        # Chance of using only 2 of 3 members 10k times if there's no bug =
        # 3 * (2/3)**10000, very low.
        used = set()
        i = 0
        while data_members.difference(used) and i < 10000:
            address = self.read_from_which_host(c)
            used.add(address)
            i += 1

        not_used = data_members.difference(used)
        latencies = ', '.join(
            '%s: %dms' % (server.description.address,
                          server.description.round_trip_time)
            for server in c._get_topology().select_servers(
                readable_server_selector))

        self.assertFalse(
            not_used,
            "Expected to use primary and all secondaries for mode NEAREST,"
            " but didn't use %s\nlatencies: %s" % (not_used, latencies))
    def test_timeout_does_not_mark_member_down(self):
        # If a query times out, the client shouldn't mark the member "down".

        # Disable background refresh.
        with client_knobs(heartbeat_frequency=999999):
            c = rs_client(socketTimeoutMS=1000, w=self.w)
            collection = c.pymongo_test.test
            collection.insert_one({})

            # Query the primary.
            self.assertRaises(NetworkTimeout, collection.find_one,
                              {'$where': delay(1.5)})

            self.assertTrue(c.primary)
            collection.find_one()  # No error.

            coll = collection.with_options(
                read_preference=ReadPreference.SECONDARY)

            # Query the secondary.
            self.assertRaises(NetworkTimeout, coll.find_one,
                              {'$where': delay(1.5)})

            self.assertTrue(c.secondaries)

            # No error.
            coll.find_one()
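
# Hedged sketch of the delay() helper used in the $where queries above (an
# assumption, not the actual PyMongo test helper): build a JavaScript
# predicate that sleeps server-side for the given number of seconds, so the
# query is guaranteed to exceed socketTimeoutMS.
def delay(sec):
    return 'function() { sleep(%f * 1000); return true; }' % sec
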
    def test_load_balancing(self):
        listener = OvertCommandListener()
        client = rs_client(client_context.mongos_seeds(),
                           appName='loadBalancingTest',
                           event_listeners=[listener])
        self.addCleanup(client.close)
        # Delay find commands on one of the two mongoses.
        delay_finds = {
            'configureFailPoint': 'failCommand',
            'mode': {'times': 10000},
            'data': {
                'failCommands': ['find'],
                'blockConnection': True,
                'blockTimeMS': 500,
                'appName': 'loadBalancingTest',
            },
        }
        with self.fail_point(delay_finds):
            nodes = client_context.client.nodes
            self.assertEqual(len(nodes), 1)
            delayed_server = next(iter(nodes))
            freqs = self.frequencies(client, listener)
            self.assertLessEqual(freqs[delayed_server], 0.25)
        listener.reset()
        freqs = self.frequencies(client, listener)
        self.assertAlmostEqual(freqs[delayed_server], 0.50, delta=0.15)
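
# Hedged sketch of OvertCommandListener (an assumption, not the actual test
# helper): a CommandListener that records every started/succeeded/failed
# event, including commands that monitoring would normally redact, so tests
# can assert on them afterwards.
import collections

from pymongo import monitoring


class OvertCommandListener(monitoring.CommandListener):
    def __init__(self):
        self.results = collections.defaultdict(list)

    def started(self, event):
        self.results['started'].append(event)

    def succeeded(self, event):
        self.results['succeeded'].append(event)

    def failed(self, event):
        self.results['failed'].append(event)

    def reset(self):
        self.results.clear()

    def started_command_names(self):
        return [event.command_name for event in self.results['started']]
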
    def test_gridfs_replica_set(self):
        rsc = rs_client(w=self.w, read_preference=ReadPreference.SECONDARY)

        gfs = gridfs.GridFSBucket(rsc.gfsbucketreplica, 'gfsbucketreplicatest')
        oid = gfs.upload_from_stream("test_filename", b'foo')
        content = gfs.open_download_stream(oid).read()
        self.assertEqual(b'foo', content)
    def test_gridfs_replica_set(self):
        rsc = rs_client(w=self.w, read_preference=ReadPreference.SECONDARY)

        fs = gridfs.GridFS(rsc.pymongo_test)
        oid = fs.put(b'foo')
        content = fs.get(oid).read()
        self.assertEqual(b'foo', content)
    def test_timeout_does_not_mark_member_down(self):
        # If a query times out, the client shouldn't mark the member "down".

        # Disable background refresh.
        with client_knobs(heartbeat_frequency=999999):
            c = rs_client(socketTimeoutMS=3000, w=self.w)
            collection = c.pymongo_test.test
            collection.insert_one({})

            # Query the primary.
            self.assertRaises(
                NetworkTimeout,
                collection.find_one,
                {'$where': delay(5)})

            self.assertTrue(c.primary)
            collection.find_one()  # No error.

            coll = collection.with_options(
                read_preference=ReadPreference.SECONDARY)

            # Query the secondary.
            self.assertRaises(
                NetworkTimeout,
                coll.find_one,
                {'$where': delay(5)})

            self.assertTrue(c.secondaries)

            # No error.
            coll.find_one()
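
# Hedged sketch of the client_knobs context manager used above (an
# assumption, not the actual helper): temporarily patch PyMongo's module-level
# timing constants, e.g. to effectively disable background monitoring, then
# restore the originals on exit. The constant names are assumptions.
from pymongo import common


class client_knobs(object):
    def __init__(self, heartbeat_frequency=None, kill_cursor_frequency=None):
        self.heartbeat_frequency = heartbeat_frequency
        self.kill_cursor_frequency = kill_cursor_frequency

    def __enter__(self):
        self.old_heartbeat = common.HEARTBEAT_FREQUENCY
        self.old_kill_cursor = common.KILL_CURSOR_FREQUENCY
        if self.heartbeat_frequency is not None:
            common.HEARTBEAT_FREQUENCY = self.heartbeat_frequency
        if self.kill_cursor_frequency is not None:
            common.KILL_CURSOR_FREQUENCY = self.kill_cursor_frequency

    def __exit__(self, exc_type, exc_val, exc_tb):
        common.HEARTBEAT_FREQUENCY = self.old_heartbeat
        common.KILL_CURSOR_FREQUENCY = self.old_kill_cursor
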
    def test_gridfs_replica_set(self):
        rsc = rs_client(w=self.w, wtimeout=5000,
                        read_preference=ReadPreference.SECONDARY)

        fs = gridfs.GridFS(rsc.pymongo_test)
        oid = fs.put(b"foo")
        content = fs.get(oid).read()
        self.assertEqual(b"foo", content)
    def test_nearest(self):
        # With high localThresholdMS, expect to read from any
        # member
        c = rs_client(
            read_preference=ReadPreference.NEAREST,
            localThresholdMS=10000)  # 10 seconds

        data_members = set(self.hosts).difference(set(self.arbiters))

        # This is a probabilistic test; track which members we've read from so
        # far, and keep reading until we've used all the members or give up.
        # Chance of using only 2 of 3 members 10k times if there's no bug =
        # 3 * (2/3)**10000, very low.
        used = set()
        i = 0
        while data_members.difference(used) and i < 10000:
            address = self.read_from_which_host(c)
            used.add(address)
            i += 1

        not_used = data_members.difference(used)
        latencies = ', '.join(
            '%s: %dms' % (server.description.address,
                          server.description.round_trip_time)
            for server in c._get_topology().select_servers(
                readable_server_selector))

        self.assertFalse(
            not_used,
            "Expected to use primary and all secondaries for mode NEAREST,"
            " but didn't use %s\nlatencies: %s" % (not_used, latencies))
    def test_commit_not_retried_after_timeout(self):
        listener = OvertCommandListener()
        client = rs_client(event_listeners=[listener])
        coll = client[self.db.name].test

        def callback(session):
            coll.insert_one({}, session=session)

        # Create the collection.
        coll.insert_one({})
        self.set_fail_point({
            'configureFailPoint': 'failCommand', 'mode': {'times': 2},
            'data': {
                'failCommands': ['commitTransaction'],
                'closeConnection': True}})
        self.addCleanup(self.set_fail_point, {
            'configureFailPoint': 'failCommand', 'mode': 'off'})
        listener.results.clear()

        with client.start_session() as s:
            with PatchSessionTimeout(0):
                with self.assertRaises(ConnectionFailure):
                    s.with_transaction(callback)

        # One insert for the callback and two commits (includes the automatic
        # retry).
        self.assertEqual(listener.started_command_names(),
                         ['insert', 'commitTransaction', 'commitTransaction'])
    def test_callback_not_retried_after_commit_timeout(self):
        listener = OvertCommandListener()
        client = rs_client(event_listeners=[listener])
        coll = client[self.db.name].test

        def callback(session):
            coll.insert_one({}, session=session)

        # Create the collection.
        coll.insert_one({})
        self.set_fail_point({
            'configureFailPoint': 'failCommand', 'mode': {'times': 1},
            'data': {
                'failCommands': ['commitTransaction'],
                'errorCode': 251,  # NoSuchTransaction
            }})
        self.addCleanup(self.set_fail_point, {
            'configureFailPoint': 'failCommand', 'mode': 'off'})
        listener.results.clear()

        with client.start_session() as s:
            with PatchSessionTimeout(0):
                with self.assertRaises(OperationFailure):
                    s.with_transaction(callback)

        self.assertEqual(listener.started_command_names(),
                         ['insert', 'commitTransaction'])
    def test_callback_not_retried_after_timeout(self):
        listener = OvertCommandListener()
        client = rs_client(event_listeners=[listener])
        coll = client[self.db.name].test

        def callback(session):
            coll.insert_one({}, session=session)
            err = {
                'ok': 0,
                'errmsg': 'Transaction 7819 has been aborted.',
                'code': 251,
                'codeName': 'NoSuchTransaction',
                'errorLabels': ['TransientTransactionError'],
            }
            raise OperationFailure(err['errmsg'], err['code'], err)

        # Create the collection.
        coll.insert_one({})
        listener.results.clear()
        with client.start_session() as s:
            with PatchSessionTimeout(0):
                with self.assertRaises(OperationFailure):
                    s.with_transaction(callback)

        self.assertEqual(listener.started_command_names(),
                         ['insert', 'abortTransaction'])
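
# Hedged sketch of the PatchSessionTimeout context manager used above (an
# assumption, not the actual test helper): temporarily override PyMongo's
# with_transaction retry time limit, so passing 0 makes with_transaction give
# up immediately instead of retrying transient errors for up to 120 seconds.
from pymongo import client_session


class PatchSessionTimeout(object):
    def __init__(self, mock_timeout):
        self.mock_timeout = mock_timeout

    def __enter__(self):
        self.real_timeout = client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT
        client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT = self.mock_timeout

    def __exit__(self, exc_type, exc_val, exc_tb):
        client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT = self.real_timeout
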
    def test_unpin_for_non_transaction_operation(self):
        # Increase localThresholdMS and wait until both nodes are discovered
        # to avoid false positives.
        client = rs_client(client_context.mongos_seeds(),
                           localThresholdMS=1000)
        wait_until(lambda: len(client.nodes) > 1, "discover both mongoses")
        coll = client.test.test
        # Create the collection.
        coll.insert_one({})
        self.addCleanup(client.close)
        with client.start_session() as s:
            # Session is pinned to Mongos.
            with s.start_transaction():
                coll.insert_one({}, session=s)

            addresses = set()
            for _ in range(UNPIN_TEST_MAX_ATTEMPTS):
                cursor = coll.find({}, session=s)
                self.assertTrue(next(cursor))
                addresses.add(cursor.address)
                # Break early if we can.
                if len(addresses) > 1:
                    break

            self.assertGreater(len(addresses), 1)
    def test_transaction_write_concern_override(self):
        """Test txn overrides Client/Database/Collection write_concern."""
        client = rs_client(w=0)
        db = client.test
        coll = db.test
        coll.insert_one({})
        with client.start_session() as s:
            with s.start_transaction(write_concern=WriteConcern(w=1)):
                self.assertTrue(coll.insert_one({}, session=s).acknowledged)
                self.assertTrue(
                    coll.insert_many([{}, {}], session=s).acknowledged)
                self.assertTrue(
                    coll.bulk_write([InsertOne({})], session=s).acknowledged)
                self.assertTrue(
                    coll.replace_one({}, {}, session=s).acknowledged)
                self.assertTrue(
                    coll.update_one({}, {
                        "$set": {
                            "a": 1
                        }
                    }, session=s).acknowledged)
                self.assertTrue(
                    coll.update_many({}, {
                        "$set": {
                            "a": 1
                        }
                    }, session=s).acknowledged)
                self.assertTrue(coll.delete_one({}, session=s).acknowledged)
                self.assertTrue(coll.delete_many({}, session=s).acknowledged)
                coll.find_one_and_delete({}, session=s)
                coll.find_one_and_replace({}, {}, session=s)
                coll.find_one_and_update({}, {"$set": {"a": 1}}, session=s)

        unsupported_txn_writes = [
            (client.drop_database, [db.name], {}),
            (db.create_collection, ['collection'], {}),
            (db.drop_collection, ['collection'], {}),
            (coll.drop, [], {}),
            (coll.map_reduce, ['function() {}', 'function() {}',
                               'output'], {}),
            (coll.rename, ['collection2'], {}),
            # Drop collection2 between tests of "rename", above.
            (coll.database.drop_collection, ['collection2'], {}),
            (coll.create_indexes, [[IndexModel('a')]], {}),
            (coll.create_index, ['a'], {}),
            (coll.drop_index, ['a_1'], {}),
            (coll.drop_indexes, [], {}),
            (coll.aggregate, [[{
                "$out": "aggout"
            }]], {}),
        ]
        for op in unsupported_txn_writes:
            op, args, kwargs = op
            with client.start_session() as s:
                kwargs['session'] = s
                s.start_transaction(write_concern=WriteConcern(w=1))
                with self.assertRaises(OperationFailure):
                    op(*args, **kwargs)
                s.abort_transaction()
    def assertReadsFrom(self, expected, **kwargs):
        c = rs_client(**kwargs)
        wait_until(lambda: len(c.nodes - c.arbiters) == self.w,
                   "discovered all nodes")

        used = self.read_from_which_kind(c)
        self.assertEqual(expected, used,
                         'Cursor used %s, expected %s' % (used, expected))
    def test_properties(self):
        c = client_context.client
        c.admin.command('ping')

        wait_until(lambda: c.primary == self.primary, "discover primary")
        wait_until(lambda: c.secondaries == self.secondaries,
                   "discover secondaries")

        # SERVER-32845
        if not (client_context.version >= (3, 7, 2)
                and client_context.auth_enabled and client_context.is_rs):
            wait_until(lambda: c.arbiters == self.arbiters,
                       "discover arbiters")
            self.assertEqual(c.arbiters, self.arbiters)

        self.assertEqual(c.primary, self.primary)
        self.assertEqual(c.secondaries, self.secondaries)
        self.assertEqual(c.max_pool_size, 100)

        # Make sure MongoClient's properties are copied to Database and
        # Collection.
        for obj in c, c.pymongo_test, c.pymongo_test.test:
            self.assertEqual(obj.codec_options, CodecOptions())
            self.assertEqual(obj.read_preference, ReadPreference.PRIMARY)
            self.assertEqual(obj.write_concern, WriteConcern())

        cursor = c.pymongo_test.test.find()
        self.assertEqual(
            ReadPreference.PRIMARY, cursor._Cursor__read_preference)

        tag_sets = [{'dc': 'la', 'rack': '2'}, {'foo': 'bar'}]
        secondary = Secondary(tag_sets=tag_sets)
        c = rs_client(
            maxPoolSize=25,
            document_class=SON,
            tz_aware=True,
            read_preference=secondary,
            localThresholdMS=77,
            j=True)

        self.assertEqual(c.max_pool_size, 25)

        for obj in c, c.pymongo_test, c.pymongo_test.test:
            self.assertEqual(obj.codec_options, CodecOptions(SON, True))
            self.assertEqual(obj.read_preference, secondary)
            self.assertEqual(obj.write_concern, WriteConcern(j=True))

        cursor = c.pymongo_test.test.find()
        self.assertEqual(
            secondary, cursor._Cursor__read_preference)

        nearest = Nearest(tag_sets=[{'dc': 'ny'}, {}])
        cursor = c.pymongo_test.get_collection(
            "test", read_preference=nearest).find()

        self.assertEqual(nearest, cursor._Cursor__read_preference)
        self.assertEqual(c.max_bson_size, 16777216)
        c.close()
    def test_mode_validation(self):
        for mode in (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED,
                     ReadPreference.SECONDARY,
                     ReadPreference.SECONDARY_PREFERRED,
                     ReadPreference.NEAREST):
            self.assertEqual(mode,
                             rs_client(read_preference=mode).read_preference)

        self.assertRaises(TypeError, rs_client, read_preference='foo')
    def test_gridfs_replica_set(self):
        rsc = rs_client(
            w=self.w,
            read_preference=ReadPreference.SECONDARY)

        gfs = gridfs.GridFSBucket(rsc.pymongo_test)
        oid = gfs.upload_from_stream("test_filename", b'foo')
        content = gfs.open_download_stream(oid).read()
        self.assertEqual(b'foo', content)
    def create_targets(self, *args, **kwargs):
        codec_options = kwargs.pop('codec_options', None)
        if codec_options:
            kwargs['type_registry'] = codec_options.type_registry
            kwargs['document_class'] = codec_options.document_class
        self.watched_target = rs_client(*args, **kwargs)
        self.input_target = self.watched_target[self.db.name].test
        # Insert a record to ensure db, coll are created.
        self.input_target.insert_one({'data': 'dummy'})
    def assertReadsFrom(self, expected, **kwargs):
        c = rs_client(**kwargs)
        wait_until(
            lambda: len(c.nodes - c.arbiters) == self.w,
            "discovered all nodes")

        used = self.read_from_which_kind(c)
        self.assertEqual(expected, used, 'Cursor used %s, expected %s' % (
            used, expected))
    def create_targets(self, *args, **kwargs):
        codec_options = kwargs.pop('codec_options', None)
        if codec_options:
            kwargs['type_registry'] = codec_options.type_registry
            kwargs['document_class'] = codec_options.document_class
        self.watched_target = rs_client(*args, **kwargs)
        self.input_target = self.watched_target[self.db.name].test
        # Insert a record to ensure db, coll are created.
        self.input_target.insert_one({'data': 'dummy'})
    def test_gridfs_replica_set(self):
        rsc = rs_client(w=self.w, read_preference=ReadPreference.SECONDARY)

        fs = gridfs.GridFS(rsc.gfsreplica, 'gfsreplicatest')

        gin = fs.new_file()
        self.assertEqual(gin._coll.read_preference, ReadPreference.PRIMARY)

        oid = fs.put(b'foo')
        content = fs.get(oid).read()
        self.assertEqual(b'foo', content)
    def test_threshold_validation(self):
        self.assertEqual(17, rs_client(
            localThresholdMS=17
        ).local_threshold_ms)

        self.assertEqual(42, rs_client(
            localThresholdMS=42
        ).local_threshold_ms)

        self.assertEqual(666, rs_client(
            localthresholdms=666
        ).local_threshold_ms)

        self.assertEqual(0, rs_client(
            localthresholdms=0
        ).local_threshold_ms)

        self.assertRaises(ValueError,
                          rs_client,
                          localthresholdms=-1)
    def test_unpin_committed_transaction(self):
        client = rs_client()
        self.addCleanup(client.close)
        pool = get_pool(client)
        coll = client[self.db.name].test
        with client.start_session() as session:
            with session.start_transaction():
                self.assertEqual(pool.active_sockets, 0)
                coll.insert_one({}, session=session)
                self.assertEqual(pool.active_sockets, 1)  # Pinned.
            self.assertEqual(pool.active_sockets, 1)  # Still pinned.
        self.assertEqual(pool.active_sockets, 0)  # Unpinned.
    def test_properties(self):
        c = client_context.client
        c.admin.command('ping')

        wait_until(lambda: c.primary == self.primary, "discover primary")
        wait_until(lambda: c.arbiters == self.arbiters, "discover arbiters")
        wait_until(lambda: c.secondaries == self.secondaries,
                   "discover secondaries")

        self.assertEqual(c.primary, self.primary)
        self.assertEqual(c.secondaries, self.secondaries)
        self.assertEqual(c.arbiters, self.arbiters)
        self.assertEqual(c.max_pool_size, 100)

        # Make sure MongoClient's properties are copied to Database and
        # Collection.
        for obj in c, c.pymongo_test, c.pymongo_test.test:
            self.assertEqual(obj.codec_options, CodecOptions())
            self.assertEqual(obj.read_preference, ReadPreference.PRIMARY)
            self.assertEqual(obj.write_concern, WriteConcern())

        cursor = c.pymongo_test.test.find()
        self.assertEqual(
            ReadPreference.PRIMARY, cursor._Cursor__read_preference)

        tag_sets = [{'dc': 'la', 'rack': '2'}, {'foo': 'bar'}]
        secondary = Secondary(tag_sets=tag_sets)
        c = rs_client(
            maxPoolSize=25,
            document_class=SON,
            tz_aware=True,
            read_preference=secondary,
            localThresholdMS=77,
            j=True)

        self.assertEqual(c.max_pool_size, 25)

        for obj in c, c.pymongo_test, c.pymongo_test.test:
            self.assertEqual(obj.codec_options, CodecOptions(SON, True))
            self.assertEqual(obj.read_preference, secondary)
            self.assertEqual(obj.write_concern, WriteConcern(j=True))

        cursor = c.pymongo_test.test.find()
        self.assertEqual(
            secondary, cursor._Cursor__read_preference)

        nearest = Nearest(tag_sets=[{'dc': 'ny'}, {}])
        cursor = c.pymongo_test.get_collection(
            "test", read_preference=nearest).find()

        self.assertEqual(nearest, cursor._Cursor__read_preference)
        self.assertEqual(c.max_bson_size, 16777216)
        c.close()
    def test_mode_validation(self):
        for mode in (ReadPreference.PRIMARY,
                     ReadPreference.PRIMARY_PREFERRED,
                     ReadPreference.SECONDARY,
                     ReadPreference.SECONDARY_PREFERRED,
                     ReadPreference.NEAREST):
            self.assertEqual(
                mode,
                rs_client(read_preference=mode).read_preference)

        self.assertRaises(
            TypeError,
            rs_client, read_preference='foo')
    def test_threshold_validation(self):
        self.assertEqual(
            17,
            rs_client(localThresholdMS=17,
                      connect=False).options.local_threshold_ms)

        self.assertEqual(
            42,
            rs_client(localThresholdMS=42,
                      connect=False).options.local_threshold_ms)

        self.assertEqual(
            666,
            rs_client(localThresholdMS=666,
                      connect=False).options.local_threshold_ms)

        self.assertEqual(
            0,
            rs_client(localThresholdMS=0,
                      connect=False).options.local_threshold_ms)

        self.assertRaises(ValueError, rs_client, localthresholdms=-1)
    def test_gridfs_replica_set(self):
        rsc = rs_client(
            w=self.w,
            read_preference=ReadPreference.SECONDARY)

        fs = gridfs.GridFS(rsc.pymongo_test)

        gin = fs.new_file()
        self.assertEqual(gin._coll.read_preference, ReadPreference.PRIMARY)

        oid = fs.put(b'foo')
        content = fs.get(oid).read()
        self.assertEqual(b'foo', content)
    def test_zero_latency(self):
        ping_times = set()
        # Generate unique ping times.
        while len(ping_times) < len(self.client.nodes):
            ping_times.add(random.random())
        for ping_time, host in zip(ping_times, self.client.nodes):
            ServerDescription._host_to_round_trip_time[host] = ping_time
        try:
            client = rs_client(readPreference='nearest', localThresholdMS=0)
            host = self.read_from_which_host(client)
            for _ in range(5):
                self.assertEqual(host, self.read_from_which_host(client))
        finally:
            ServerDescription._host_to_round_trip_time.clear()
    def test_transaction_write_concern_override(self):
        """Test txn overrides Client/Database/Collection write_concern."""
        client = rs_client(w=0)
        self.addCleanup(client.close)
        db = client.test
        coll = db.test
        coll.insert_one({})
        with client.start_session() as s:
            with s.start_transaction(write_concern=WriteConcern(w=1)):
                self.assertTrue(coll.insert_one({}, session=s).acknowledged)
                self.assertTrue(coll.insert_many(
                    [{}, {}], session=s).acknowledged)
                self.assertTrue(coll.bulk_write(
                    [InsertOne({})], session=s).acknowledged)
                self.assertTrue(coll.replace_one(
                    {}, {}, session=s).acknowledged)
                self.assertTrue(coll.update_one(
                    {}, {"$set": {"a": 1}}, session=s).acknowledged)
                self.assertTrue(coll.update_many(
                    {}, {"$set": {"a": 1}}, session=s).acknowledged)
                self.assertTrue(coll.delete_one({}, session=s).acknowledged)
                self.assertTrue(coll.delete_many({}, session=s).acknowledged)
                coll.find_one_and_delete({}, session=s)
                coll.find_one_and_replace({}, {}, session=s)
                coll.find_one_and_update({}, {"$set": {"a": 1}}, session=s)

        unsupported_txn_writes = [
            (client.drop_database, [db.name], {}),
            (db.create_collection, ['collection'], {}),
            (db.drop_collection, ['collection'], {}),
            (coll.drop, [], {}),
            (coll.map_reduce,
             ['function() {}', 'function() {}', 'output'], {}),
            (coll.rename, ['collection2'], {}),
            # Drop collection2 between tests of "rename", above.
            (coll.database.drop_collection, ['collection2'], {}),
            (coll.create_indexes, [[IndexModel('a')]], {}),
            (coll.create_index, ['a'], {}),
            (coll.drop_index, ['a_1'], {}),
            (coll.drop_indexes, [], {}),
            (coll.aggregate, [[{"$out": "aggout"}]], {}),
        ]
        for op in unsupported_txn_writes:
            op, args, kwargs = op
            with client.start_session() as s:
                kwargs['session'] = s
                s.start_transaction(write_concern=WriteConcern(w=1))
                with self.assertRaises(OperationFailure):
                    op(*args, **kwargs)
                s.abort_transaction()
    def test_tag_sets_validation(self):
        S = Secondary(tag_sets=[{}])
        self.assertEqual(
            [{}],
            rs_client(read_preference=S).read_preference.tag_sets)

        S = Secondary(tag_sets=[{'k': 'v'}])
        self.assertEqual(
            [{'k': 'v'}],
            rs_client(read_preference=S).read_preference.tag_sets)

        S = Secondary(tag_sets=[{'k': 'v'}, {}])
        self.assertEqual(
            [{'k': 'v'}, {}],
            rs_client(read_preference=S).read_preference.tag_sets)

        self.assertRaises(ValueError, Secondary, tag_sets=[])

        # One dict not ok, must be a list of dicts
        self.assertRaises(TypeError, Secondary, tag_sets={'k': 'v'})

        self.assertRaises(TypeError, Secondary, tag_sets='foo')

        self.assertRaises(TypeError, Secondary, tag_sets=['foo'])
    def test_tag_sets_validation(self):
        # Can't use tags with PRIMARY
        self.assertRaises(ConfigurationError,
                          _ServerMode,
                          0,
                          tag_sets=[{
                              'k': 'v'
                          }])

        # ... and empty tag sets are rejected with PRIMARY as well
        self.assertRaises(ConfigurationError, _ServerMode, 0, tag_sets=[{}])

        S = Secondary(tag_sets=[{}])
        self.assertEqual([{}],
                         rs_client(read_preference=S).read_preference.tag_sets)

        S = Secondary(tag_sets=[{'k': 'v'}])
        self.assertEqual([{
            'k': 'v'
        }],
                         rs_client(read_preference=S).read_preference.tag_sets)

        S = Secondary(tag_sets=[{'k': 'v'}, {}])
        self.assertEqual([{
            'k': 'v'
        }, {}],
                         rs_client(read_preference=S).read_preference.tag_sets)

        self.assertRaises(ValueError, Secondary, tag_sets=[])

        # One dict not ok, must be a list of dicts
        self.assertRaises(TypeError, Secondary, tag_sets={'k': 'v'})

        self.assertRaises(TypeError, Secondary, tag_sets='foo')

        self.assertRaises(TypeError, Secondary, tag_sets=['foo'])
    def test_zero_latency(self):
        ping_times = set()
        # Generate unique ping times.
        while len(ping_times) < len(self.client.nodes):
            ping_times.add(random.random())
        for ping_time, host in zip(ping_times, self.client.nodes):
            ServerDescription._host_to_round_trip_time[host] = ping_time
        try:
            client = connected(
                rs_client(readPreference='nearest', localThresholdMS=0))
            wait_until(
                lambda: client.nodes == self.client.nodes,
                "discovered all nodes")
            host = self.read_from_which_host(client)
            for _ in range(5):
                self.assertEqual(host, self.read_from_which_host(client))
        finally:
            ServerDescription._host_to_round_trip_time.clear()
    def _test_no_gc_deadlock(self, create_resource):
        client = rs_client()
        self.addCleanup(client.close)
        pool = get_pool(client)
        coll = client[self.db.name].test
        coll.insert_many([{} for _ in range(10)])
        self.assertEqual(pool.active_sockets, 0)
        # Cause the initial find attempt to fail to induce a reference cycle.
        args = {
            "mode": {"times": 1},
            "data": {
                "failCommands": ["find", "aggregate"],
                "errorCode": 91,
                "closeConnection": True,
            },
        }
        with self.fail_point(args):
            resource = create_resource(coll)
            if client_context.load_balancer:
                self.assertEqual(pool.active_sockets, 1)  # Pinned.

        thread = PoolLocker(pool)
        thread.start()
        self.assertTrue(thread.locked.wait(5), 'timed out')
        # Garbage collect the resource while the pool is locked to ensure we
        # don't deadlock.
        del resource
        # On PyPy it can take a few rounds to collect the cursor.
        for _ in range(3):
            gc.collect()
        thread.unlock.set()
        thread.join(5)
        self.assertFalse(thread.is_alive())
        self.assertIsNone(thread.exc)

        wait_until(lambda: pool.active_sockets == 0, 'return socket')
        # Run another operation to ensure the socket still works.
        coll.delete_many({})
    def test_zero_latency(self):
        if (client_context.version >= (3, 7, 2)
                and client_context.auth_enabled and client_context.is_rs):
            raise SkipTest("Disabled due to SERVER-32845")
        ping_times = set()
        # Generate unique ping times.
        while len(ping_times) < len(self.client.nodes):
            ping_times.add(random.random())
        for ping_time, host in zip(ping_times, self.client.nodes):
            ServerDescription._host_to_round_trip_time[host] = ping_time
        try:
            client = connected(
                rs_client(readPreference='nearest', localThresholdMS=0))
            wait_until(lambda: client.nodes == self.client.nodes,
                       "discovered all nodes")
            host = self.read_from_which_host(client)
            for _ in range(5):
                self.assertEqual(host, self.read_from_which_host(client))
        finally:
            ServerDescription._host_to_round_trip_time.clear()

    def test_send_hedge(self):
        cases = {
            'primaryPreferred': PrimaryPreferred,
            'secondary': Secondary,
            'secondaryPreferred': SecondaryPreferred,
            'nearest': Nearest,
        }
        listener = OvertCommandListener()
        client = rs_client(event_listeners=[listener])
        self.addCleanup(client.close)
        client.admin.command('ping')
        for mode, cls in cases.items():
            pref = cls(hedge={'enabled': True})
            coll = client.test.get_collection('test', read_preference=pref)
            listener.reset()
            coll.find_one()
            started = listener.results['started']
            self.assertEqual(len(started), 1, started)
            cmd = started[0].command
            self.assertIn('$readPreference', cmd)
            self.assertEqual(cmd['$readPreference'], pref.document)
    def test_create_collection(self):
        client = rs_client()
        self.addCleanup(client.close)
        db = client.pymongo_test
        coll = db.test_create_collection
        self.addCleanup(coll.drop)
        with client.start_session() as s, s.start_transaction():
            coll2 = db.create_collection(coll.name, session=s)
            self.assertEqual(coll, coll2)
            coll.insert_one({}, session=s)

        # Outside a transaction we raise CollectionInvalid on existing colls.
        with self.assertRaises(CollectionInvalid):
            db.create_collection(coll.name)

        # Inside a transaction we raise the OperationFailure from create.
        with client.start_session() as s:
            s.start_transaction()
            with self.assertRaises(OperationFailure) as ctx:
                db.create_collection(coll.name, session=s)
            self.assertEqual(ctx.exception.code, 48)  # NamespaceExists
    def _test_kill_cursor_explicit(self, read_pref):
        with client_knobs(kill_cursor_frequency=0.01):
            c = rs_client(read_preference=read_pref, w=self.w)
            db = c.pymongo_test
            db.drop_collection("test")

            test = db.test
            test.insert_many([{"i": i} for i in range(20)])

            # Partially evaluate cursor so it's left alive, then kill it
            cursor = test.find().batch_size(10)
            next(cursor)
            self.assertNotEqual(0, cursor.cursor_id)

            if read_pref == ReadPreference.PRIMARY:
                msg = "Expected cursor's address to be %s, got %s" % (
                    c.primary, cursor.address)

                self.assertEqual(cursor.address, c.primary, msg)
            else:
                self.assertNotEqual(
                    cursor.address, c.primary,
                    "Expected cursor's address not to be primary")

            cursor_id = cursor.cursor_id

            # Cursor dead on server - trigger a getMore on the same cursor_id
            # and check that the server returns an error.
            cursor2 = cursor.clone()
            cursor2._Cursor__id = cursor_id

            if sys.platform.startswith('java') or 'PyPy' in sys.version:
                # Explicitly kill cursor.
                cursor.close()
            else:
                # Implicitly kill it in CPython.
                del cursor

            time.sleep(5)
            self.assertRaises(OperationFailure, lambda: list(cursor2))
    def run_scenario(self):
        listener = OvertCommandListener()
        # New client, to avoid interference from pooled sessions.
        # Convert test['clientOptions'] to dict to avoid a Jython bug using "**"
        # with ScenarioDict.
        client = rs_client(event_listeners=[listener],
                           **dict(test.get('clientOptions', {})))
        # Close the client explicitly to avoid having too many threads open.
        self.addCleanup(client.close)

        # Get database and collection objects.
        database = getattr(
            client, scenario_def.get('database_name', TEST_DB))
        drop_collections(database)
        collection = getattr(
            database, scenario_def.get('collection_name', TEST_COLLECTION))

        # Populate collection with data and run test.
        collection.with_options(
            write_concern=WriteConcern(w="majority")).insert_many(
            scenario_def.get('data', []))
        listener.results.clear()
        self.run_operation(collection, test)

        # Assert expected events.
        self.check_events(test.get('expectations', {}), listener)

        # Assert final state is expected.
        expected_outcome = test.get('outcome', {}).get('collection')
        if expected_outcome is not None:
            collname = expected_outcome.get('name')
            if collname is not None:
                o_collection = getattr(database, collname)
            else:
                o_collection = collection
            o_collection = o_collection.with_options(
                read_concern=ReadConcern(level="local"))
            self.assertEqual(list(o_collection.find()),
                             expected_outcome['data'])
    def test_versioned_api(self):
        # Versioned API examples
        MongoClient = lambda _, server_api: rs_client(server_api=server_api,
                                                      connect=False)
        uri = None

        # Start Versioned API Example 1
        from pymongo.server_api import ServerApi
        client = MongoClient(uri, server_api=ServerApi("1"))
        # End Versioned API Example 1

        # Start Versioned API Example 2
        client = MongoClient(uri, server_api=ServerApi("1", strict=True))
        # End Versioned API Example 2

        # Start Versioned API Example 3
        client = MongoClient(uri, server_api=ServerApi("1", strict=False))
        # End Versioned API Example 3

        # Start Versioned API Example 4
        client = MongoClient(uri,
                             server_api=ServerApi("1",
                                                  deprecation_errors=True))
    def run_scenario(self):
        listener = OvertCommandListener()
        # New client, to avoid interference from pooled sessions.
        # Convert test['clientOptions'] to dict to avoid a Jython bug using "**"
        # with ScenarioDict.
        client = rs_client(event_listeners=[listener],
                           **dict(test.get('clientOptions', {})))
        # Close the client explicitly to avoid having too many threads open.
        self.addCleanup(client.close)

        # Get database and collection objects.
        database = getattr(client, scenario_def.get('database_name', TEST_DB))
        drop_collections(database)
        collection = getattr(
            database, scenario_def.get('collection_name', TEST_COLLECTION))

        # Populate collection with data and run test.
        collection.with_options(write_concern=WriteConcern(
            w="majority")).insert_many(scenario_def.get('data', []))
        listener.results.clear()
        self.run_operation(collection, test)

        # Assert expected events.
        self.check_events(test.get('expectations', {}), listener)

        # Assert final state is expected.
        expected_outcome = test.get('outcome', {}).get('collection')
        if expected_outcome is not None:
            collname = expected_outcome.get('name')
            if collname is not None:
                o_collection = getattr(database, collname)
            else:
                o_collection = collection
            o_collection = o_collection.with_options(read_concern=ReadConcern(
                level="local"))
            self.assertEqual(list(o_collection.find()),
                             expected_outcome['data'])
    def test_commit_not_retried_after_timeout(self):
        listener = OvertCommandListener()
        client = rs_client(event_listeners=[listener])
        self.addCleanup(client.close)
        coll = client[self.db.name].test

        def callback(session):
            coll.insert_one({}, session=session)

        # Create the collection.
        coll.insert_one({})
        self.set_fail_point({
            'configureFailPoint': 'failCommand',
            'mode': {
                'times': 2
            },
            'data': {
                'failCommands': ['commitTransaction'],
                'closeConnection': True
            }
        })
        self.addCleanup(self.set_fail_point, {
            'configureFailPoint': 'failCommand',
            'mode': 'off'
        })
        listener.results.clear()

        with client.start_session() as s:
            with PatchSessionTimeout(0):
                with self.assertRaises(ConnectionFailure):
                    s.with_transaction(callback)

        # One insert for the callback and two commits (includes the automatic
        # retry).
        self.assertEqual(listener.started_command_names(),
                         ['insert', 'commitTransaction', 'commitTransaction'])
    def run_scenario(self):
        if test.get('skipReason'):
            raise unittest.SkipTest(test.get('skipReason'))

        listener = OvertCommandListener()
        # Create a new client, to avoid interference from pooled sessions.
        # Convert test['clientOptions'] to dict to avoid a Jython bug using
        # "**" with ScenarioDict.
        client_options = dict(test['clientOptions'])
        use_multi_mongos = test['useMultipleMongoses']
        if client_context.is_mongos and use_multi_mongos:
            client = rs_client(client_context.mongos_seeds(),
                               event_listeners=[listener], **client_options)
        else:
            client = rs_client(event_listeners=[listener], **client_options)
        # Close the client explicitly to avoid having too many threads open.
        self.addCleanup(client.close)

        # Kill all sessions before and after each test to prevent an open
        # transaction (from a test failure) from blocking collection/database
        # operations during test set up and tear down.
        self.kill_all_sessions()
        self.addCleanup(self.kill_all_sessions)

        database_name = scenario_def['database_name']
        collection_name = scenario_def['collection_name']
        # Don't use the test client to load data.
        write_concern_db = client_context.client.get_database(
            database_name, write_concern=WriteConcern(w='majority'))
        write_concern_coll = write_concern_db[collection_name]
        write_concern_coll.drop()
        write_concern_db.create_collection(collection_name)
        if scenario_def['data']:
            # Load data.
            write_concern_coll.insert_many(scenario_def['data'])

        # SPEC-1245 workaround StaleDbVersion on distinct
        for c in self.mongos_clients:
            c[database_name][collection_name].distinct("x")

        # Create session0 and session1.
        sessions = {}
        session_ids = {}
        for i in range(2):
            session_name = 'session%d' % i
            opts = camel_to_snake_args(test['sessionOptions'][session_name])
            if 'default_transaction_options' in opts:
                txn_opts = opts['default_transaction_options']
                if 'readConcern' in txn_opts:
                    read_concern = ReadConcern(
                        **dict(txn_opts['readConcern']))
                else:
                    read_concern = None
                if 'writeConcern' in txn_opts:
                    write_concern = WriteConcern(
                        **dict(txn_opts['writeConcern']))
                else:
                    write_concern = None

                if 'readPreference' in txn_opts:
                    read_pref = parse_read_preference(
                        txn_opts['readPreference'])
                else:
                    read_pref = None

                txn_opts = client_session.TransactionOptions(
                    read_concern=read_concern,
                    write_concern=write_concern,
                    read_preference=read_pref,
                )
                opts['default_transaction_options'] = txn_opts

            s = client.start_session(**dict(opts))

            sessions[session_name] = s
            # Store lsid so we can access it after end_session, in check_events.
            session_ids[session_name] = s.session_id

        self.addCleanup(end_sessions, sessions)

        if 'failPoint' in test:
            self.set_fail_point(test['failPoint'])
            self.addCleanup(self.set_fail_point, {
                'configureFailPoint': 'failCommand', 'mode': 'off'})

        listener.results.clear()
        collection = client[database_name][collection_name]

        self.run_operations(sessions, collection, test['operations'])

        for s in sessions.values():
            s.end_session()

        self.check_events(test, listener, session_ids)

        # Disable fail points.
        self.set_fail_point({
            'configureFailPoint': 'failCommand', 'mode': 'off'})

        # Assert final state is expected.
        expected_c = test['outcome'].get('collection')
        if expected_c is not None:
            # Read from the primary with local read concern to ensure causal
            # consistency.
            primary_coll = collection.with_options(
                read_preference=ReadPreference.PRIMARY,
                read_concern=ReadConcern('local'))
            self.assertEqual(list(primary_coll.find()), expected_c['data'])
    def run_scenario(self):
        listener = OvertCommandListener()
        # New client, to avoid interference from pooled sessions.
        # Convert test['clientOptions'] to dict to avoid a Jython bug using "**"
        # with ScenarioDict.
        client = rs_client(event_listeners=[listener],
                           **dict(test['clientOptions']))
        # Close the client explicitly to avoid having too many threads open.
        self.addCleanup(client.close)

        # Kill all sessions before and after each test to prevent an open
        # transaction (from a test failure) from blocking collection/database
        # operations during test set up and tear down.
        def kill_all_sessions():
            try:
                client.admin.command('killAllSessions', [])
            except OperationFailure:
                # "operation was interrupted" by killing the command's
                # own session.
                pass

        kill_all_sessions()
        self.addCleanup(kill_all_sessions)

        database_name = scenario_def['database_name']
        collection_name = scenario_def['collection_name']
        write_concern_db = client.get_database(
            database_name, write_concern=WriteConcern(w='majority'))
        write_concern_coll = write_concern_db[collection_name]
        write_concern_coll.drop()
        write_concern_db.create_collection(collection_name)
        if scenario_def['data']:
            # Load data.
            write_concern_coll.insert_many(scenario_def['data'])

        # Create session0 and session1.
        sessions = {}
        session_ids = {}
        for i in range(2):
            session_name = 'session%d' % i
            opts = camel_to_snake_args(test['sessionOptions'][session_name])
            if 'default_transaction_options' in opts:
                txn_opts = opts['default_transaction_options']
                if 'readConcern' in txn_opts:
                    read_concern = ReadConcern(**dict(txn_opts['readConcern']))
                else:
                    read_concern = None
                if 'writeConcern' in txn_opts:
                    write_concern = WriteConcern(
                        **dict(txn_opts['writeConcern']))
                else:
                    write_concern = None

                if 'readPreference' in txn_opts:
                    read_pref = parse_read_preference(
                        txn_opts['readPreference'])
                else:
                    read_pref = None

                txn_opts = client_session.TransactionOptions(
                    read_concern=read_concern,
                    write_concern=write_concern,
                    read_preference=read_pref,
                )
                opts['default_transaction_options'] = txn_opts

            s = client.start_session(**dict(opts))

            sessions[session_name] = s
            # Store lsid so we can access it after end_session, in check_events.
            session_ids[session_name] = s.session_id

        self.addCleanup(end_sessions, sessions)

        if 'failPoint' in test:
            self.set_fail_point(test['failPoint'])
            self.addCleanup(self.set_fail_point, {
                'configureFailPoint': 'failCommand',
                'mode': 'off'
            })

        listener.results.clear()
        collection = client[database_name][collection_name]
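
        # The expect_error* predicates used below examine the expected result
        # loaded from the JSON test file. They are not defined in this
        # snippet; a minimal sketch consistent with how they are called here
        # (an assumption, not the original helpers):
        #
        #     def expect_error_message(expected_result):
        #         return (isinstance(expected_result, dict) and
        #                 'errorContains' in expected_result)
        #
        #     def expect_error_code(expected_result):
        #         return (isinstance(expected_result, dict) and
        #                 'errorCodeName' in expected_result)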

        for op in test['operations']:
            expected_result = op.get('result')
            if expect_error(expected_result):
                with self.assertRaises(PyMongoError,
                                       msg=op['name']) as context:
                    self.run_operation(sessions, collection, op.copy())

                if expect_error_message(expected_result):
                    self.assertIn(expected_result['errorContains'].lower(),
                                  str(context.exception).lower())
                if expect_error_code(expected_result):
                    self.assertEqual(expected_result['errorCodeName'],
                                     context.exception.details.get('codeName'))
                if expect_error_labels_contain(expected_result):
                    self.assertErrorLabelsContain(
                        context.exception,
                        expected_result['errorLabelsContain'])
                if expect_error_labels_omit(expected_result):
                    self.assertErrorLabelsOmit(
                        context.exception, expected_result['errorLabelsOmit'])
            else:
                result = self.run_operation(sessions, collection, op.copy())
                if 'result' in op:
                    if op['name'] == 'runCommand':
                        self.check_command_result(expected_result, result)
                    else:
                        self.check_result(expected_result, result)

        for s in sessions.values():
            s.end_session()

        self.check_events(test, listener, session_ids)

        # Assert final state is expected.
        expected_c = test['outcome'].get('collection')
        if expected_c is not None:
            # Read from the primary to ensure causal consistency.
            primary_coll = collection.with_options(
                read_preference=ReadPreference.PRIMARY)
            self.assertEqual(list(primary_coll.find()), expected_c['data'])
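
    # set_fail_point is called above but is not defined in this snippet. A
    # minimal sketch of a compatible helper (an assumption, not the original
    # test-suite implementation): fail point documents are run as admin
    # commands against the test deployment.
    def _set_fail_point_sketch(self, command_args):
        # client_context.client is the shared test client used elsewhere in
        # these scenarios.
        client_context.client.admin.command(command_args)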
    def test_transactions(self):
        # Transaction examples
        client = self.client
        self.addCleanup(client.drop_database, "hr")
        self.addCleanup(client.drop_database, "reporting")

        employees = client.hr.employees
        events = client.reporting.events
        employees.insert_one({"employee": 3, "status": "Active"})
        events.insert_one({
            "employee": 3,
            "status": {
                "new": "Active",
                "old": None
            }
        })

        # Start Transactions Intro Example 1

        def update_employee_info(session):
            employees_coll = session.client.hr.employees
            events_coll = session.client.reporting.events

            with session.start_transaction(
                    read_concern=ReadConcern("snapshot"),
                    write_concern=WriteConcern(w="majority")):
                employees_coll.update_one({"employee": 3},
                                          {"$set": {
                                              "status": "Inactive"
                                          }},
                                          session=session)
                events_coll.insert_one(
                    {
                        "employee": 3,
                        "status": {
                            "new": "Inactive",
                            "old": "Active"
                        }
                    },
                    session=session)

                while True:
                    try:
                        # Commit uses write concern set at transaction start.
                        session.commit_transaction()
                        print("Transaction committed.")
                        break
                    except (ConnectionFailure, OperationFailure) as exc:
                        # Can retry commit
                        if exc.has_error_label(
                                "UnknownTransactionCommitResult"):
                            print("UnknownTransactionCommitResult, retrying "
                                  "commit operation ...")
                            continue
                        else:
                            print("Error during commit ...")
                            raise

        # End Transactions Intro Example 1

        with client.start_session() as session:
            update_employee_info(session)

        employee = employees.find_one({"employee": 3})
        self.assertIsNotNone(employee)
        self.assertEqual(employee['status'], 'Inactive')

        # Start Transactions Retry Example 1
        def run_transaction_with_retry(txn_func, session):
            while True:
                try:
                    txn_func(session)  # performs transaction
                    break
                except (ConnectionFailure, OperationFailure) as exc:
                    print("Transaction aborted. Caught exception during "
                          "transaction.")

                    # If transient error, retry the whole transaction
                    if exc.has_error_label("TransientTransactionError"):
                        print("TransientTransactionError, retrying"
                              "transaction ...")
                        continue
                    else:
                        raise

        # End Transactions Retry Example 1

        with client.start_session() as session:
            run_transaction_with_retry(update_employee_info, session)

        employee = employees.find_one({"employee": 3})
        self.assertIsNotNone(employee)
        self.assertEqual(employee['status'], 'Inactive')

        # Start Transactions Retry Example 2
        def commit_with_retry(session):
            while True:
                try:
                    # Commit uses write concern set at transaction start.
                    session.commit_transaction()
                    print("Transaction committed.")
                    break
                except (ConnectionFailure, OperationFailure) as exc:
                    # Can retry commit
                    if exc.has_error_label("UnknownTransactionCommitResult"):
                        print("UnknownTransactionCommitResult, retrying "
                              "commit operation ...")
                        continue
                    else:
                        print("Error during commit ...")
                        raise

        # End Transactions Retry Example 2

        # Test commit_with_retry from the previous examples
        def _insert_employee_retry_commit(session):
            with session.start_transaction():
                employees.insert_one({
                    "employee": 4,
                    "status": "Active"
                },
                                     session=session)
                events.insert_one(
                    {
                        "employee": 4,
                        "status": {
                            "new": "Active",
                            "old": None
                        }
                    },
                    session=session)

                commit_with_retry(session)

        with client.start_session() as session:
            run_transaction_with_retry(_insert_employee_retry_commit, session)

        employee = employees.find_one({"employee": 4})
        self.assertIsNotNone(employee)
        self.assertEqual(employee['status'], 'Active')

        # Start Transactions Retry Example 3

        def run_transaction_with_retry(txn_func, session):
            while True:
                try:
                    txn_func(session)  # performs transaction
                    break
                except (ConnectionFailure, OperationFailure) as exc:
                    # If transient error, retry the whole transaction
                    if exc.has_error_label("TransientTransactionError"):
                        print("TransientTransactionError, retrying "
                              "transaction ...")
                        continue
                    else:
                        raise

        def commit_with_retry(session):
            while True:
                try:
                    # Commit uses write concern set at transaction start.
                    session.commit_transaction()
                    print("Transaction committed.")
                    break
                except (ConnectionFailure, OperationFailure) as exc:
                    # Can retry commit
                    if exc.has_error_label("UnknownTransactionCommitResult"):
                        print("UnknownTransactionCommitResult, retrying "
                              "commit operation ...")
                        continue
                    else:
                        print("Error during commit ...")
                        raise

        # Updates two collections in a transaction.

        def update_employee_info(session):
            employees_coll = session.client.hr.employees
            events_coll = session.client.reporting.events

            with session.start_transaction(
                    read_concern=ReadConcern("snapshot"),
                    write_concern=WriteConcern(w="majority"),
                    read_preference=ReadPreference.PRIMARY):
                employees_coll.update_one({"employee": 3},
                                          {"$set": {
                                              "status": "Inactive"
                                          }},
                                          session=session)
                events_coll.insert_one(
                    {
                        "employee": 3,
                        "status": {
                            "new": "Inactive",
                            "old": "Active"
                        }
                    },
                    session=session)

                commit_with_retry(session)

        # Start a session.
        with client.start_session() as session:
            try:
                run_transaction_with_retry(update_employee_info, session)
            except Exception as exc:
                # Do something with error.
                raise

        # End Transactions Retry Example 3

        employee = employees.find_one({"employee": 3})
        self.assertIsNotNone(employee)
        self.assertEqual(employee['status'], 'Inactive')

        # Stub out MongoClient and the URI so the documentation example below
        # runs against the test replica set instead of a real deployment.
        MongoClient = lambda _: rs_client()
        uriString = None

        # Start Transactions withTxn API Example 1

        # For a replica set, include the replica set name and a seedlist of the members in the URI string; e.g.
        # uriString = 'mongodb://mongodb0.example.com:27017,mongodb1.example.com:27017/?replicaSet=myRepl'
        # For a sharded cluster, connect to the mongos instances; e.g.
        # uriString = 'mongodb://mongos0.example.com:27017,mongos1.example.com:27017/'

        client = MongoClient(uriString)
        wc_majority = WriteConcern("majority", wtimeout=1000)

        # Prereq: Create collections.
        client.get_database("mydb1", write_concern=wc_majority).foo.insert_one(
            {'abc': 0})
        client.get_database("mydb2", write_concern=wc_majority).bar.insert_one(
            {'xyz': 0})

        # Step 1: Define the callback that specifies the sequence of operations to perform inside the transaction.
        def callback(session):
            collection_one = session.client.mydb1.foo
            collection_two = session.client.mydb2.bar

            # Important: You must pass the session to the operations.
            collection_one.insert_one({'abc': 1}, session=session)
            collection_two.insert_one({'xyz': 999}, session=session)

        # Step 2: Start a client session.
        with client.start_session() as session:
            # Step 3: Use with_transaction to start a transaction, execute the callback, and commit (or abort on error).
            session.with_transaction(callback,
                                     read_concern=ReadConcern('local'),
                                     write_concern=wc_majority,
                                     read_preference=ReadPreference.PRIMARY)
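
        # Note: session.with_transaction is PyMongo's convenience API for the
        # retry pattern shown in Retry Examples 1-3 above: it starts the
        # transaction, runs the callback, retries the whole transaction when a
        # "TransientTransactionError" label is raised, and retries
        # commit_transaction on an "UnknownTransactionCommitResult" label, so
        # no manual retry loop is needed here.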
    def run_scenario(self, scenario_def, test):
        self.maybe_skip_scenario(test)

        # Kill all sessions before and after each test to prevent an open
        # transaction (from a test failure) from blocking collection/database
        # operations during test set up and tear down.
        self.kill_all_sessions()
        self.addCleanup(self.kill_all_sessions)
        self.setup_scenario(scenario_def)
        database_name = self.get_scenario_db_name(scenario_def)
        collection_name = self.get_scenario_coll_name(scenario_def)
        # SPEC-1245: workaround for StaleDbVersion errors on distinct.
        for c in self.mongos_clients:
            c[database_name][collection_name].distinct("x")

        # Configure the fail point before creating the client.
        if 'failPoint' in test:
            fp = test['failPoint']
            self.set_fail_point(fp)
            self.addCleanup(self.set_fail_point, {
                'configureFailPoint': fp['configureFailPoint'],
                'mode': 'off'
            })

        listener = OvertCommandListener()
        pool_listener = CMAPListener()
        server_listener = ServerAndTopologyEventListener()
        # Create a new client, to avoid interference from pooled sessions.
        client_options = self.parse_client_options(test['clientOptions'])
        # MMAPv1 does not support retryable writes.
        if (client_options.get('retryWrites') is True
                and client_context.storage_engine == 'mmapv1'):
            self.skipTest("MMAPv1 does not support retryWrites=True")
        use_multi_mongos = test['useMultipleMongoses']
        if client_context.is_mongos and use_multi_mongos:
            client = rs_client(
                client_context.mongos_seeds(),
                event_listeners=[listener, pool_listener, server_listener],
                **client_options)
        else:
            client = rs_client(
                event_listeners=[listener, pool_listener, server_listener],
                **client_options)
        self.scenario_client = client
        self.listener = listener
        self.pool_listener = pool_listener
        self.server_listener = server_listener
        # Close the client explicitly to avoid having too many threads open.
        self.addCleanup(client.close)

        # Create session0 and session1.
        sessions = {}
        session_ids = {}
        for i in range(2):
            # Don't attempt to create sessions if they are not supported by
            # the running server version.
            if not client_context.sessions_enabled:
                break
            session_name = 'session%d' % i
            opts = camel_to_snake_args(test['sessionOptions'][session_name])
            if 'default_transaction_options' in opts:
                txn_opts = self.parse_options(
                    opts['default_transaction_options'])
                txn_opts = client_session.TransactionOptions(**txn_opts)
                opts['default_transaction_options'] = txn_opts

            s = client.start_session(**dict(opts))

            sessions[session_name] = s
            # Store lsid so we can access it after end_session, in check_events.
            session_ids[session_name] = s.session_id

        self.addCleanup(end_sessions, sessions)

        collection = client[database_name][collection_name]
        self.run_test_ops(sessions, collection, test)

        end_sessions(sessions)

        self.check_events(test, listener, session_ids)

        # Disable fail points.
        if 'failPoint' in test:
            fp = test['failPoint']
            self.set_fail_point({
                'configureFailPoint': fp['configureFailPoint'],
                'mode': 'off'
            })

        # Assert final state is expected.
        outcome = test['outcome']
        expected_c = outcome.get('collection')
        if expected_c is not None:
            outcome_coll_name = self.get_outcome_coll_name(outcome, collection)

            # Read from the primary with local read concern to ensure causal
            # consistency.
            outcome_coll = client_context.client[
                collection.database.name].get_collection(
                    outcome_coll_name,
                    read_preference=ReadPreference.PRIMARY,
                    read_concern=ReadConcern('local'))
            actual_data = list(outcome_coll.find(sort=[('_id', 1)]))

            # The expected data must be on the left-hand side here; otherwise
            # CompareType(Binary) doesn't work.
            self.assertEqual(wrap_types(expected_c['data']), actual_data)
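
    # end_sessions is used with addCleanup above but is not defined in this
    # snippet. A minimal sketch consistent with how it is called (an
    # assumption, not the original helper):
    #
    #     def end_sessions(sessions):
    #         for s in sessions.values():
    #             # end_session() is safe to call more than once.
    #             s.end_session()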
    def run_scenario(self):
        listener = OvertCommandListener()
        # New client, to avoid interference from pooled sessions.
        # Convert test['clientOptions'] to a dict to avoid a Jython bug when
        # using "**" with ScenarioDict.
        client = rs_client(event_listeners=[listener],
                           **dict(test['clientOptions']))

        # Kill all sessions before and after each test to prevent an open
        # transaction (from a test failure) from blocking collection/database
        # operations during test set up and tear down.
        def kill_all_sessions():
            try:
                client.admin.command('killAllSessions', [])
            except OperationFailure:
                # "operation was interrupted" by killing the command's
                # own session.
                pass
        kill_all_sessions()
        self.addCleanup(kill_all_sessions)

        database_name = scenario_def['database_name']
        collection_name = scenario_def['collection_name']
        write_concern_db = client.get_database(
            database_name, write_concern=WriteConcern(w='majority'))
        write_concern_coll = write_concern_db[collection_name]
        write_concern_coll.drop()
        write_concern_db.create_collection(collection_name)
        if scenario_def['data']:
            # Load data.
            write_concern_coll.insert_many(scenario_def['data'])

        # Create session0 and session1.
        sessions = {}
        session_ids = {}
        for i in range(2):
            session_name = 'session%d' % i
            opts = camel_to_snake_args(test['sessionOptions'][session_name])
            if 'default_transaction_options' in opts:
                txn_opts = opts['default_transaction_options']
                if 'readConcern' in txn_opts:
                    read_concern = ReadConcern(
                        **dict(txn_opts['readConcern']))
                else:
                    read_concern = None
                if 'writeConcern' in txn_opts:
                    write_concern = WriteConcern(
                        **dict(txn_opts['writeConcern']))
                else:
                    write_concern = None

                if 'readPreference' in txn_opts:
                    read_pref = parse_read_preference(
                        txn_opts['readPreference'])
                else:
                    read_pref = None

                txn_opts = client_session.TransactionOptions(
                    read_concern=read_concern,
                    write_concern=write_concern,
                    read_preference=read_pref,
                )
                opts['default_transaction_options'] = txn_opts

            s = client.start_session(**dict(opts))

            sessions[session_name] = s
            # Store lsid so we can access it after end_session, in check_events.
            session_ids[session_name] = s.session_id

        self.addCleanup(end_sessions, sessions)

        if 'failPoint' in test:
            self.set_fail_point(test['failPoint'])
            self.addCleanup(self.set_fail_point, {
                'configureFailPoint': 'failCommand', 'mode': 'off'})

        listener.results.clear()
        collection = client[database_name][collection_name]

        for op in test['operations']:
            expected_result = op.get('result')
            if expect_error(expected_result):
                with self.assertRaises(PyMongoError,
                                       msg=op['name']) as context:
                    self.run_operation(sessions, collection, op.copy())

                if expect_error_message(expected_result):
                    self.assertIn(expected_result['errorContains'].lower(),
                                  str(context.exception).lower())
                if expect_error_code(expected_result):
                    self.assertEqual(expected_result['errorCodeName'],
                                     context.exception.details.get('codeName'))
                if expect_error_labels_contain(expected_result):
                    self.assertErrorLabelsContain(
                        context.exception,
                        expected_result['errorLabelsContain'])
                if expect_error_labels_omit(expected_result):
                    self.assertErrorLabelsOmit(
                        context.exception,
                        expected_result['errorLabelsOmit'])
            else:
                result = self.run_operation(sessions, collection, op.copy())
                if 'result' in op:
                    if op['name'] == 'runCommand':
                        self.check_command_result(expected_result, result)
                    else:
                        self.check_result(expected_result, result)

        for s in sessions.values():
            s.end_session()

        self.check_events(test, listener, session_ids)

        # Assert final state is expected.
        expected_c = test['outcome'].get('collection')
        if expected_c is not None:
            # Read from the primary to ensure causal consistency.
            primary_coll = collection.with_options(
                read_preference=ReadPreference.PRIMARY)
            self.assertEqual(list(primary_coll.find()), expected_c['data'])
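
    # camel_to_snake_args, parse_read_preference, and the other helpers used
    # in these scenarios come from the surrounding test utilities and are not
    # shown in this snippet. As an illustration only (an assumed sketch, not
    # the original code), camel_to_snake_args converts the camelCase option
    # names from the JSON tests into snake_case keyword arguments:
    #
    #     import re
    #
    #     def camel_to_snake(camel):
    #         # e.g. 'defaultTransactionOptions' -> 'default_transaction_options'
    #         return re.sub(r'([A-Z])', r'_\1', camel).lower()
    #
    #     def camel_to_snake_args(arguments):
    #         return dict((camel_to_snake(key), value)
    #                     for key, value in arguments.items())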