def test_mongo_client(self):
        """Write-concern behavior configured via client kwargs and URI.

        Duplicate-_id inserts succeed silently under w=0 (unacknowledged)
        but raise OperationFailure once an acknowledged write concern
        applies, whether set per-collection, per-client, or in the URI.
        """
        pair = client_context.pair
        # Unacknowledged client: duplicate _id inserts do not raise.
        m = rs_or_single_client(w=0)
        coll = m.pymongo_test.write_concern_test
        coll.drop()
        doc = {"_id": ObjectId()}
        coll.insert_one(doc)
        self.assertTrue(coll.insert_one(doc))
        # With acknowledgement re-enabled on the collection, the duplicate
        # key error is reported.
        coll = coll.with_options(write_concern=WriteConcern(w=1))
        self.assertRaises(OperationFailure, coll.insert_one, doc)

        # Acknowledged client with a w=0 override on one collection handle.
        m = rs_or_single_client()
        coll = m.pymongo_test.write_concern_test
        new_coll = coll.with_options(write_concern=WriteConcern(w=0))
        self.assertTrue(new_coll.insert_one(doc))
        self.assertRaises(OperationFailure, coll.insert_one, doc)

        # Same two behaviors, driven purely by the connection URI.
        m = rs_or_single_client("mongodb://%s/" % (pair,),
                                replicaSet=client_context.replica_set_name)

        coll = m.pymongo_test.write_concern_test
        self.assertRaises(OperationFailure, coll.insert_one, doc)
        m = rs_or_single_client("mongodb://%s/?w=0" % (pair,),
                                replicaSet=client_context.replica_set_name)

        coll = m.pymongo_test.write_concern_test
        coll.insert_one(doc)

        # Equality tests
        # Clients built from kwargs and from an equivalent URI compare equal.
        direct = connected(single_client(w=0))
        direct2 = connected(single_client("mongodb://%s/?w=0" % (pair,),
                                          **self.credentials))
        self.assertEqual(direct, direct2)
        self.assertFalse(direct != direct2)
# Exemplo n.º 2
    def test_auth_from_uri(self):
        """Authentication supplied via the connection URI.

        Wrong credentials or the wrong auth database raise
        OperationFailure; correct credentials connect cleanly, eagerly or
        lazily.

        NOTE(review): the "*****:*****" userinfo in the URIs below looks
        like credentials redacted by whatever scraped this file — the
        original test presumably interpolated real user/password pairs.
        Verify against the upstream PyMongo test suite before running.
        """
        self.client.admin.add_user("admin", "pass", roles=["root"])
        # Remove the users we create, even if an assertion fails.
        self.addCleanup(self.client.admin.remove_user, "admin")
        self.addCleanup(remove_all_users, self.client.pymongo_test)

        self.client.pymongo_test.add_user("user", "pass", roles=["userAdmin", "readWrite"])

        with self.assertRaises(OperationFailure):
            connected(rs_or_single_client("mongodb://*****:*****@%s:%d" % (host, port)))

        # No error.
        connected(rs_or_single_client_noauth("mongodb://*****:*****@%s:%d" % (host, port)))

        # Wrong database.
        uri = "mongodb://*****:*****@%s:%d/pymongo_test" % (host, port)
        with self.assertRaises(OperationFailure):
            connected(rs_or_single_client(uri))

        # No error.
        connected(rs_or_single_client_noauth("mongodb://*****:*****@%s:%d/pymongo_test" % (host, port)))

        # Auth with lazy connection.
        rs_or_single_client(
            "mongodb://*****:*****@%s:%d/pymongo_test" % (host, port), connect=False
        ).pymongo_test.test.find_one()

        # Wrong password.
        bad_client = rs_or_single_client("mongodb://*****:*****@%s:%d/pymongo_test" % (host, port), connect=False)

        self.assertRaises(OperationFailure, bad_client.pymongo_test.test.find_one)
# Exemplo n.º 3
    def test_socket_timeout_ms_validation(self):
        """socketTimeoutMS validation.

        Positive values are converted to seconds on the pool options,
        None disables the timeout, and zero, negative, oversized, or
        non-numeric values raise ValueError.
        """
        # A 10 second timeout is stored on the pool in seconds.
        client = rs_or_single_client(socketTimeoutMS=10 * 1000)
        self.assertEqual(10, get_pool(client).opts.socket_timeout)

        # None means "no socket timeout".
        client = connected(rs_or_single_client(socketTimeoutMS=None))
        self.assertEqual(None, get_pool(client).opts.socket_timeout)

        # Everything else is rejected at client construction time.
        for bad_timeout in (0, -1, 1e10, "foo"):
            self.assertRaises(
                ValueError, rs_or_single_client, socketTimeoutMS=bad_timeout)
# Exemplo n.º 4
    def test_lazy_connect_w0(self):
        """Connect-on-demand must work when the first operation is an
        unacknowledged write (exercises _writable_max_wire_version()).

        A dedicated collection avoids races with operations still
        completing from earlier tests.
        """
        operations = [
            lambda coll: coll.insert_one({}),
            lambda coll: coll.update_one({}, {'$set': {'x': 1}}),
            lambda coll: coll.delete_one({}),
        ]
        for index, operation in enumerate(operations):
            # The insert uses an unacknowledged (w=0) client; the update
            # and delete use the default write concern, matching the
            # original insert/update/delete sequence.
            if index == 0:
                lazy_client = rs_or_single_client(connect=False, w=0)
            else:
                lazy_client = rs_or_single_client(connect=False)
            operation(lazy_client.test_lazy_connect_w0.test)
    def test_max_staleness_float(self):
        """A float maxStalenessSeconds is a TypeError as a kwarg but only
        a warning (and is ignored) when supplied in the connection URI."""
        with self.assertRaises(TypeError) as ctx:
            rs_or_single_client(maxStalenessSeconds=1.5,
                                readPreference="nearest")

        self.assertIn("must be an integer", str(ctx.exception))

        # In a URI the bad value merely warns; capture the warning.
        with warnings.catch_warnings(record=True) as ctx:
            warnings.simplefilter("always")
            client = MongoClient("mongodb://host/?maxStalenessSeconds=1.5"
                                 "&readPreference=nearest")

            # Option was ignored.
            self.assertEqual(-1, client.read_preference.max_staleness)
            self.assertIn("must be an integer", str(ctx[0]))
    def test_survive_cursor_not_found(self):
        """GridOut must keep reading even if its server-side cursor dies.

        Kills the getMore cursor mid-file to simulate a server-side
        cursor timeout, then reads the rest of the file without error.
        """
        # By default the find command returns 101 documents in the first batch.
        # Use 102 batches to cause a single getMore.
        chunk_size = 1024
        data = b'd' * (102 * chunk_size)
        listener = EventListener()
        client = rs_or_single_client(event_listeners=[listener])
        db = client.pymongo_test
        with GridIn(db.fs, chunk_size=chunk_size) as infile:
            infile.write(data)

        with GridOut(db.fs, infile._id) as outfile:
            self.assertEqual(len(outfile.readchunk()), chunk_size)

            # Kill the cursor to simulate the cursor timing out on the server
            # when an application spends a long time between two calls to
            # readchunk().
            client._close_cursor_now(
                outfile._GridOut__chunk_iter._cursor.cursor_id,
                _CursorAddress(client.address, db.fs.chunks.full_name))

            # Read the rest of the file without error.
            self.assertEqual(len(outfile.read()), len(data) - chunk_size)

        # Paranoid, ensure that a getMore was actually sent.
        self.assertIn("getMore", listener.started_command_names())
# Exemplo n.º 7
    def test_errors(self):
        """Legacy error-reporting API: error(), previous_error(), and
        reset_error_history() (deprecated getLastError/getPrevError)."""
        with ignore_deprecations():
            # We must call getlasterror, etc. on same socket as last operation.
            db = rs_or_single_client(maxPoolSize=1).pymongo_test
            db.reset_error_history()
            self.assertEqual(None, db.error())
            self.assertEqual(None, db.previous_error())

            db.command("forceerror", check=False)
            self.assertTrue(db.error())
            self.assertTrue(db.previous_error())

            db.command("forceerror", check=False)
            self.assertTrue(db.error())
            prev_error = db.previous_error()
            self.assertEqual(prev_error["nPrev"], 1)
            del prev_error["nPrev"]
            prev_error.pop("lastOp", None)
            error = db.error()
            error.pop("lastOp", None)
            # getLastError includes "connectionId" in recent
            # server versions, getPrevError does not.
            error.pop("connectionId", None)
            # With the legitimately-differing fields stripped, both calls
            # describe the same error.
            self.assertEqual(error, prev_error)

            # A successful operation clears error() but previous_error()
            # still remembers, two operations back.
            db.test.find_one()
            self.assertEqual(None, db.error())
            self.assertTrue(db.previous_error())
            self.assertEqual(db.previous_error()["nPrev"], 2)

            db.reset_error_history()
            self.assertEqual(None, db.error())
            self.assertEqual(None, db.previous_error())
    def test_retry_timeout_raises_original_error(self):
        """A ServerSelectionTimeoutError on the retry attempt raises the
        original error.
        """
        listener = OvertCommandListener()
        client = rs_or_single_client(
            retryWrites=True, event_listeners=[listener])
        self.addCleanup(client.close)
        # Patch topology.select_server: the first selection succeeds, the
        # retry's selection raises.
        topology = client._topology
        select_server = topology.select_server

        def mock_select_server(*args, **kwargs):
            server = select_server(*args, **kwargs)

            def raise_error(*args, **kwargs):
                raise ServerSelectionTimeoutError(
                    'No primary available for writes')
            # Raise ServerSelectionTimeout on the retry attempt.
            topology.select_server = raise_error
            return server

        for method, args, kwargs in retryable_single_statement_ops(
                client.db.retryable_write_test):
            msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs)
            listener.results.clear()
            # Re-arm the mock for every operation in the list.
            topology.select_server = mock_select_server
            with self.assertRaises(ConnectionFailure, msg=msg):
                method(*args, **kwargs)
            # Only the first attempt sent a command; the retry failed in
            # server selection before sending anything.
            self.assertEqual(len(listener.results['started']), 1, msg)
# Exemplo n.º 9
 def setUpClass(cls):
     # NOTE(review): no @classmethod decorator is visible — likely lost
     # in extraction; unittest requires it. Confirm against upstream.
     """Install an EventListener-equipped client for the collation tests,
     suppressing global listeners so only ours records events."""
     cls.listener = EventListener()
     # Save the global listeners so they can be restored later
     # (presumably in tearDownClass — not visible here).
     cls.saved_listeners = monitoring._LISTENERS
     monitoring._LISTENERS = monitoring._Listeners([], [], [], [])
     cls.client = rs_or_single_client(event_listeners=[cls.listener])
     cls.db = cls.client.pymongo_test
     cls.collation = Collation('en_US')
    def test_cursor_transfer(self):
        """A server-side cursor can be adopted by a CommandCursor after
        the original Cursor is 'closed' by a no-op cursor manager."""

        # This is just a test, don't try this at home...

        client = rs_or_single_client()
        db = client.pymongo_test

        db.test.delete_many({})
        db.test.insert_many([{'_id': i} for i in range(200)])

        class CManager(CursorManager):
            def __init__(self, client):
                super(CManager, self).__init__(client)

            def close(self, dummy, dummy2):
                # Do absolutely nothing...
                pass

        # With CManager installed, cursor.close() leaves the server-side
        # cursor alive.
        client.set_cursor_manager(CManager)
        docs = []
        cursor = db.test.find().batch_size(10)
        docs.append(next(cursor))
        cursor.close()
        # Exhausting the closed cursor only yields the locally-buffered
        # remainder of the first batch.
        docs.extend(cursor)
        self.assertEqual(len(docs), 10)
        # Rebuild a CommandCursor around the still-live cursor id and
        # drain the remaining documents.
        cmd_cursor = {'id': cursor.cursor_id, 'firstBatch': []}
        ccursor = CommandCursor(cursor.collection, cmd_cursor,
                                cursor.address, retrieved=cursor.retrieved)
        docs.extend(ccursor)
        self.assertEqual(len(docs), 200)
 def setUp(self):
     """Create a fresh client and reset the fixture collections: one
     fixed document in ``unique`` and ten empty documents in ``test``."""
     self.c = rs_or_single_client()
     database = self.c[DB]
     # Start every test from empty collections.
     for collection_name in ("unique", "test"):
         database[collection_name].drop()
     database.unique.insert_one({"_id": "jesse"})
     database.test.insert_many([{} for _ in range(10)])
    def test_increment_transaction_id_without_sending_command(self):
        """Test that the txnNumber field is properly incremented, even when
        the first attempt fails before sending the command.
        """
        listener = OvertCommandListener()
        client = rs_or_single_client(
            retryWrites=True, event_listeners=[listener])
        topology = client._topology
        select_server = topology.select_server

        def raise_connection_err_select_server(*args, **kwargs):
            # Raise ConnectionFailure on the first attempt and perform
            # normal selection on the retry attempt.
            topology.select_server = select_server
            raise ConnectionFailure('Connection refused')

        for method, args, kwargs in _retryable_single_statement_ops(
                client.db.retryable_write_test):
            listener.results.clear()
            # Re-arm the failing selector before each operation.
            topology.select_server = raise_connection_err_select_server
            with client.start_session() as session:
                # Copy kwargs so the injected session does not leak into
                # the shared ops list.
                kwargs = copy.deepcopy(kwargs)
                kwargs['session'] = session
                msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs)
                initial_txn_id = session._server_session.transaction_id

                # Each operation should fail on the first attempt and succeed
                # on the second.
                method(*args, **kwargs)
                self.assertEqual(len(listener.results['started']), 1, msg)
                retry_cmd = listener.results['started'][0].command
                sent_txn_id = retry_cmd['txnNumber']
                final_txn_id = session._server_session.transaction_id
                # The txnNumber was bumped exactly once despite the failed
                # first attempt never reaching the wire.
                self.assertEqual(Int64(initial_txn_id + 1), sent_txn_id, msg)
                self.assertEqual(sent_txn_id, final_txn_id, msg)
# Exemplo n.º 13
 def setUp(self):
     """Build a listener-equipped client and snapshot the lsids that
     already exist before the test body runs."""
     self.listener = SessionTestListener()
     self.session_checker_listener = SessionTestListener()
     all_listeners = [self.listener, self.session_checker_listener]
     self.client = rs_or_single_client(event_listeners=all_listeners)
     self.db = self.client.pymongo_test
     # Sessions already pooled before the test starts.
     self.initial_lsids = {s['id'] for s in session_ids(self.client)}
    def test_max_pool_size(self):
        """Under thread contention the pool never exceeds maxPoolSize and
        the semaphore counter returns to maxPoolSize afterwards."""
        max_pool_size = 4
        c = rs_or_single_client(maxPoolSize=max_pool_size)
        collection = c[DB].test

        # Need one document.
        collection.drop()
        collection.insert_one({})

        # nthreads had better be much larger than max_pool_size to ensure that
        # max_pool_size sockets are actually required at some point in this
        # test's execution.
        cx_pool = get_pool(c)
        nthreads = 10
        threads = []
        lock = threading.Lock()
        self.n_passed = 0

        def f():
            # Each thread runs several slow queries; the $where delay keeps
            # sockets checked out long enough to force contention.
            for _ in range(5):
                collection.find_one({'$where': delay(0.1)})
                assert len(cx_pool.sockets) <= max_pool_size

            with lock:
                self.n_passed += 1

        for i in range(nthreads):
            t = threading.Thread(target=f)
            threads.append(t)
            t.start()

        joinall(threads)
        self.assertEqual(nthreads, self.n_passed)
        # More than one socket was actually created...
        self.assertTrue(len(cx_pool.sockets) > 1)
        # ...and every checkout was returned to the semaphore.
        self.assertEqual(max_pool_size, cx_pool._socket_semaphore.counter)
# Exemplo n.º 15
    def test_implicit_session_logout(self):
        """Operations keep sending lsids even across logout/re-auth."""
        listener = SessionTestListener()

        # Changing auth doesn't invalidate the session. Start as root.
        client = rs_or_single_client(event_listeners=[listener])
        db = client.pymongo_test

        for name, f in [
            ('bulk_write', lambda: db.collection.bulk_write([InsertOne({})])),
            ('collection_names', db.collection_names),
            ('find_one', db.collection.find_one),
            ('aggregate', lambda: list(db.collection.aggregate([])))
        ]:
            # sub_test closes over the current name/f; it is called within
            # the same loop iteration, so late binding is not an issue here.
            def sub_test():
                listener.results.clear()
                f()
                for event in listener.results['started']:
                    self.assertIn(
                        'lsid', event.command,
                        "%s sent no lsid with %s" % (
                            name, event.command_name))

            # We switch auth without clearing the pool of session ids. The
            # server considers these to be new sessions since it's a new user.
            # The old sessions time out on the server after 30 minutes.
            client.admin.logout()
            db.authenticate('second-user', 'pass')
            sub_test()
            db.logout()
            client.admin.authenticate(db_user, db_pwd)
            sub_test()
    def test_max_pool_size_none(self):
        """maxPoolSize=None (unbounded pool) still serves concurrent
        threads correctly."""
        c = rs_or_single_client(maxPoolSize=None)
        collection = c[DB].test

        # Need one document.
        collection.drop()
        collection.insert_one({})

        cx_pool = get_pool(c)
        nthreads = 10
        threads = []
        lock = threading.Lock()
        self.n_passed = 0

        def f():
            # Slow queries ($where delay) force several sockets to be
            # checked out simultaneously.
            for _ in range(5):
                collection.find_one({'$where': delay(0.1)})

            with lock:
                self.n_passed += 1

        for i in range(nthreads):
            t = threading.Thread(target=f)
            threads.append(t)
            t.start()

        joinall(threads)
        self.assertEqual(nthreads, self.n_passed)
        # Contention actually created more than one socket.
        self.assertTrue(len(cx_pool.sockets) > 1)
# Exemplo n.º 17
    def test_exhaust_getmore_network_error(self):
        # When doing a getmore on an exhaust cursor, the socket stays checked
        # out on success but it's checked in on error to avoid semaphore leaks.
        client = rs_or_single_client(maxPoolSize=1)
        collection = client.pymongo_test.test
        collection.drop()
        collection.insert_many([{} for _ in range(200)])  # More than one batch.
        pool = get_pool(client)
        pool._check_interval_seconds = None  # Never check.

        cursor = collection.find(cursor_type=CursorType.EXHAUST)

        # Initial query succeeds.
        cursor.next()

        # Cause a network error.
        sock_info = cursor._Cursor__exhaust_mgr.sock
        sock_info.sock.close()

        # A getmore fails.
        self.assertRaises(ConnectionFailure, list, cursor)
        self.assertTrue(sock_info.closed)

        # The socket was closed and the semaphore was decremented.
        self.assertNotIn(sock_info, pool.sockets)
        # acquire(blocking=False) succeeding proves the one-socket pool's
        # semaphore was released, i.e. no leak.
        self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
    def test_init_disconnected(self):
        """A connect=False client exposes sane defaults before connecting,
        then auto-connects on first command."""
        c = rs_or_single_client(connect=False)

        # Properties are readable (with default values) pre-connection.
        self.assertIsInstance(c.is_primary, bool)
        self.assertIsInstance(c.is_mongos, bool)
        self.assertIsInstance(c.max_pool_size, int)
        self.assertIsInstance(c.nodes, frozenset)

        self.assertEqual(c.codec_options, CodecOptions())
        self.assertIsInstance(c.max_bson_size, int)
        self.assertIsInstance(c.max_write_batch_size, int)
        self.assertFalse(c.primary)
        self.assertFalse(c.secondaries)

        c.pymongo_test.command('ismaster')  # Auto-connect.

        if client_context.is_rs:
            # The primary's host and port are from the replica set config.
            self.assertIsNotNone(c.address)
        else:
            self.assertEqual(c.address, (host, port))

        # An unresolvable host surfaces ConnectionFailure on first use,
        # not at construction.
        bad_host = "somedomainthatdoesntexist.org"
        c = MongoClient(bad_host, port, connectTimeoutMS=1,
                        serverSelectionTimeoutMS=10)
        self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one)
# Exemplo n.º 19
    def test_auth_network_error(self):
        # Make sure there's no semaphore leak if we get a network error
        # when authenticating a new socket with cached credentials.

        # Get a client with one socket so we detect if it's leaked.
        c = connected(rs_or_single_client(maxPoolSize=1,
                                          waitQueueTimeoutMS=1))

        # Simulate an authenticate() call on a different socket.
        credentials = auth._build_credentials_tuple(
            'DEFAULT', 'admin', db_user, db_pwd, {})

        # connect=False: cache the credential without opening a socket.
        c._cache_credentials('test', credentials, connect=False)

        # Cause a network error on the actual socket.
        pool = get_pool(c)
        socket_info = one(pool.sockets)
        socket_info.sock.close()

        # SocketInfo.check_auth logs in with the new credential, but gets a
        # socket.error. Should be reraised as AutoReconnect.
        self.assertRaises(AutoReconnect, c.test.collection.find_one)

        # No semaphore leak, the pool is allowed to make a new socket.
        c.test.collection.find_one()
# Exemplo n.º 20
    def test_close_kills_cursor_synchronously(self):
        """Cursor.close() sends killCursors immediately, not via the
        periodic background task."""
        # Kill any cursors possibly queued up by previous tests.
        gc.collect()
        self.client._process_periodic_tasks()

        listener = WhiteListEventListener("killCursors")
        results = listener.results
        client = rs_or_single_client(event_listeners=[listener])
        self.addCleanup(client.close)
        coll = client[self.db.name].test_close_kills_cursors

        # Add some test data.
        docs_inserted = 1000
        coll.insert_many([{"i": i} for i in range(docs_inserted)])

        results.clear()

        # Close the cursor while it's still open on the server.
        cursor = coll.find().batch_size(10)
        self.assertTrue(bool(next(cursor)))
        # Small batch size guarantees the cursor is not yet exhausted.
        self.assertLess(cursor.retrieved, docs_inserted)
        cursor.close()

        # Test that the cursor was closed.
        self.assertEqual(1, len(results["started"]))
        self.assertEqual("killCursors", results["started"][0].command_name)
        self.assertEqual(1, len(results["succeeded"]))
        self.assertEqual("killCursors", results["succeeded"][0].command_name)
    def test_list_collection_names_filter(self):
        """list_collection_names sends nameOnly only for name-only filters.

        Filters touching non-name fields (e.g. ``options.capped``) must
        not include nameOnly; pure name filters (or no filter) include it
        on servers >= 3.0.
        """
        listener = OvertCommandListener()
        results = listener.results
        client = rs_or_single_client(event_listeners=[listener])
        db = client[self.db.name]
        db.capped.drop()
        db.create_collection("capped", capped=True, size=4096)
        db.capped.insert_one({})
        db.non_capped.insert_one({})
        self.addCleanup(client.drop_database, db.name)

        # Should not send nameOnly.
        # (Renamed loop variable: the original shadowed the builtin
        # ``filter``.)
        for coll_filter in ({'options.capped': True},
                            {'options.capped': True, 'name': 'capped'}):
            results.clear()
            names = db.list_collection_names(filter=coll_filter)
            self.assertEqual(names, ["capped"])
            self.assertNotIn("nameOnly", results["started"][0].command)

        # Should send nameOnly (except on 2.6).
        for coll_filter in (None, {},
                            {'name': {'$in': ['capped', 'non_capped']}}):
            results.clear()
            names = db.list_collection_names(filter=coll_filter)
            self.assertIn("capped", names)
            self.assertIn("non_capped", names)
            command = results["started"][0].command
            if client_context.version >= (3, 0):
                self.assertIn("nameOnly", command)
                self.assertTrue(command["nameOnly"])
            else:
                self.assertNotIn("nameOnly", command)
# Exemplo n.º 22
    def test_write_concern(self):
        """Write concern defaults, inheritance, and per-collection overrides."""
        # With no options the client carries the default write concern.
        default_client = rs_or_single_client(connect=False)
        self.assertEqual(WriteConcern(), default_client.write_concern)

        # Client-level options propagate to databases and collections.
        client = rs_or_single_client(connect=False, w=2, wtimeout=1000)
        expected = WriteConcern(w=2, wtimeout=1000)
        self.assertEqual(expected, client.write_concern)

        database = client.pymongo_test
        self.assertEqual(expected, database.write_concern)
        self.assertEqual(expected, database.test.write_concern)

        # An override on get_collection affects only that collection.
        override = WriteConcern(j=True)
        collection = database.get_collection('test', write_concern=override)
        self.assertEqual(override, collection.write_concern)
        self.assertEqual(expected, database.write_concern)
# Exemplo n.º 23
 def setUp(self):
     """Create a client with two session listeners and record the lsids
     that already exist before the test runs."""
     self.listener = SessionTestListener()
     self.session_checker_listener = SessionTestListener()
     listeners = [self.listener, self.session_checker_listener]
     self.client = rs_or_single_client(event_listeners=listeners)
     # Always close the client, even when the test fails.
     self.addCleanup(self.client.close)
     self.db = self.client.pymongo_test
     self.initial_lsids = {s['id'] for s in session_ids(self.client)}
 def setUpClass(cls):
     # NOTE(review): no @classmethod decorator is visible — likely lost
     # in extraction; unittest requires it. Confirm against upstream.
     """Shared fixtures for MMAPv1 retryable-write tests: fast heartbeats
     and a retryWrites-enabled client."""
     super(TestRetryableWritesMMAPv1, cls).setUpClass()
     # Speed up the tests by decreasing the heartbeat frequency.
     cls.knobs = client_knobs(heartbeat_frequency=0.1,
                              min_heartbeat_interval=0.1)
     cls.knobs.enable()
     cls.client = rs_or_single_client(retryWrites=True)
     cls.db = cls.client.pymongo_test
# Exemplo n.º 25
    def test_write_concern(self):
        """Verify write-concern propagation from client to db to collection."""
        self.assertEqual(
            WriteConcern(),
            rs_or_single_client(connect=False).write_concern)

        wc = WriteConcern(w=2, wtimeout=1000)
        c = rs_or_single_client(connect=False, w=2, wtimeout=1000)
        # Client, database, and collection all inherit the same value.
        for obj in (c, c.pymongo_test, c.pymongo_test.test):
            self.assertEqual(wc, obj.write_concern)

        db = c.pymongo_test
        custom = WriteConcern(j=True)
        coll = db.get_collection('test', write_concern=custom)
        self.assertEqual(custom, coll.write_concern)
        # The database's own write concern is untouched by the override.
        self.assertEqual(wc, db.write_concern)
# Exemplo n.º 26
 def test_3_uri_connection_pool_options(self):
     """Pool options given as URI query parameters round-trip onto the
     pool's non-default options."""
     opts = '&'.join(['%s=%s' % (k, v)
                      for k, v in self.POOL_OPTIONS.items()])
     uri = 'mongodb://%s/?%s' % (client_context.pair, opts)
     client = rs_or_single_client(uri, **self.credentials)
     self.addCleanup(client.close)
     pool_opts = get_pool(client).opts
     self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS)
 def setUp(self):
     """Reset the pooling fixtures: one known document in ``unique`` and
     ten empty documents in ``test``."""
     super(_TestPoolingBase, self).setUp()
     self.c = rs_or_single_client()
     database = self.c[DB]
     # Drop both collections so each test starts clean.
     database.unique.drop()
     database.test.drop()
     database.unique.insert_one({"_id": "jesse"})
     empty_docs = [{} for _ in range(10)]
     database.test.insert_many(empty_docs)
    def assertWriteOpsRaise(self, write_concern, expected_exception):
        """Assert every write helper raises under ``write_concern``.

        insert_many and bulk_write are expected to raise BulkWriteError
        instead of ``expected_exception``; some operations are only
        checked on servers that support write concern for them.
        """
        wc = write_concern.document
        # Set socket timeout to avoid indefinite stalls
        client = rs_or_single_client(
            w=wc['w'], wTimeoutMS=wc['wtimeout'], socketTimeoutMS=30000)
        db = client.get_database('pymongo_test')
        coll = db.test

        def insert_command():
            # Raw insert command with an explicit writeConcern, parsing
            # write concern errors into exceptions.
            coll.database.command(
                'insert', 'new_collection', documents=[{}],
                writeConcern=write_concern.document,
                parse_write_concern_error=True)

        ops = [
            ('insert_one', lambda: coll.insert_one({})),
            ('insert_many', lambda: coll.insert_many([{}, {}])),
            ('update_one', lambda: coll.update_one({}, {'$set': {'x': 1}})),
            ('update_many', lambda: coll.update_many({}, {'$set': {'x': 1}})),
            ('delete_one', lambda: coll.delete_one({})),
            ('delete_many', lambda: coll.delete_many({})),
            ('bulk_write', lambda: coll.bulk_write([InsertOne({})])),
            ('command', insert_command),
        ]
        ops_require_34 = [
            ('aggregate', lambda: coll.aggregate([{'$out': 'out'}])),
            ('create', lambda: db.create_collection('new')),
            ('rename', lambda: coll.rename('new')),
            ('drop', lambda: db.new.drop()),
        ]
        # NOTE(review): ">" excludes exactly version 3.4 even though the
        # list name says 3.4 — confirm this gate is intentional.
        if client_context.version > (3, 4):
            ops.extend(ops_require_34)
            # SERVER-34776: dropDatabase does not respect wtimeout in 3.6.
            if client_context.version[:2] != (3, 6):
                ops.append(('drop_database', lambda: client.drop_database(db)))
            # SERVER-46668: createIndexes does not respect wtimeout in 4.4+.
            if client_context.version <= (4, 3):
                ops.extend([
                    ('create_index',
                     lambda: coll.create_index([('a', DESCENDING)])),
                    ('create_indexes',
                     lambda: coll.create_indexes([IndexModel('b')])),
                    ('drop_index',
                     lambda: coll.drop_index([('a', DESCENDING)])),
                ])

        for name, f in ops:
            # Ensure insert_many and bulk_write still raise BulkWriteError.
            if name in ('insert_many', 'bulk_write'):
                expected = BulkWriteError
            else:
                expected = expected_exception
            with self.assertRaises(expected, msg=name) as cm:
                f()
            if expected == BulkWriteError:
                # The bulk error must carry the write concern errors.
                bulk_result = cm.exception.details
                wc_errors = bulk_result['writeConcernErrors']
                self.assertTrue(wc_errors)
# Exemplo n.º 29
    def test_list_collection_names_single_socket(self):
        """Listing many collections over a one-socket pool must not hang."""
        client = rs_or_single_client(maxPoolSize=1)
        db_name = 'test_collection_names_single_socket'
        client.drop_database(db_name)
        database = client[db_name]
        # Create enough collections that listing requires multiple batches.
        for index in range(200):
            database.create_collection(str(index))

        # Must complete without deadlocking on the single socket.
        database.list_collection_names()
        client.drop_database(db_name)
# Exemplo n.º 30
    def setUpClass(cls):
        """Session-test fixtures: a second client and unredacted commands."""
        super(TestSession, cls).setUpClass()
        # Create a second client so we can make sure clients cannot share
        # sessions.
        cls.client2 = rs_or_single_client()

        # Redact no commands, so we can test user-admin commands have "lsid".
        # Keep a copy so the sensitive-command set can be restored later.
        cls.sensitive_commands = monitoring._SENSITIVE_COMMANDS.copy()
        monitoring._SENSITIVE_COMMANDS.clear()
# Exemplo n.º 31
    def setUpClass(cls):
        """Session-test fixtures (duplicate extraction of the same hook)."""
        super(TestSession, cls).setUpClass()
        # Create a second client so we can make sure clients cannot share
        # sessions.
        cls.client2 = rs_or_single_client()

        # Redact no commands, so we can test user-admin commands have "lsid".
        # Keep a copy so the sensitive-command set can be restored later.
        cls.sensitive_commands = monitoring._SENSITIVE_COMMANDS.copy()
        monitoring._SENSITIVE_COMMANDS.clear()
 def setUpClass(cls):
     # NOTE(review): no @classmethod decorator is visible — likely lost
     # in extraction; unittest requires it. Confirm against upstream.
     """Collation-test fixtures: listener-equipped client and a warnings
     context that silences DeprecationWarning for the whole class."""
     super(TestCollation, cls).setUpClass()
     cls.listener = EventListener()
     cls.client = rs_or_single_client(event_listeners=[cls.listener])
     cls.db = cls.client.pymongo_test
     cls.collation = Collation('en_US')
     # Entered manually; presumably exited in tearDownClass (not visible).
     cls.warn_context = warnings.catch_warnings()
     cls.warn_context.__enter__()
     warnings.simplefilter("ignore", DeprecationWarning)
# Exemplo n.º 33
    def test_socket_timeout_ms_validation(self):
        """Validate socketTimeoutMS: positive values convert to seconds,
        None disables the timeout, anything else raises ValueError."""
        valid = rs_or_single_client(socketTimeoutMS=10 * 1000)
        self.assertEqual(10, get_pool(valid).opts.socket_timeout)

        no_timeout = connected(rs_or_single_client(socketTimeoutMS=None))
        self.assertEqual(None, get_pool(no_timeout).opts.socket_timeout)

        # Zero, negative, absurdly large, and non-numeric are rejected.
        for invalid in (0, -1, 1e10, 'foo'):
            self.assertRaises(
                ValueError, rs_or_single_client, socketTimeoutMS=invalid)
# Exemplo n.º 34
    def test_cursor(self):
        """Every cursor method sends the correct lsid, with explicit,
        ended, and implicit sessions."""
        listener = SessionTestListener()
        client = rs_or_single_client(event_listeners=[listener])
        client.drop_database('pymongo_test')
        self.addCleanup(client.drop_database, 'pymongo_test')

        coll = client.pymongo_test.collection
        coll.insert_many([{} for _ in range(1000)])
        self.addCleanup(self.client.pymongo_test.collection.drop)

        # Test all cursor methods.
        ops = [
            ('find', lambda session: list(coll.find(session=session))),
            ('getitem', lambda session: coll.find(session=session)[0]),
            ('count', lambda session: coll.find(session=session).count()),
            ('distinct',
             lambda session: coll.find(session=session).distinct('a')),
            ('explain', lambda session: coll.find(session=session).explain()),
        ]

        for name, f in ops:
            with client.start_session() as s:
                listener.results.clear()
                f(session=s)
                # Every command of the operation must carry this
                # session's lsid.
                self.assertGreaterEqual(len(listener.results['started']), 1)
                for event in listener.results['started']:
                    self.assertTrue(
                        'lsid' in event.command,
                        "%s sent no lsid with %s" % (name, event.command_name))

                    self.assertEqual(
                        s.session_id, event.command['lsid'],
                        "%s sent wrong lsid with %s" %
                        (name, event.command_name))

            # Using a session after its `with` block ends is an error.
            with self.assertRaisesRegex(InvalidOperation, "ended session"):
                f(session=s)

        # No explicit session.
        for name, f in ops:
            listener.results.clear()
            f(session=None)
            event0 = listener.first_command_started()
            self.assertTrue(
                'lsid' in event0.command,
                "%s sent no lsid with %s" % (name, event0.command_name))

            # All commands of one operation share the implicit session.
            lsid = event0.command['lsid']

            for event in listener.results['started'][1:]:
                self.assertTrue(
                    'lsid' in event.command,
                    "%s sent no lsid with %s" % (name, event.command_name))

                self.assertEqual(
                    lsid, event.command['lsid'],
                    "%s sent wrong lsid with %s" % (name, event.command_name))
 def setUpClass(cls):
     # NOTE(review): no @classmethod decorator is visible — likely lost
     # in extraction; unittest requires it. Confirm against upstream.
     """Collation fixtures: isolate global listeners, build a
     listener-equipped client, and silence DeprecationWarning."""
     cls.listener = EventListener()
     # Save the global listeners so they can be restored later
     # (presumably in tearDownClass — not visible here).
     cls.saved_listeners = monitoring._LISTENERS
     monitoring._LISTENERS = monitoring._Listeners([], [], [], [])
     cls.client = rs_or_single_client(event_listeners=[cls.listener])
     cls.db = cls.client.pymongo_test
     cls.collation = Collation('en_US')
     # Entered manually; presumably exited in tearDownClass (not visible).
     cls.warn_context = warnings.catch_warnings()
     cls.warn_context.__enter__()
     warnings.simplefilter("ignore", DeprecationWarning)
 def setUpClass(cls):
     """Create a retryWrites=True client with fast heartbeats and a
     command listener for the retryable-writes tests."""
     super(TestRetryableWrites, cls).setUpClass()
     # Speed up the tests by decreasing the heartbeat frequency.
     cls.knobs = client_knobs(heartbeat_frequency=0.1,
                              min_heartbeat_interval=0.1)
     cls.knobs.enable()
     cls.listener = OvertCommandListener()
     cls.client = rs_or_single_client(retryWrites=True,
                                      event_listeners=[cls.listener])
     cls.db = cls.client.pymongo_test
Exemplo n.º 37
0
 def setUpClass(cls):
     """Set up class-level fixtures: a command-event listener, a monitored
     test client/database, and suppression of DeprecationWarning."""
     cls.listener = EventListener()
     # Save the global listener registry so it can be restored later
     # (presumably in tearDownClass — not visible here).
     cls.saved_listeners = monitoring._LISTENERS
     # Replace the global listeners with an empty set so only cls.listener
     # receives events from cls.client.
     monitoring._LISTENERS = monitoring._Listeners([], [], [], [])
     cls.client = rs_or_single_client(event_listeners=[cls.listener])
     cls.db = cls.client.pymongo_test
     cls.collation = Collation('en_US')
     # Enter a warnings context manually; it must be exited in teardown.
     cls.warn_context = warnings.catch_warnings()
     cls.warn_context.__enter__()
     warnings.simplefilter("ignore", DeprecationWarning)
 def setUpClass(cls):
     """Create a retryWrites=True client with fast heartbeats and a
     command listener for the retryable-writes tests."""
     super(TestRetryableWrites, cls).setUpClass()
     # Speed up the tests by decreasing the heartbeat frequency.
     cls.knobs = client_knobs(heartbeat_frequency=0.1,
                              min_heartbeat_interval=0.1)
     cls.knobs.enable()
     cls.listener = OvertCommandListener()
     cls.client = rs_or_single_client(
         retryWrites=True, event_listeners=[cls.listener])
     cls.db = cls.client.pymongo_test
Exemplo n.º 39
0
    def test_use_after_close(self):
        """A closed client with auto-encryption must reject further use."""
        encryption_opts = AutoEncryptionOpts(KMS_PROVIDERS,
                                             'keyvault.datakeys')
        encrypted_client = rs_or_single_client(
            auto_encryption_opts=encryption_opts)
        self.addCleanup(encrypted_client.close)

        # One successful command proves the client works before close().
        encrypted_client.admin.command('isMaster')
        encrypted_client.close()
        # Any further command must raise InvalidOperation.
        with self.assertRaisesRegex(InvalidOperation,
                                    'Cannot use MongoClient after close'):
            encrypted_client.admin.command('isMaster')
Exemplo n.º 40
0
    def test_collection_names_single_socket(self):
        """Database.collection_names must not exhaust a one-socket pool."""
        dbname = 'test_collection_names_single_socket'
        client = rs_or_single_client(maxPoolSize=1)
        client.drop_database(dbname)
        database = client[dbname]
        # Create plenty of collections so listing them is non-trivial.
        for n in range(200):
            database.create_collection(str(n))

        database.collection_names()  # Must not hang.
        client.drop_database(dbname)
Exemplo n.º 41
0
    def test_session_authenticate_multiple(self):
        """start_session must fail when multiple users are authenticated."""
        # Logged in as root.
        client = rs_or_single_client()
        test_db = client.pymongo_test
        test_db.add_user('second-user', 'pass')
        self.addCleanup(test_db.remove_user, 'second-user')

        # Authenticate a second credential on the same client...
        test_db.authenticate('second-user', 'pass')

        # ...after which explicit sessions are invalid.
        with self.assertRaises(InvalidOperation):
            client.start_session()
Exemplo n.º 42
0
    def test_collection(self):
        """Run each Collection method through _test_ops, which verifies
        session/lsid handling for every (method, args, kwargs) entry."""
        listener = SessionTestListener()
        client = rs_or_single_client(event_listeners=[listener])
        client.drop_database('pymongo_test')
        self.addCleanup(client.drop_database, 'pymongo_test')

        coll = client.pymongo_test.collection
        self.addCleanup(self.client.pymongo_test.collection.drop)

        # Test some collection methods - the rest are in test_cursor.
        # Each entry is (bound method, positional args, kwargs); order
        # matters, e.g. drop_collection must follow rename.
        self._test_ops(
            client,
            (coll.drop, [], {}),
            (coll.bulk_write, [[InsertOne({})]], {}),
            (coll.insert_one, [{}], {}),
            (coll.insert_many, [[{}, {}]], {}),
            (coll.replace_one, [{}, {}], {}),
            (coll.update_one, [{}, {
                '$set': {
                    'a': 1
                }
            }], {}),
            (coll.update_many, [{}, {
                '$set': {
                    'a': 1
                }
            }], {}),
            (coll.delete_one, [{}], {}),
            (coll.delete_many, [{}], {}),
            (coll.map_reduce, ['function() {}', 'function() {}', 'output'
                               ], {}),
            (coll.inline_map_reduce, ['function() {}', 'function() {}'], {}),
            (coll.find_one_and_replace, [{}, {}], {}),
            (coll.find_one_and_update, [{}, {
                '$set': {
                    'a': 1
                }
            }], {}),
            (coll.find_one_and_delete, [{}, {}], {}),
            (coll.rename, ['collection2'], {}),
            # Drop collection2 between tests of "rename", above.
            (client.pymongo_test.drop_collection, ['collection2'], {}),
            (coll.distinct, ['a'], {}),
            (coll.find_one, [], {}),
            (coll.count, [], {}),
            (coll.create_indexes, [[IndexModel('a')]], {}),
            (coll.create_index, ['a'], {}),
            (coll.drop_index, ['a_1'], {}),
            (coll.drop_indexes, [], {}),
            (coll.reindex, [], {}),
            (coll.list_indexes, [], {}),
            (coll.index_information, [], {}),
            (coll.options, [], {}),
            (coll.aggregate, [[]], {}))
    def test_omit_default_read_write_concern(self):
        """Commands from a client with default read/write concern must not
        include readConcern or writeConcern in what is sent to the server."""
        listener = EventListener()
        # Client with default readConcern and writeConcern
        client = rs_or_single_client(event_listeners=[listener])
        collection = client.pymongo_test.collection
        # Prepare for tests of find() and aggregate().
        collection.insert_many([{} for _ in range(10)])
        self.addCleanup(collection.drop)
        self.addCleanup(client.pymongo_test.collection2.drop)

        # Commands MUST NOT send the default read/write concern to the server.

        def rename_and_drop():
            # Ensure collection exists.
            collection.insert_one({})
            collection.rename('collection2')
            client.pymongo_test.collection2.drop()

        def insert_command_default_write_concern():
            # Run 'insert' via Database.command with an explicit default
            # WriteConcern(); it must still be omitted on the wire.
            collection.database.command('insert',
                                        'collection',
                                        documents=[{}],
                                        write_concern=WriteConcern())

        # (name, callable) pairs covering reads, writes, bulk, rename/drop,
        # and a raw command.
        ops = [('aggregate', lambda: list(collection.aggregate([]))),
               ('find', lambda: list(collection.find())),
               ('insert_one', lambda: collection.insert_one({})),
               ('update_one',
                lambda: collection.update_one({}, {'$set': {
                    'x': 1
                }})),
               ('update_many',
                lambda: collection.update_many({}, {'$set': {
                    'x': 1
                }})), ('delete_one', lambda: collection.delete_one({})),
               ('delete_many', lambda: collection.delete_many({})),
               ('bulk_write', lambda: collection.bulk_write([InsertOne({})])),
               ('rename_and_drop', rename_and_drop),
               ('command', insert_command_default_write_concern)]

        for name, f in ops:
            listener.results.clear()
            f()

            # Inspect every command the operation started.
            self.assertGreaterEqual(len(listener.results['started']), 1)
            for i, event in enumerate(listener.results['started']):
                self.assertNotIn(
                    'readConcern', event.command,
                    "%s sent default readConcern with %s" %
                    (name, event.command_name))
                self.assertNotIn(
                    'writeConcern', event.command,
                    "%s sent default writeConcern with %s" %
                    (name, event.command_name))
 def test_command_options(self):
     """Commands from a ServerApi('1') client must all carry the API version."""
     cmd_listener = OvertCommandListener()
     versioned_client = rs_or_single_client(server_api=ServerApi('1'),
                                            event_listeners=[cmd_listener])
     self.addCleanup(versioned_client.close)
     collection = versioned_client.test.test
     # Enough documents that find() needs several getMore batches.
     collection.insert_many([{} for _ in range(100)])
     self.addCleanup(collection.delete_many, {})
     list(collection.find(batch_size=25))
     versioned_client.admin.command('ping')
     self.assertServerApiInAllCommands(cmd_listener.results['started'])
Exemplo n.º 45
0
 def setUpClass(cls):
     """Prepare SDAM monitoring fixtures: fast event publishing, a
     server/topology listener, and a monitored test client."""
     super(TestSdamMonitoring, cls).setUpClass()
     # Speed up the tests by decreasing the event publish frequency.
     cls.knobs = client_knobs(events_queue_frequency=0.1)
     cls.knobs.enable()
     cls.listener = ServerAndTopologyEventListener()
     # Only enable retryable writes when the server supports transactions.
     retry_writes = client_context.supports_transactions()
     cls.test_client = rs_or_single_client(
         event_listeners=[cls.listener], retryWrites=retry_writes)
     cls.coll = cls.test_client[cls.client.db.name].test
     # Insert one document so the collection exists before tests run.
     cls.coll.insert_one({})
    def test_try_next_runs_one_getmore(self):
        """ChangeStream.try_next must issue exactly one getMore per call,
        including immediately after a resume."""
        listener = EventListener()
        client = rs_or_single_client(event_listeners=[listener])
        # Connect to the cluster.
        client.admin.command('ping')
        listener.results.clear()
        # ChangeStreams only read majority committed data so use w:majority.
        coll = self.watched_collection().with_options(
            write_concern=WriteConcern("majority"))
        coll.drop()
        # Create the watched collection before starting the change stream to
        # skip any "create" events.
        coll.insert_one({'_id': 1})
        self.addCleanup(coll.drop)
        with self.change_stream_with_client(
                client, max_await_time_ms=250) as stream:
            # Opening the stream runs a single aggregate.
            self.assertEqual(listener.started_command_names(), ["aggregate"])
            listener.results.clear()

            # Confirm that only a single getMore is run even when no documents
            # are returned.
            self.assertIsNone(stream.try_next())
            self.assertEqual(listener.started_command_names(), ["getMore"])
            listener.results.clear()
            self.assertIsNone(stream.try_next())
            self.assertEqual(listener.started_command_names(), ["getMore"])
            listener.results.clear()

            # Get at least one change before resuming.
            coll.insert_one({'_id': 2})
            change = stream.try_next()
            self.assertEqual(change['_id'], stream._resume_token)
            listener.results.clear()

            # Cause the next request to initiate the resume process.
            self.kill_change_stream_cursor(stream)
            listener.results.clear()

            # The sequence should be:
            # - getMore, fail
            # - resume with aggregate command
            # - no results, return immediately without another getMore
            self.assertIsNone(stream.try_next())
            self.assertEqual(
                listener.started_command_names(), ["getMore", "aggregate"])
            listener.results.clear()

            # Stream still works after a resume.
            coll.insert_one({'_id': 3})
            change = stream.try_next()
            self.assertEqual(change['_id'], stream._resume_token)
            self.assertEqual(listener.started_command_names(), ["getMore"])
            self.assertIsNone(stream.try_next())
Exemplo n.º 47
0
    def test_aggregate_error(self):
        """A failed aggregate must still return its session to the pool."""
        cmd_listener = SessionTestListener()
        client = rs_or_single_client(event_listeners=[cmd_listener])
        collection = client.pymongo_test.collection
        # '$badOperation' is not a valid pipeline stage.
        with self.assertRaises(OperationFailure):
            collection.aggregate([{'$badOperation': {'bar': 1}}])

        started = cmd_listener.first_command_started()
        self.assertEqual('aggregate', started.command_name)
        session_id = started.command['lsid']
        # Session was returned to pool despite error.
        self.assertIn(session_id, session_ids(client))
Exemplo n.º 48
0
    def test_streaming_rtt(self):
        """Verify round-trip-time tracking with streaming heartbeats: RTT
        grows under a delayed isMaster, yet the server stays selectable and
        no extra ServerDescriptionChanged events are published."""
        listener = ServerEventListener()
        hb_listener = HeartbeatEventListener()
        # On Windows, RTT can actually be 0.0 because time.time() only has
        # 1-15 millisecond resolution. We need to delay the initial isMaster
        # to ensure that RTT is never zero.
        name = 'streamingRttTest'
        delay_ismaster = {
            'configureFailPoint': 'failCommand',
            'mode': {
                'times': 1000
            },
            'data': {
                'failCommands': ['isMaster'],
                'blockConnection': True,
                'blockTimeMS': 20,
                # This can be uncommented after SERVER-49220 is fixed.
                # 'appName': name,
            },
        }
        with self.fail_point(delay_ismaster):
            client = rs_or_single_client(
                event_listeners=[listener, hb_listener],
                heartbeatFrequencyMS=500,
                appName=name)
            self.addCleanup(client.close)
            # Force a connection.
            client.admin.command('ping')
            address = client.address

        # Increase the delay and scope the fail point to this client only.
        delay_ismaster['data']['blockTimeMS'] = 500
        delay_ismaster['data']['appName'] = name
        with self.fail_point(delay_ismaster):

            def rtt_exceeds_250_ms():
                # XXX: Add a public TopologyDescription getter to MongoClient?
                topology = client._topology
                sd = topology.description.server_descriptions()[address]
                return sd.round_trip_time > 0.250

            wait_until(rtt_exceeds_250_ms, 'exceed 250ms RTT')

        # Server should be selectable.
        client.admin.command('ping')

        def changed_event(event):
            return (event.server_address == address and isinstance(
                event, monitoring.ServerDescriptionChangedEvent))

        # There should only be one event published, for the initial discovery.
        events = listener.matching(changed_event)
        self.assertEqual(1, len(events))
        self.assertGreater(events[0].new_description.round_trip_time, 0)
Exemplo n.º 49
0
    def test_write_concern(self):
        """write_concern is inherited client -> database -> collection and
        can be overridden at each level without leaking upward."""
        # A plain client carries the default write concern.
        client = rs_or_single_client(connect=False)
        self.assertEqual(client.write_concern, WriteConcern())

        # Options passed to the client become its write concern.
        client = rs_or_single_client(connect=False, w=2, wtimeout=1000)
        expected_wc = WriteConcern(w=2, wtimeout=1000)
        self.assertEqual(client.write_concern, expected_wc)

        # Can we override back to the server default?
        database = client.get_database('pymongo_test',
                                       write_concern=WriteConcern())
        self.assertEqual(WriteConcern(), database.write_concern)

        # Attribute access inherits the client's write concern.
        database = client.pymongo_test
        self.assertEqual(database.write_concern, expected_wc)
        collection = database.test
        self.assertEqual(collection.write_concern, expected_wc)

        # A collection-level override does not change the database's.
        override_wc = WriteConcern(j=True)
        collection = database.get_collection('test',
                                             write_concern=override_wc)
        self.assertEqual(collection.write_concern, override_wc)
        self.assertEqual(database.write_concern, expected_wc)
Exemplo n.º 50
0
    def test_try_next_runs_one_getmore(self):
        """ChangeStream.try_next must issue exactly one getMore per call,
        including immediately after a resume."""
        listener = EventListener()
        client = rs_or_single_client(event_listeners=[listener])
        # Connect to the cluster.
        client.admin.command('ping')
        listener.results.clear()
        # ChangeStreams only read majority committed data so use w:majority.
        coll = self.watched_collection().with_options(
            write_concern=WriteConcern("majority"))
        coll.drop()
        # Create the watched collection before starting the change stream to
        # skip any "create" events.
        coll.insert_one({'_id': 1})
        self.addCleanup(coll.drop)
        with self.change_stream_with_client(client,
                                            max_await_time_ms=250) as stream:
            # Opening the stream runs a single aggregate.
            self.assertEqual(listener.started_command_names(), ["aggregate"])
            listener.results.clear()

            # Confirm that only a single getMore is run even when no documents
            # are returned.
            self.assertIsNone(stream.try_next())
            self.assertEqual(listener.started_command_names(), ["getMore"])
            listener.results.clear()
            self.assertIsNone(stream.try_next())
            self.assertEqual(listener.started_command_names(), ["getMore"])
            listener.results.clear()

            # Get at least one change before resuming.
            coll.insert_one({'_id': 2})
            change = stream.try_next()
            self.assertEqual(change['_id'], stream._resume_token)
            listener.results.clear()

            # Cause the next request to initiate the resume process.
            self.kill_change_stream_cursor(stream)
            listener.results.clear()

            # The sequence should be:
            # - getMore, fail
            # - resume with aggregate command
            # - no results, return immediately without another getMore
            self.assertIsNone(stream.try_next())
            self.assertEqual(listener.started_command_names(),
                             ["getMore", "aggregate"])
            listener.results.clear()

            # Stream still works after a resume.
            coll.insert_one({'_id': 3})
            change = stream.try_next()
            self.assertEqual(change['_id'], stream._resume_token)
            self.assertEqual(listener.started_command_names(), ["getMore"])
            self.assertIsNone(stream.try_next())
Exemplo n.º 51
0
    def test_write_concern(self):
        """write_concern is inherited client -> database -> collection and
        can be overridden at each level without leaking upward."""
        # A plain client carries the default write concern.
        client = rs_or_single_client(connect=False)
        self.assertEqual(client.write_concern, WriteConcern())

        # URI-style wTimeoutMS maps to WriteConcern(wtimeout=...).
        client = rs_or_single_client(connect=False, w=2, wTimeoutMS=1000)
        expected_wc = WriteConcern(w=2, wtimeout=1000)
        self.assertEqual(client.write_concern, expected_wc)

        # Can we override back to the server default?
        database = client.get_database('pymongo_test',
                                       write_concern=WriteConcern())
        self.assertEqual(WriteConcern(), database.write_concern)

        # Attribute access inherits the client's write concern.
        database = client.pymongo_test
        self.assertEqual(database.write_concern, expected_wc)
        collection = database.test
        self.assertEqual(collection.write_concern, expected_wc)

        # A collection-level override does not change the database's.
        override_wc = WriteConcern(j=True)
        collection = database.get_collection('test',
                                             write_concern=override_wc)
        self.assertEqual(collection.write_concern, override_wc)
        self.assertEqual(database.write_concern, expected_wc)
Exemplo n.º 52
0
    def test_last_status(self):
        """Exercise legacy getlasterror via Database.last_status()."""
        # maxPoolSize=1 guarantees getlasterror runs on the same socket
        # as the preceding unacknowledged (w=0) operation.
        database = rs_or_single_client(maxPoolSize=1).pymongo_test
        legacy_coll = database.test
        legacy_coll.remove({})
        legacy_coll.save({"i": 1})

        # Matching update: updatedExisting must be reported as true.
        legacy_coll.update({"i": 1}, {"$set": {"i": 2}}, w=0)
        self.assertTrue(database.last_status()["updatedExisting"])

        # After the update i == 2, so this filter matches nothing.
        legacy_coll.update({"i": 1}, {"$set": {"i": 500}}, w=0)
        self.assertFalse(database.last_status()["updatedExisting"])
Exemplo n.º 53
0
    def test_init_disconnected(self):
        """Properties of a connect=False client must block until connected
        and then report sane values; a bad host must raise on first use."""
        c = rs_or_single_client(connect=False)
        # is_primary causes client to block until connected
        self.assertIsInstance(c.is_primary, bool)

        c = rs_or_single_client(connect=False)
        self.assertIsInstance(c.is_mongos, bool)
        c = rs_or_single_client(connect=False)
        self.assertIsInstance(c.max_pool_size, int)
        self.assertIsInstance(c.nodes, frozenset)

        c = rs_or_single_client(connect=False)
        self.assertEqual(c.codec_options, CodecOptions())
        self.assertIsInstance(c.max_bson_size, int)
        c = rs_or_single_client(connect=False)
        # primary/secondaries are empty/None before topology-type checks.
        self.assertFalse(c.primary)
        self.assertFalse(c.secondaries)
        c = rs_or_single_client(connect=False)
        self.assertIsInstance(c.max_write_batch_size, int)

        if client_context.is_rs:
            # The primary's host and port are from the replica set config.
            self.assertIsNotNone(c.address)
        else:
            self.assertEqual(c.address, (host, port))

        # A nonexistent host fails on first operation, not at construction.
        bad_host = "somedomainthatdoesntexist.org"
        c = MongoClient(bad_host,
                        port,
                        connectTimeoutMS=1,
                        serverSelectionTimeoutMS=10)
        self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one)
Exemplo n.º 54
0
    def test_auth_from_uri(self):
        """Authentication supplied via URI: wrong credentials/database fail,
        correct ones succeed, lazily and eagerly.

        NOTE(review): the credentials in these URIs appear redacted
        ('*****') — likely scrubbed by an export tool; verify against the
        original test source before relying on exact URI contents.
        """
        self.client.admin.add_user("admin", "pass", roles=["root"])
        self.addCleanup(self.client.admin.remove_user, 'admin')
        self.addCleanup(remove_all_users, self.client.pymongo_test)

        self.client.pymongo_test.add_user("user",
                                          "pass",
                                          roles=['userAdmin', 'readWrite'])

        with self.assertRaises(OperationFailure):
            connected(rs_or_single_client("mongodb://*****:*****@%s:%d" %
                                          (host, port)))

        # No error.
        connected(
            rs_or_single_client_noauth("mongodb://*****:*****@%s:%d" %
                                       (host, port)))

        # Wrong database.
        uri = "mongodb://*****:*****@%s:%d/pymongo_test" % (host, port)
        with self.assertRaises(OperationFailure):
            connected(rs_or_single_client(uri))

        # No error.
        connected(
            rs_or_single_client_noauth(
                "mongodb://*****:*****@%s:%d/pymongo_test" % (host, port)))

        # Auth with lazy connection.
        rs_or_single_client("mongodb://*****:*****@%s:%d/pymongo_test" %
                            (host, port),
                            connect=False).pymongo_test.test.find_one()

        # Wrong password.
        bad_client = rs_or_single_client(
            "mongodb://*****:*****@%s:%d/pymongo_test" % (host, port),
            connect=False)

        # The failure surfaces on first operation, not at construction.
        self.assertRaises(OperationFailure,
                          bad_client.pymongo_test.test.find_one)
 def test_last_write_date(self):
     """Verify lastWriteDate is parsed and advances between writes.

     From max-staleness-tests.rst, "Parse lastWriteDate".
     """
     client = rs_or_single_client(heartbeatFrequencyMS=500)
     client.pymongo_test.test.insert_one({})
     # Wait for heartbeats to refresh the server description.
     time.sleep(2)
     server = client._topology.select_server(writable_server_selector)
     last_write = server.description.last_write_date
     self.assertTrue(last_write)
     client.pymongo_test.test.insert_one({})
     time.sleep(2)
     server = client._topology.select_server(writable_server_selector)
     # lastWriteDate must move forward, but by less than 10 (sanity bound).
     self.assertGreater(server.description.last_write_date, last_write)
     self.assertLess(server.description.last_write_date, last_write + 10)
Exemplo n.º 56
0
    def test_killcursors(self):
        """Explicitly closing a cursor must go through _test_ops' session
        checks (i.e. killCursors carries the session's lsid)."""
        cmd_listener = SessionTestListener()
        client = rs_or_single_client(event_listeners=[cmd_listener])
        collection = client.pymongo_test.collection
        collection.insert_many([{} for _ in range(10)])
        self.addCleanup(self.client.pymongo_test.collection.drop)

        def explicit_close(session=None):
            # batch_size=2 leaves the server cursor open after one doc,
            # so close() must actually kill it.
            cursor = collection.find(batch_size=2, session=session)
            next(cursor)
            cursor.close()

        self._test_ops(client, (explicit_close, [], {}))
Exemplo n.º 57
0
    def test_views_are_prohibited(self):
        """Auto-encryption must refuse to encrypt writes to a view."""
        self.client.db.view.drop()
        self.client.db.create_collection('view', viewOn='coll')
        self.addCleanup(self.client.db.view.drop)

        encryption_opts = AutoEncryptionOpts(self.kms_providers(),
                                             'keyvault.datakeys')
        encrypted_client = rs_or_single_client(
            auto_encryption_opts=encryption_opts,
            uuidRepresentation='standard')
        self.addCleanup(encrypted_client.close)

        # Inserting through the encrypted client into a view must fail.
        with self.assertRaisesRegex(EncryptionError,
                                    'cannot auto encrypt a view'):
            encrypted_client.db.view.insert_one({})
Exemplo n.º 58
0
 def test_raise_max_wire_version_error(self):
     """With auto-encryption enabled, every operation against a too-old
     server must raise a clear ConfigurationError."""
     encryption_opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys')
     client = rs_or_single_client(auto_encryption_opts=encryption_opts)
     self.addCleanup(client.close)
     msg = 'Auto-encryption requires a minimum MongoDB version of 4.2'
     # Writes, commands, reads, and bulk writes must all raise.
     operations = [
         lambda: client.test.test.insert_one({}),
         lambda: client.admin.command('isMaster'),
         lambda: client.test.test.find_one({}),
         lambda: client.test.test.bulk_write([InsertOne({})]),
     ]
     for operation in operations:
         with self.assertRaisesRegex(ConfigurationError, msg):
             operation()
Exemplo n.º 59
0
 def test_last_write_date(self):
     """Verify lastWriteDate is parsed and advances between writes.

     From max-staleness-tests.rst, "Parse lastWriteDate".
     """
     client = rs_or_single_client(heartbeatFrequencyMS=500)
     client.pymongo_test.test.insert_one({})
     # Wait for heartbeats to refresh the server description.
     time.sleep(1)
     server = client._topology.select_server(writable_server_selector)
     last_write = server.description.last_write_date
     self.assertTrue(last_write)
     client.pymongo_test.test.insert_one({})
     time.sleep(1)
     server = client._topology.select_server(writable_server_selector)
     # lastWriteDate must move forward, but by less than 10 (sanity bound).
     self.assertGreater(server.description.last_write_date, last_write)
     self.assertLess(server.description.last_write_date, last_write + 10)
Exemplo n.º 60
0
    def test_last_status(self):
        """Exercise legacy getlasterror via Database.last_status()."""
        # maxPoolSize=1 guarantees getlasterror runs on the same socket
        # as the preceding unacknowledged (w=0) operation.
        database = rs_or_single_client(maxPoolSize=1).pymongo_test
        legacy_coll = database.test_last_status
        legacy_coll.remove({})
        legacy_coll.save({"i": 1})

        # Matching update: updatedExisting must be reported as true.
        legacy_coll.update({"i": 1}, {"$set": {"i": 2}}, w=0)
        self.assertTrue(database.last_status()["updatedExisting"])

        # After the update i == 2, so this filter matches nothing.
        legacy_coll.update({"i": 1}, {"$set": {"i": 500}}, w=0)
        self.assertFalse(database.last_status()["updatedExisting"])