Example #1
    def test_ensure_index_threaded(self):
        coll = self.db.threaded_index_creation
        index_docs = []

        class Indexer(threading.Thread):
            def run(self):
                coll.ensure_index('foo0')
                coll.ensure_index('foo1')
                coll.ensure_index('foo2')
                index_docs.append(coll.index_information())

        try:
            threads = []
            for _ in range(10):
                t = Indexer()
                t.setDaemon(True)
                threads.append(t)

            for thread in threads:
                thread.start()

            joinall(threads)

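            # Each thread snapshotted index_information() only after ensuring
            # all three indexes exist, so every snapshot must list the same
            # indexes.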
            first = index_docs[0]
            for index_doc in index_docs[1:]:
                self.assertEqual(index_doc, first)
        finally:
            coll.drop()
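
Every example on this page comes from PyMongo's threading tests and ends with a call to a joinall() helper that the page never shows. A minimal sketch of the behavior the tests rely on (the name is real, but the timeout and assertion are assumptions, not PyMongo's actual helper):

    def joinall(threads):
        # Join each thread with a generous timeout so that a deadlocked
        # worker fails the test instead of hanging the test runner.
        for t in threads:
            t.join(300)
            assert not t.is_alive(), "thread %s hung" % t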
Example #2
    def test_ensure_purge_index_threaded(self):
        coll = self.db.threaded_index_creation

        class Indexer(threading.Thread):
            def run(self):
                coll.ensure_index('foo')
                try:
                    coll.drop_index('foo')
                except OperationFailure:
                    # The index may have already been dropped.
                    pass
                coll.ensure_index('foo')
                coll.drop_indexes()
                coll.create_index('foo')

        try:
            threads = []
            for _ in range(10):
                t = Indexer()
                t.setDaemon(True)
                threads.append(t)

            for thread in threads:
                thread.start()

            joinall(threads)

            self.assertTrue('foo_1' in coll.index_information())
        finally:
            coll.drop()
Example #3
    def test_max_pool_size(self):
        max_pool_size = 4
        c = rs_or_single_client(maxPoolSize=max_pool_size)
        collection = c[DB].test

        # Need one document.
        collection.drop()
        collection.insert_one({})

        # nthreads had better be much larger than max_pool_size to ensure that
        # max_pool_size sockets are actually required at some point in this
        # test's execution.
        cx_pool = get_pool(c)
        nthreads = 10
        threads = []
        lock = threading.Lock()
        self.n_passed = 0

        def f():
            for _ in range(5):
                collection.find_one({'$where': delay(0.1)})
                assert len(cx_pool.sockets) <= max_pool_size

            with lock:
                self.n_passed += 1

        for i in range(nthreads):
            t = threading.Thread(target=f)
            threads.append(t)
            t.start()

        joinall(threads)
        self.assertEqual(nthreads, self.n_passed)
        self.assertTrue(len(cx_pool.sockets) > 1)
        self.assertEqual(0, cx_pool.requests)
Example #4
    def test_max_pool_size(self):
        max_pool_size = 4
        c = rs_or_single_client(maxPoolSize=max_pool_size)
        collection = c[DB].test

        # Need one document.
        collection.drop()
        collection.insert_one({})

        # nthreads had better be much larger than max_pool_size to ensure that
        # max_pool_size sockets are actually required at some point in this
        # test's execution.
        cx_pool = get_pool(c)
        nthreads = 10
        threads = []
        lock = threading.Lock()
        self.n_passed = 0

        def f():
            for _ in range(5):
                collection.find_one({'$where': delay(0.1)})
                assert len(cx_pool.sockets) <= max_pool_size

            with lock:
                self.n_passed += 1

        for i in range(nthreads):
            t = threading.Thread(target=f)
            threads.append(t)
            t.start()

        joinall(threads)
        self.assertEqual(nthreads, self.n_passed)
        self.assertTrue(len(cx_pool.sockets) > 1)
        self.assertEqual(max_pool_size, cx_pool._socket_semaphore.counter)
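
Examples #3 and #4 are identical except for the final assertion (two generations of PyMongo's pool internals). Both lean on a delay() helper that is not defined on this page; judging from the $where clause, it returns a JavaScript predicate that sleeps server-side, so each find_one() holds its socket long enough to push the pool to its limit. A plausible sketch, not PyMongo's actual test utility:

    def delay(sec):
        # A $where predicate that sleeps on the server before matching,
        # keeping the query's socket checked out for roughly `sec` seconds.
        return "function() { sleep(%f * 1000); return true; }" % sec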
Example #5
    def test_ensure_unique_index_threaded(self):
        coll = self.db.test_unique_threaded
        coll.drop()
        coll.insert_many([{'foo': i} for i in range(10000)])

        class Indexer(threading.Thread):
            def run(self):
                try:
                    coll.ensure_index('foo', unique=True)
                    coll.insert_one({'foo': 'bar'})
                    coll.insert_one({'foo': 'bar'})
                except OperationFailure:
                    pass

        threads = []
        for _ in range(10):
            t = Indexer()
            t.setDaemon(True)
            threads.append(t)

        for i in range(10):
            threads[i].start()

        joinall(threads)

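        # Only one insert of {'foo': 'bar'} can ever succeed once the unique
        # index exists; every other attempt raises OperationFailure and is
        # swallowed, so the 10000 originals plus exactly one extra remain.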
        self.assertEqual(10001, coll.count())
        coll.drop()
Example #6
    def test_server_disconnect(self):
        # PYTHON-345: we need to make sure that threads' request sockets are
        # closed by disconnect().
        #
        # 1. Create a client with auto_start_request=True
        # 2. Start N threads and do a find() in each to get a request socket
        # 3. Pause all threads
        # 4. In the main thread close all sockets, including threads' request
        #       sockets
        # 5. In main thread, do a find(), which raises AutoReconnect and resets
        #       pool
        # 6. Resume all threads, do a find() in them
        #
        # If we've fixed PYTHON-345, then only one AutoReconnect is raised,
        # and all the threads get new request sockets.
        cx = get_client(auto_start_request=True)
        collection = cx.db.pymongo_test

        # acquire a request socket for the main thread
        collection.find_one()
        pool = get_pool(collection.database.connection)
        socket_info = pool._get_request_state()
        assert isinstance(socket_info, SocketInfo)
        request_sock = socket_info.sock

        state = FindPauseFind.create_shared_state(nthreads=40)

        threads = [FindPauseFind(collection, state) for _ in range(state.nthreads)]

        # Each thread does a find(), thus acquiring a request socket
        for t in threads:
            t.start()

        # Wait for the threads to reach the rendezvous
        FindPauseFind.wait_for_rendezvous(state)

        try:
            # Simulate an event that closes all sockets, e.g. primary stepdown
            for t in threads:
                t.request_sock.close()

            # Finally, ensure the main thread's socket's last_checkout is
            # updated:
            collection.find_one()

            # ... and close it:
            request_sock.close()

            # Doing an operation on the client raises an AutoReconnect and
            # resets the pool behind the scenes
            self.assertRaises(AutoReconnect, collection.find_one)

        finally:
            # Let threads do a second find()
            FindPauseFind.resume_after_rendezvous(state)

        joinall(threads)

        for t in threads:
            self.assertTrue(t.passed, "%s threw exception" % t)
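
FindPauseFind and its shared state are defined elsewhere in the test module. The synchronization it needs is an ordinary barrier-then-gate pattern: each worker does a find(), checks in, blocks until the main thread releases it, then does a second find(). A minimal sketch of that pattern (threading.Barrier requires Python 3, and every name here is illustrative, not PyMongo's; the real helper also records the request socket it acquired, as t.request_sock above, which is omitted here):

    import threading

    class SharedState(object):
        def __init__(self, nthreads):
            self.nthreads = nthreads
            # +1 so the main thread takes part in the rendezvous too.
            self.barrier = threading.Barrier(nthreads + 1)
            self.resume = threading.Event()

    class FindPauseFind(threading.Thread):
        def __init__(self, collection, state):
            super(FindPauseFind, self).__init__()
            self.collection = collection
            self.state = state
            self.passed = False

        def run(self):
            self.collection.find_one()  # first find: binds a request socket
            self.state.barrier.wait()   # rendezvous: all workers are paused
            self.state.resume.wait()    # gate: wait for the main thread
            self.collection.find_one()  # second find: gets a fresh socket
            self.passed = True

Under this sketch, wait_for_rendezvous() is just state.barrier.wait() in the main thread, and resume_after_rendezvous() is state.resume.set().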
Example #7
    def test_auto_auth_login(self):
        client = self._get_client()
        self.assertRaises(OperationFailure, client.auth_test.test.find_one)

        # Admin auth
        client = self._get_client()
        client.admin.authenticate("admin-user", "password")

        nthreads = 10
        threads = []
        for _ in xrange(nthreads):
            t = AutoAuthenticateThreads(client.auth_test.test, 100)
            t.start()
            threads.append(t)

        joinall(threads)

        for t in threads:
            self.assertTrue(t.success)

        # Database-specific auth
        client = self._get_client()
        client.auth_test.authenticate("test-user", "password")

        threads = []
        for _ in xrange(nthreads):
            t = AutoAuthenticateThreads(client.auth_test.test, 100)
            t.start()
            threads.append(t)

        joinall(threads)

        for t in threads:
            self.assertTrue(t.success)
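
AutoAuthenticateThreads takes a collection and an operation count, works the collection from its own thread, and records whether it finished cleanly. One plausible definition consistent with its use here (the exact operations per iteration are an assumption):

    import threading

    class AutoAuthenticateThreads(threading.Thread):
        def __init__(self, collection, num):
            super(AutoAuthenticateThreads, self).__init__()
            self.collection = collection
            self.num = num
            self.success = False
            self.setDaemon(True)

        def run(self):
            # Queries run on the already-authenticated client; success is
            # recorded only if no operation raised.
            for i in range(self.num):
                self.collection.find_one({'i': i})
            self.success = True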
Example #10
    def test_max_pool_size_none(self):
        c = rs_or_single_client(maxPoolSize=None)
        collection = c[DB].test

        # Need one document.
        collection.drop()
        collection.insert_one({})

        cx_pool = get_pool(c)
        nthreads = 10
        threads = []
        lock = threading.Lock()
        self.n_passed = 0

        def f():
            for _ in range(5):
                collection.find_one({'$where': delay(0.1)})

            with lock:
                self.n_passed += 1

        for i in range(nthreads):
            t = threading.Thread(target=f)
            threads.append(t)
            t.start()

        joinall(threads)
        self.assertEqual(nthreads, self.n_passed)
        self.assertTrue(len(cx_pool.sockets) > 1)
Example #15
    def test_threading(self):
        self.db.drop_collection("test")
        self.db.test.insert_many([{"x": i} for i in range(1000)])

        threads = []
        for i in range(10):
            t = SaveAndFind(self.db.test)
            t.start()
            threads.append(t)

        joinall(threads)
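
SaveAndFind, the worker behind the test_threading examples, is not shown. Given the fixture of documents {"x": 0} through {"x": 999}, one plausible definition is a thread that scans the collection and checks an invariant (the sum check is an assumption based on that fixture):

    import threading

    class SaveAndFind(threading.Thread):
        def __init__(self, collection):
            super(SaveAndFind, self).__init__()
            self.collection = collection
            self.setDaemon(True)

        def run(self):
            # sum(range(1000)) == 499500 no matter how the ten
            # threads' scans interleave.
            total = sum(doc["x"] for doc in self.collection.find())
            assert total == 499500, "sum was %d, not 499500" % total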
Example #16
    def test_threaded_reads(self):
        self.fs.upload_from_stream("test", b"hello")

        threads = []
        results = []
        for i in range(10):
            threads.append(JustRead(self.fs, 10, results))
            threads[i].start()

        joinall(threads)

        self.assertEqual(100 * [b'hello'], results)
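
JustRead takes the filesystem object, a per-thread read count, and a shared results list; ten threads times ten reads account for the 100 results the assertion expects. A minimal sketch for the GridFSBucket variant used here (the classic-GridFS examples below would read via fs.get("test") instead):

    import threading

    class JustRead(threading.Thread):
        def __init__(self, fs, n, results):
            super(JustRead, self).__init__()
            self.fs = fs
            self.n = n
            self.results = results
            self.setDaemon(True)

        def run(self):
            # list.append is atomic under the GIL, so the shared results
            # list needs no extra locking.
            for _ in range(self.n):
                grid_out = self.fs.open_download_stream_by_name("test")
                self.results.append(grid_out.read())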
Example #17
    def test_threading(self):
        self.db.drop_collection("test")
        for i in xrange(1000):
            self.db.test.save({"x": i})

        threads = []
        for i in range(10):
            t = SaveAndFind(self.db.test)
            t.start()
            threads.append(t)

        joinall(threads)
Example #18
    def test_threaded_reads(self):
        self.fs.put(b("hello"), _id="test")

        threads = []
        results = []
        for i in range(10):
            threads.append(JustRead(self.fs, 10, results))
            threads[i].start()

        joinall(threads)

        self.assertEqual(100 * [b("hello")], results)
Example #19
    def test_threaded_reads(self):
        self.fs.put(b"hello", _id="test")

        threads = []
        results = []
        for i in range(10):
            threads.append(JustRead(self.fs, 10, results))
            threads[i].start()

        joinall(threads)

        self.assertEqual(100 * [b'hello'], results)
Example #21
    def test_threaded_writes(self):
        threads = []
        for i in range(10):
            threads.append(JustWrite(self.fs, 10))
            threads[i].start()

        joinall(threads)

        f = self.fs.get_last_version("test")
        self.assertEqual(f.read(), b("hello"))

        # Should have created 100 versions of 'test' file
        self.assertEqual(100, self.db.fs.files.find({"filename": "test"}).count())
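
JustWrite writes the "test" file repeatedly; because GridFS versions files by filename, ten threads times ten writes leave the 100 versions the assertion counts. A plausible sketch for the classic GridFS API used in this example:

    import threading

    class JustWrite(threading.Thread):
        def __init__(self, fs, n):
            super(JustWrite, self).__init__()
            self.fs = fs
            self.n = n
            self.setDaemon(True)

        def run(self):
            # Each new_file()/close() cycle stores a fresh version of
            # "test" in fs.files and fs.chunks.
            for _ in range(self.n):
                grid_in = self.fs.new_file(filename="test")
                grid_in.write(b"hello")
                grid_in.close()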
Example #22
    def test_threaded_writes(self):
        threads = []
        for i in range(10):
            threads.append(JustWrite(self.fs, 10))
            threads[i].start()

        joinall(threads)

        f = self.fs.get_last_version("test")
        self.assertEqual(f.read(), b"hello")

        # Should have created 100 versions of 'test' file
        self.assertEqual(
            100, self.db.fs.files.count_documents({'filename': 'test'}))
Example #24
    def test_threaded_writes(self):
        threads = []
        for i in range(10):
            threads.append(JustWrite(self.fs, 10))
            threads[i].start()

        joinall(threads)

        fstr = self.fs.open_download_stream_by_name("test")
        self.assertEqual(fstr.read(), b"hello")

        # Should have created 100 versions of 'test' file
        self.assertEqual(
            100,
            self.db.fs.files.find({'filename': 'test'}).count()
        )
Example #25
    def test_auto_auth_login(self):
        client = rs_or_single_client_noauth()
        self.assertRaises(OperationFailure, client.auth_test.test.find_one)

        # Admin auth
        client.admin.authenticate(db_user, db_pwd)

        nthreads = 10
        threads = []
        for _ in range(nthreads):
            t = AutoAuthenticateThreads(client.auth_test.test, 10)
            t.start()
            threads.append(t)

        joinall(threads)

        for t in threads:
            self.assertTrue(t.success)
Example #27
    def test_low_network_timeout(self):
        db = None
        i = 0
        n = 10
        while db is None and i < n:
            try:
                db = get_connection(network_timeout=0.0001).pymongo_test
            except AutoReconnect:
                i += 1
        if i == n:
            raise SkipTest()

        threads = []
        for _ in range(4):
            t = IgnoreAutoReconnect(db.test, 100)
            t.start()
            threads.append(t)

        joinall(threads)
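
IgnoreAutoReconnect is not shown; with a 0.0001-second network timeout nearly every operation times out, and the test only cares that the resulting AutoReconnect errors surface cleanly in each thread. A minimal sketch:

    import threading
    from pymongo.errors import AutoReconnect

    class IgnoreAutoReconnect(threading.Thread):
        def __init__(self, collection, n):
            super(IgnoreAutoReconnect, self).__init__()
            self.collection = collection
            self.n = n
            self.setDaemon(True)

        def run(self):
            # Swallow the expected timeouts; anything else propagates
            # and fails the test.
            for _ in range(self.n):
                try:
                    self.collection.find_one()
                except AutoReconnect:
                    pass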
Example #28
    def test_auto_auth_login(self):
        # Create the database upfront to workaround SERVER-39167.
        self.client.auth_test.test.insert_one({})
        self.addCleanup(self.client.drop_database, "auth_test")
        client = rs_or_single_client_noauth()
        self.assertRaises(OperationFailure, client.auth_test.test.find_one)

        # Admin auth
        client.admin.authenticate(db_user, db_pwd)

        nthreads = 10
        threads = []
        for _ in range(nthreads):
            t = AutoAuthenticateThreads(client.auth_test.test, 10)
            t.start()
            threads.append(t)

        joinall(threads)

        for t in threads:
            self.assertTrue(t.success)
Example #30
    def test_server_disconnect(self):
        # PYTHON-345: we need to make sure that threads' request sockets are
        # closed by disconnect().
        #
        # 1. Create a client with auto_start_request=True
        # 2. Start N threads and do a find() in each to get a request socket
        # 3. Pause all threads
        # 4. In the main thread close all sockets, including threads' request
        #       sockets
        # 5. In main thread, do a find(), which raises AutoReconnect and resets
        #       pool
        # 6. Resume all threads, do a find() in them
        #
        # If we've fixed PYTHON-345, then only one AutoReconnect is raised,
        # and all the threads get new request sockets.
        cx = get_client(auto_start_request=True)
        collection = cx.db.pymongo_test

        # acquire a request socket for the main thread
        collection.find_one()
        pool = get_pool(collection.database.connection)
        socket_info = pool._get_request_state()
        assert isinstance(socket_info, SocketInfo)
        request_sock = socket_info.sock

        state = FindPauseFind.create_shared_state(nthreads=10)

        threads = [
            FindPauseFind(collection, state) for _ in range(state.nthreads)
        ]

        # Each thread does a find(), thus acquiring a request socket
        for t in threads:
            t.start()

        # Wait for the threads to reach the rendezvous
        FindPauseFind.wait_for_rendezvous(state)

        try:
            # Simulate an event that closes all sockets, e.g. primary stepdown
            for t in threads:
                t.request_sock.close()

            # Finally, ensure the main thread's socket's last_checkout is
            # updated:
            collection.find_one()

            # ... and close it:
            request_sock.close()

            # Doing an operation on the client raises an AutoReconnect and
            # resets the pool behind the scenes
            self.assertRaises(AutoReconnect, collection.find_one)

        finally:
            # Let threads do a second find()
            FindPauseFind.resume_after_rendezvous(state)

        joinall(threads)

        for t in threads:
            self.assertTrue(t.passed, "%s threw exception" % t)