Example #1
    def checkAutoReconnect(self):
        # Verify auto-reconnect
        db = DB(self._storage)
        try:
            c1 = db.open()
            r = c1.root()
            r['alpha'] = 1
            transaction.commit()
            c1.close()

            c1._storage._load_conn.close()
            c1._storage._store_conn.close()
            # ZODB5 implicitly calls sync
            # immediately when a connection is opened;
            # fake that here for older releases.
            c2 = db.open()
            self.assertIs(c2, c1)
            c2.sync()
            r = c2.root()
            self.assertEqual(r['alpha'], 1)
            r['beta'] = PersistentMapping()
            c2.add(r['beta'])
            transaction.commit()
            c2.close()
        finally:
            db.close()
Example #2
    def checkAutoReconnectOnSync(self):
        # Verify auto-reconnect.
        db = DB(self._storage)
        try:
            c1 = db.open()
            r = c1.root()

            c1._storage._load_conn.close()
            c1._storage.sync()
            # ZODB5 calls sync when a connection is opened. Our monkey
            # patch on a Connection makes sure that works in earlier
            # versions, but we don't have that patch on ZODB5. So test
            # the storage directly. NOTE: The load connection must be open
            # to trigger the actual sync.

            r = c1.root()
            r['alpha'] = 1
            transaction.commit()
            c1.close()

            c1._storage._load_conn.close()
            c1._storage._store_conn.close()

            c2 = db.open()
            self.assertIs(c2, c1)

            r = c2.root()
            self.assertEqual(r['alpha'], 1)
            r['beta'] = PersistentMapping()
            c2.add(r['beta'])
            transaction.commit()
            c2.close()
        finally:
            db.close()
Example #3
    def checkAutoReconnect(self):
        # Verify auto-reconnect
        db = DB(self._storage)
        try:
            c1 = db.open()
            r = c1.root()
            r['alpha'] = 1
            transaction.commit()
            c1.close()

            c1._storage._load_conn.close()
            c1._storage._store_conn.close()
            # ZODB5 implicitly calls sync
            # immediately when a connection is opened;
            # fake that here for older releases.
            c2 = db.open()
            self.assertIs(c2, c1)
            c2.sync()
            r = c2.root()
            self.assertEqual(r['alpha'], 1)
            r['beta'] = PersistentMapping()
            c2.add(r['beta'])
            transaction.commit()
            c2.close()
        finally:
            db.close()
Example #4
    def testLargeBlob(self):
        # Large blobs are chunked into multiple pieces; we want to know
        # if they come out the same way they went in.
        db = DB(self._storage)
        conn = db.open()
        blob = conn.root()[1] = ZODB.blob.Blob()
        size = sizeof_fmt(self.testsize)
        self._log('Creating %s blob file' % size)
        blob_file = blob.open('w')
        signature = random_file(self.testsize, blob_file)
        blob_file.close()
        self._log('Committing %s blob file' % size)
        transaction.commit()
        conn.close()

        # Clear the cache
        for base, _dir, files in os.walk('.'):
            for f in files:
                if f.endswith('.blob'):
                    ZODB.blob.remove_committed(os.path.join(base, f))

        # Re-download blob
        self._log('Caching %s blob file' % size)
        conn = db.open()
        with conn.root()[1].open('r') as blob:
            self._log('Creating signature for %s blob cache' % size)
            self.assertEqual(md5sum(blob), signature)

        conn.close()
        db.close()
Example #5
    def checkConcurrentUpdates2Storages(self):
        self._storage = storage1 = self.openClientStorage()
        db1 = DB(storage1)
        storage2 = self.openClientStorage()
        db2 = DB(storage2)
        stop = threading.Event()

        cn = db1.open()
        tree = cn.root()["tree"] = OOBTree()
        transaction.commit()
        cn.close()

        # Run two threads that update the BTree
        cd = {}
        t1 = self.StressThread(self, db1, stop, 1, cd, 1)
        t2 = self.StressThread(self, db2, stop, 2, cd, 2)
        self.go(stop, cd, t1, t2)

        while db1.lastTransaction() != db2.lastTransaction():
            db1._storage.sync()
            db2._storage.sync()

        cn = db1.open()
        tree = cn.root()["tree"]
        self._check_tree(cn, tree)
        self._check_threads(tree, t1, t2)

        cn.close()
        db1.close()
        db2.close()
Example #6
    def checkCrossDBInvalidations(self):
        db1 = DB(self.openClientStorage())
        c1 = db1.open()
        r1 = c1.root()

        r1["a"] = MinPO("a")
        transaction.commit()
        self.assertEqual(r1._p_state, 0)  # up-to-date

        db2 = DB(self.openClientStorage())
        r2 = db2.open().root()

        self.assertEqual(r2["a"].value, "a")

        r2["b"] = MinPO("b")
        transaction.commit()

        # Make sure the invalidation is received in the other client.
        # We've had problems with this timing out on "slow" and/or "very
        # busy" machines, so we increase the sleep time on each trip, and
        # are willing to wait quite a long time.
        for i in range(20):
            c1.sync()
            if r1._p_state == -1:
                break
            time.sleep(i / 10.0)
        self.assertEqual(r1._p_state, -1)  # ghost

        r1.keys()  # unghostify
        self.assertEqual(r1._p_serial, r2._p_serial)
        self.assertEqual(r1["b"].value, "b")

        db2.close()
        db1.close()
Example #7
    def checkConcurrentUpdates2StoragesMT(self):
        self._storage = storage1 = self.openClientStorage()
        db1 = DB(storage1)
        db2 = DB(self.openClientStorage())
        stop = threading.Event()

        cn = db1.open()
        tree = cn.root()["tree"] = OOBTree()
        transaction.commit()
        cn.close()

        # Run three threads that update the BTree.
        # Two of the threads share a single storage so that it
        # is possible for both threads to read the same object
        # at the same time.

        cd = {}
        t1 = self.StressThread(self, db1, stop, 1, cd, 1, 3)
        t2 = self.StressThread(self, db2, stop, 2, cd, 2, 3, 0.01)
        t3 = self.StressThread(self, db2, stop, 3, cd, 3, 3, 0.01)
        self.go(stop, cd, t1, t2, t3)

        while db1.lastTransaction() != db2.lastTransaction():
            time.sleep(.1)

        time.sleep(.1)
        cn = db1.open()
        tree = cn.root()["tree"]
        self._check_tree(cn, tree)
        self._check_threads(tree, t1, t2, t3)

        cn.close()
        db1.close()
        db2.close()
Example #8
    def checkCrossDBInvalidations(self):
        db1 = DB(self.openClientStorage())
        c1 = db1.open()
        r1 = c1.root()

        r1["a"] = MinPO("a")
        transaction.commit()
        self.assertEqual(r1._p_state, 0) # up-to-date

        db2 = DB(self.openClientStorage())
        r2 = db2.open().root()

        self.assertEqual(r2["a"].value, "a")

        r2["b"] = MinPO("b")
        transaction.commit()

        # Make sure the invalidation is received in the other client.
        # We've had problems with this timing out on "slow" and/or "very
        # busy" machines, so we increase the sleep time on each trip, and
        # are willing to wait quite a long time.
        for i in range(20):
            c1.sync()
            if r1._p_state == -1:
                break
            time.sleep(i / 10.0)
        self.assertEqual(r1._p_state, -1) # ghost

        r1.keys() # unghostify
        self.assertEqual(r1._p_serial, r2._p_serial)
        self.assertEqual(r1["b"].value, "b")

        db2.close()
        db1.close()
Example #9
    def testLargeBlob(self):
        # Large blobs are chunked into multiple pieces; we want to know
        # if they come out the same way they went in.
        db = DB(self._storage)
        conn = db.open()
        blob = conn.root()[1] = ZODB.blob.Blob()
        blob_file = blob.open('w')
        signature = self._random_file(self.testsize, blob_file)
        blob_file.close()
        transaction.commit()
        conn.close()

        # Clear the cache
        for base, _dir, files in os.walk('.'):
            for f in files:
                if f.endswith('.blob'):
                    ZODB.blob.remove_committed(os.path.join(base, f))

        # Re-download blob
        conn = db.open()
        with conn.root()[1].open('r') as blob:
            self.assertEqual(self._md5sum(blob), signature)

        conn.close()
        db.close()
Example #10
    def checkAutoReconnectOnSync(self):
        # Verify auto-reconnect.
        db = DB(self._storage)
        try:
            c1 = db.open()
            r = c1.root()

            c1._storage._load_conn.close()
            c1._storage.sync()
            # ZODB5 calls sync when a connection is opened. Our monkey
            # patch on a Connection makes sure that works in earlier
            # versions, but we don't have that patch on ZODB5. So test
            # the storage directly. NOTE: The load connection must be open
            # to trigger the actual sync.

            r = c1.root()
            r['alpha'] = 1
            transaction.commit()
            c1.close()

            c1._storage._load_conn.close()
            c1._storage._store_conn.close()

            c2 = db.open()
            self.assertIs(c2, c1)

            r = c2.root()
            self.assertEqual(r['alpha'], 1)
            r['beta'] = PersistentMapping()
            c2.add(r['beta'])
            transaction.commit()
            c2.close()
        finally:
            db.close()
Example #11
    def checkConcurrentUpdates2Storages(self):
        self._storage = storage1 = self.openClientStorage()
        storage2 = self.openClientStorage()
        db1 = DB(storage1)
        db2 = DB(storage2)
        stop = threading.Event()

        cn = db1.open()
        tree = cn.root()["tree"] = OOBTree()
        transaction.commit()
        cn.close()

        # Run two threads that update the BTree
        cd = {}
        t1 = self.StressThread(db1, stop, 1, cd, 1)
        t2 = self.StressThread(db2, stop, 2, cd, 2)
        self.go(stop, cd, t1, t2)

        while db1.lastTransaction() != db2.lastTransaction():
            db1._storage.sync()
            db2._storage.sync()

        cn = db1.open()
        tree = cn.root()["tree"]
        self._check_tree(cn, tree)
        self._check_threads(tree, t1, t2)

        cn.close()
        db1.close()
        db2.close()
Example #12
    def checkCrossConnectionInvalidation(self):
        # Verify connections see updated state at txn boundaries
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['myobj'] = 'yes'
            c2 = db.open()
            r2 = c2.root()
            self.assertNotIn('myobj', r2)

            storage = c1._storage
            t = transaction.Transaction()
            t.description = u'invalidation test'
            c1.tpc_begin(t)
            c1.commit(t)
            storage.tpc_vote(storage._transaction)
            storage.tpc_finish(storage._transaction)

            self.assertNotIn('myobj', r2)
            c2.sync()
            self.assertIn('myobj', r2)
            self.assertEqual(r2['myobj'], 'yes')
        finally:
            db.close()
Example #13
    def checkHistoricalConnection(self):
        import persistent
        import ZODB.POSException
        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        root['first'] = persistent.mapping.PersistentMapping(count=0)
        transaction.commit()

        time_of_first_transaction = conn._storage.lastTransaction()

        root['second'] = persistent.mapping.PersistentMapping()
        root['first']['count'] += 1
        transaction.commit()

        transaction1 = transaction.TransactionManager(explicit=True)

        historical_conn = db.open(transaction_manager=transaction1,
                                  at=time_of_first_transaction)

        eq = self.assertEqual

        # regular connection sees present:

        eq(sorted(conn.root().keys()), ['first', 'second'])
        eq(conn.root()['first']['count'], 1)

        # historical connection sees past:
        transaction1.begin()
        eq(sorted(historical_conn.root().keys()), ['first'])
        eq(historical_conn.root()['first']['count'], 0)

        # Can't change history:

        historical_conn.root()['first']['count'] += 1
        eq(historical_conn.root()['first']['count'], 1)
        self.assertRaises(ZODB.POSException.ReadOnlyHistoryError,
                          transaction1.commit)
        transaction1.abort()
        eq(historical_conn.root()['first']['count'], 0)

        # Making a change in the present
        root['third'] = 3
        transaction.commit()

        # Is also not reflected in the past, even after explicit sync,
        transaction1.begin()
        eq(sorted(historical_conn.root().keys()), ['first'])
        eq(historical_conn.root()['first']['count'], 0)
        # Since we cannot change anything, we cannot join a transaction either.
        # The afterCompletion call is never invoked.
        historical_conn._storage._storage.afterCompletion = lambda: self.fail(
            "Not called")
        transaction1.commit()

        historical_conn.close()
        conn.close()
        db.close()
Example #14
    def checkClosingNestedDatabasesWorks(self):
        # This tests for the error described in
        # https://github.com/zopefoundation/ZODB/issues/45
        db1 = DB(self._storage)
        db2 = DB(None, databases=db1.databases, database_name='2')
        db1.open().get_connection('2')
        db1.close()
        db2.close()
Example #15
    def checkCachePolling(self):
        # NOTE: This test still sets poll_interval, a deprecated
        # option that does nothing. We keep it around to verify that
        # this scenario still works either way.
        self._storage = self.make_storage(
            poll_interval=3600, share_local_cache=False)

        db = DB(self._storage)
        try:
            # Set up the database.
            tm1 = transaction.TransactionManager()
            c1 = db.open(transaction_manager=tm1)
            r1 = c1.root()
            r1['obj'] = obj1 = PersistentMapping({'change': 0})
            tm1.commit()

            # Load and change the object in an independent connection.
            tm2 = transaction.TransactionManager()
            c2 = db.open(transaction_manager=tm2)
            r2 = c2.root()
            r2['obj']['change'] = 1
            tm2.commit()
            # Now c2 has delta_after0.
            self.assertEqual(len(c2._storage._cache.delta_after0), 1)
            c2.close()

            # Change the object in the original connection.
            c1.sync()
            obj1['change'] = 2
            tm1.commit()

            # Close the database connection to c2.
            c2._storage._drop_load_connection()

            # Make the database connection to c2 reopen without polling.
            c2._storage.load(b'\0' * 8, '')
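            # b'\0' * 8 is the root object's OID.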
            self.assertTrue(c2._storage._load_transaction_open)

            # Open a connection, which should be the same connection
            # as c2.
            c3 = db.open(transaction_manager=tm2)
            self.assertTrue(c3 is c2)
            self.assertEqual(len(c2._storage._cache.delta_after0), 1)

            # Clear the caches (but not delta_after*)
            c3._resetCache()
            for client in c3._storage._cache.clients_local_first:
                client.flush_all()

            obj3 = c3.root()['obj']
            # Should have loaded the new object.
            self.assertEqual(obj3['change'], 2)

        finally:
            db.close()
Example #16
    def checkCachePolling(self):
        # NOTE: This test still sets poll_interval, a deprecated
        # option that does nothing. We keep it around to verify that
        # this scenario still works either way.
        self._storage = self.make_storage(poll_interval=3600,
                                          share_local_cache=False)

        db = DB(self._storage)
        try:
            # Set up the database.
            tm1 = transaction.TransactionManager()
            c1 = db.open(transaction_manager=tm1)
            r1 = c1.root()
            r1['obj'] = obj1 = PersistentMapping({'change': 0})
            tm1.commit()

            # Load and change the object in an independent connection.
            tm2 = transaction.TransactionManager()
            c2 = db.open(transaction_manager=tm2)
            r2 = c2.root()
            r2['obj']['change'] = 1
            tm2.commit()
            # Now c2 has delta_after0.
            self.assertEqual(len(c2._storage._cache.delta_after0), 1)
            c2.close()

            # Change the object in the original connection.
            c1.sync()
            obj1['change'] = 2
            tm1.commit()

            # Close the database connection to c2.
            c2._storage._drop_load_connection()

            # Make the database connection to c2 reopen without polling.
            c2._storage.load(b'\0' * 8, '')
            self.assertTrue(c2._storage._load_transaction_open)

            # Open a connection, which should be the same connection
            # as c2.
            c3 = db.open(transaction_manager=tm2)
            self.assertTrue(c3 is c2)
            self.assertEqual(len(c2._storage._cache.delta_after0), 1)

            # Clear the caches (but not delta_after*)
            c3._resetCache()
            for client in c3._storage._cache.clients_local_first:
                client.flush_all()

            obj3 = c3.root()['obj']
            # Should have loaded the new object.
            self.assertEqual(obj3['change'], 2)

        finally:
            db.close()
Example #17
    def checkPollInterval(self, shared_cache=True):
        # Verify the poll_interval parameter causes RelStorage to
        # delay invalidation polling.
        self._storage = self.make_storage(
            poll_interval=3600, share_local_cache=shared_cache)

        db = DB(self._storage)
        try:
            tm1 = transaction.TransactionManager()
            c1 = db.open(transaction_manager=tm1)
            r1 = c1.root()
            r1['alpha'] = 1
            tm1.commit()

            tm2 = transaction.TransactionManager()
            c2 = db.open(transaction_manager=tm2)
            r2 = c2.root()
            self.assertEqual(r2['alpha'], 1)
            self.assertFalse(c2._storage.need_poll())
            self.assertTrue(c2._storage._poll_at > 0)

            r1['alpha'] = 2
            # commit c1 without committing c2.
            tm1.commit()

            if shared_cache:
                # The cache reveals that a poll is needed even though
                # the poll timeout has not expired.
                self.assertTrue(c2._storage.need_poll())
                tm2.commit()
                r2 = c2.root()
                self.assertEqual(r2['alpha'], 2)
                self.assertFalse(c2._storage.need_poll())
            else:
                # The poll timeout has not expired, so no poll should occur
                # yet, even after a commit.
                self.assertFalse(c2._storage.need_poll())
                tm2.commit()
                r2 = c2.root()
                self.assertEqual(r2['alpha'], 1)

            # expire the poll timer and verify c2 sees the change
            c2._storage._poll_at -= 3601
            tm2.commit()
            r2 = c2.root()
            self.assertEqual(r2['alpha'], 2)

            c2.close()
            c1.close()

        finally:
            db.close()
Example #18
class TestCacheCleanup(unittest.TestCase):
    def setUp(self):
        from ZODB.DB import DB
        from ZODB.MappingStorage import MappingStorage
        import transaction

        storage = MappingStorage()
        self.db = DB(storage)
        conn = self.db.open()
        root = conn.root()
        root['keepme'] = KeepMe()
        root['keepme']['extra'] = PersistentMapping()
        transaction.commit()
        conn.close()
        conn._resetCache()

    def tearDown(self):
        self.db.close()

    def assertConnectionOpened(self, conn):  #pragma NO COVERAGE
        # Attribute name changes across ZODB versions
        opened = getattr(conn, '_opened', None)
        if opened is None:
            opened = conn.opened
        self.assertNotEqual(opened, None)

    def test_cleanup(self):
        from repoze.zodbconn.cachecleanup import CacheCleanup
        connection_key = 'connection'

        def myapp(environ, start_response):
            conn = environ[connection_key]
            root = conn.root()
            root['keepme']['extra'].values()  # load objects
            self.assertConnectionOpened(conn)
            self.assertEqual(root._p_changed, False)
            self.assertEqual(root['keepme']._p_changed, False)
            self.assertEqual(root['keepme']['extra']._p_changed, False)

        regexes = 'repoze.zodbconn.tests.test_cachecleanup:KeepMe'
        cleaner = CacheCleanup(myapp, regexes, connection_key=connection_key)

        # run this test twice to test class caching
        for i in range(2):
            conn = self.db.open()
            root = conn.root()
            environ = {connection_key: conn}
            cleaner(environ, None)
            self.assertEqual(root._p_changed, False)
            self.assertEqual(root['keepme']._p_changed, False)
            self.assertEqual(root['keepme']['extra']._p_changed, None)
Example #19
class LexiconConflictTests(unittest.TestCase):

    db = None

    def _getTargetClass(self):
        from Products.ZCTextIndex.Lexicon import Lexicon
        return Lexicon

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def tearDown(self):
        if self.db is not None:
            self.db.close()
            self.storage.cleanup()

    def openDB(self):
        from ZODB.DB import DB
        from ZODB.FileStorage import FileStorage

        n = 'fs_tmp__%s' % os.getpid()
        self.storage = FileStorage(n)
        self.db = DB(self.storage)

    def testAddWordConflict(self):
        from Products.ZCTextIndex.Lexicon import Splitter

        self.l = self._makeOne(Splitter())
        self.openDB()
        r1 = self.db.open().root()
        r1['l'] = self.l
        transaction.commit()

        r2 = self.db.open().root()
        copy = r2['l']
        # Make sure the data is loaded
        list(copy._wids.items())
        list(copy._words.items())
        copy.length()

        self.assertEqual(self.l._p_serial, copy._p_serial)

        self.l.sourceToWordIds('mary had a little lamb')
        transaction.commit()

        copy.sourceToWordIds('whose fleece was')
        copy.sourceToWordIds('white as snow')
        transaction.commit()
        self.assertEqual(copy.length(), 11)
        self.assertEqual(copy.length(), len(copy._words))
Example #20
class LexiconConflictTests(unittest.TestCase):

    db = None

    def _getTargetClass(self):
        from Products.ZCTextIndex.Lexicon import Lexicon
        return Lexicon

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def tearDown(self):
        if self.db is not None:
            self.db.close()
            self.storage.cleanup()

    def openDB(self):
        from ZODB.DB import DB
        from ZODB.FileStorage import FileStorage

        n = 'fs_tmp__{0}'.format(os.getpid())
        self.storage = FileStorage(n)
        self.db = DB(self.storage)

    def testAddWordConflict(self):
        from Products.ZCTextIndex.Lexicon import Splitter

        self.lex = self._makeOne(Splitter())
        self.openDB()
        r1 = self.db.open().root()
        r1['lex'] = self.lex
        transaction.commit()

        r2 = self.db.open().root()
        copy = r2['lex']
        # Make sure the data is loaded
        list(copy._wids.items())
        list(copy._words.items())
        copy.length()

        self.assertEqual(self.lex._p_serial, copy._p_serial)

        self.lex.sourceToWordIds('mary had a little lamb')
        transaction.commit()

        copy.sourceToWordIds('whose fleece was')
        copy.sourceToWordIds('white as snow')
        transaction.commit()
        self.assertEqual(copy.length(), 11)
        self.assertEqual(copy.length(), len(copy._words))
Example #21
    def checkBTreesLengthStress(self):
        # BTrees.Length objects are unusual Persistent objects: they
        # set _p_independent and they frequently invoke conflict
        # resolution. Run a stress test on them.
        updates_per_thread = 50
        thread_count = 4

        from BTrees.Length import Length

        db = DB(self._storage)
        try:
            c = db.open()
            try:
                c.root()["length"] = Length()
                transaction.commit()
            finally:
                c.close()

            def updater():
                thread_db = DB(self._storage)
                for _ in range(updates_per_thread):
                    thread_c = thread_db.open()
                    try:
                        thread_c.root()["length"].change(1)
                        time.sleep(random.random() * 0.05)
                        transaction.commit()
                    finally:
                        thread_c.close()

            import threading

            threads = []
            for _ in range(thread_count):
                t = threading.Thread(target=updater)
                threads.append(t)
            for t in threads:
                t.start()
            for t in threads:
                t.join(120)

            c = db.open()
            try:
                self.assertEqual(c.root()["length"](), updates_per_thread * thread_count)
            finally:
                transaction.abort()
                c.close()

        finally:
            db.close()
Example #22
    def checkPackOldUnreferenced(self):
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['A'] = PersistentMapping()
            B = PersistentMapping()
            r1['A']['B'] = B
            transaction.get().note('add A then add B to A')
            transaction.commit()

            del r1['A']['B']
            transaction.get().note('remove B from A')
            transaction.commit()

            r1['A']['C'] = ''
            transaction.get().note('add C to A')
            transaction.commit()

            now = packtime = time.time()
            while packtime <= now:
                packtime = time.time()
            self._storage.pack(packtime, referencesf)

            # B should be gone, since nothing refers to it.
            self.assertRaises(KeyError, self._storage.load, B._p_oid, '')

        finally:
            db.close()
Example #23
    def testRedo(self):
        database = DB(self._storage)
        connection = database.open()
        root = connection.root()
        blob = Blob()

        transaction.begin()
        blob.open('w').write(b('this is state 1'))
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        blob.open('w').write(b('this is state 2'))
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        self.assertEqual(blob.open('r').read(), b('this is state 1'))

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        self.assertEqual(blob.open('r').read(), b('this is state 2'))

        database.close()
Example #24
class BlobStorage(object):

    def __init__(self):
        """Prepares for a functional test case.
        """
        transaction.abort()

        storage = DemoStorage("Demo Storage")

        if not IBlobStorage.providedBy(storage):
            raise RuntimeError

        self.db = DB(storage)
        self.connection = None

    def clean(self):
        """Cleans up after a functional test case.
        """
        transaction.abort()
        if self.connection:
            self.connection.close()
            self.connection = None
        self.db.close()

    def close(self):
        if self.connection:
            self.connection.close()
            self.connection = None
        self.db.close()

    def open(self):
        if self.connection:
            self.close()
        self.connection = self.db.open()
        return self.connection.root()
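A minimal usage sketch for the test layer above (hedged; the 'somekey' key is an illustrative assumption, and the page's usual transaction import is assumed):
    layer = BlobStorage()           # wraps a fresh DemoStorage in a DB
    root = layer.open()             # opens a connection and returns its root
    root['somekey'] = 'somevalue'   # illustrative write
    transaction.commit()
    layer.clean()                   # aborts any open txn, closes conn and DB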
Example #25
    def checkConcurrentUpdates2Storages_emulated(self):
        self._storage = storage1 = self.openClientStorage()
        storage2 = self.openClientStorage()
        db1 = DB(storage1)
        db2 = DB(storage2)

        cn = db1.open()
        tree = cn.root()["tree"] = OOBTree()
        transaction.commit()
        # DM: allow time for invalidations to arrive and be processed
        time.sleep(0.1)

        # Run two threads that update the BTree
        t1 = StressTask(
            db1,
            1,
            1,
        )
        t2 = StressTask(
            db2,
            2,
            2,
        )
        _runTasks(100, t1, t2)

        cn.sync()
        self._check_tree(cn, tree)
        self._check_threads(tree, t1, t2)

        cn.close()
        db1.close()
        db2.close()
Example #26
class ZODBLayer(Security):

    def setUp(self, args=None):
        super(ZODBLayer, self).make_app()
        self.tempdir = tempfile.mkdtemp()
        self.zodb_connection = None
        self.init_zodb()

    def tearDown(self):
        super(ZODBLayer, self).tearDown()
        self.zodb.close()
        shutil.rmtree(self.tempdir)

    def new_request(self):
        request = super(ZODBLayer, self).new_request()
        request.environ['repoze.zodbconn.connection'] = self.zodb_connection
        return request

    def init_zodb(self):
        if hasattr(self, 'zodb') and self.zodb:
            self.zodb_connection.close()
            self.zodb.close()
        filestorage_dir = os.path.join(self.tempdir, 'Data.fs')
        filestorage = FileStorage(filestorage_dir)
        blobstorage_dir = os.path.join(self.tempdir, 'blobstorage')
        blobstorage = BlobStorage(
            blobstorage_dir,
            filestorage,
            layout='automatic'
        )
        self.zodb = DB(blobstorage)
        self.zodb_connection = self.zodb.open()

    def zodb_root(self):
        return self.zodb_connection.root()
Example #27
    def test_pack_with_1_day(self):
        from ZODB.DB import DB
        from ZODB.FileStorage import FileStorage
        from ZODB.POSException import POSKeyError
        import time
        import transaction
        from relstorage.zodbpack import main

        storage = FileStorage(self.db_fn, create=True)
        db = DB(storage)
        conn = db.open()
        conn.root()['x'] = 1
        transaction.commit()
        oid = b('\0' * 8)
        state, serial = storage.load(oid, b(''))
        time.sleep(0.1)
        conn.root()['x'] = 2
        transaction.commit()
        conn.close()
        self.assertEqual(state, storage.loadSerial(oid, serial))
        db.close()
        storage = None

        main(['', '--days=1', self.cfg_fn])

        # packing should not have removed the old state.
        storage = FileStorage(self.db_fn)
        self.assertEqual(state, storage.loadSerial(oid, serial))
        storage.close()
Example #28
    def checkPackOldUnreferenced(self):
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['A'] = PersistentMapping()
            A_B = PersistentMapping()
            r1['A']['B'] = A_B
            transaction.get().note(u'add A then add B to A')
            transaction.commit()

            del r1['A']['B']
            transaction.get().note(u'remove B from A')
            transaction.commit()

            r1['A']['C'] = ''
            transaction.get().note(u'add C (non-persistent) to A')
            transaction.commit()

            packtime = c1._storage.lastTransactionInt()
            self._storage.pack(packtime, referencesf)

            # B should be gone, since nothing refers to it.
            with self.assertRaises(KeyError):
                __traceback_info__ = bytes8_to_int64(A_B._p_oid)
                self._storage.load(A_B._p_oid)

        finally:
            db.close()
Example #29
    def testSimpleBlobRecovery(self):
        self.assertTrue(
            ZODB.interfaces.IBlobStorageRestoreable.providedBy(self._storage))
        db = DB(self._storage)
        conn = db.open()
        root = conn.root()
        root._p_activate()
        root[1] = ZODB.blob.Blob()
        transaction.commit()
        root[2] = ZODB.blob.Blob()
        with root[2].open('w') as f:
            f.write(b'some data')
        transaction.commit()
        root[3] = ZODB.blob.Blob()
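        # Build 40,000-80,000 bytes of random data for root[3], slicing off
        # 1-4 trailing bytes to vary the final blob size.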
        with root[3].open('w') as f:
            f.write((b''.join(
                struct.pack(">I", random.randint(0, (1 << 32) - 1))
                for i in range(random.randint(10000, 20000)))
                     )[:-random.randint(1, 4)])
        transaction.commit()
        root[2] = ZODB.blob.Blob()
        with root[2].open('w') as f:
            f.write(b'some other data')
        transaction.commit()
        __traceback_info__ = self._storage, self._dst
        self._dst.copyTransactionsFrom(self._storage)

        self.compare(self._storage, self._dst)

        conn.close()
        db.close()
Example #30
    def checkPackGC(self, expect_object_deleted=True, close=True):
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['alpha'] = PersistentMapping()
            transaction.commit()

            oid = r1['alpha']._p_oid
            r1['alpha'] = None
            transaction.commit()

            # The object should still exist
            self._storage.load(oid, '')

            # Pack
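            # Busy-wait until the clock ticks forward so the pack time is
            # strictly later than the last commit's timestamp.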
            now = packtime = time.time()
            while packtime <= now:
                packtime = time.time()
            self._storage.pack(packtime, referencesf)
            self._storage.sync()

            if expect_object_deleted:
                # The object should now be gone
                self.assertRaises(KeyError, self._storage.load, oid, '')
            else:
                # The object should still exist
                self._storage.load(oid, '')
        finally:
            if close:
                db.close()
        return oid
Example #31
    def checkNonASCIITransactionMetadata(self):
        # Verify the database stores and retrieves non-ASCII text
        # in transaction metadata.
        ugly_string = ''.join(chr(c) for c in range(256))
        if isinstance(ugly_string, bytes):
            # Always text. Use latin 1 because it can decode any arbitrary
            # bytes.
            ugly_string = ugly_string.decode('latin-1')

        # The storage layer is defined to take bytes (implicitly in
        # older ZODB releases, explicitly in ZODB 5.something), but historically
        # it can accept either text or bytes. However, it always returns bytes.
        check_string = ugly_string.encode("utf-8")

        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['alpha'] = 1
            transaction.get().setUser(ugly_string)
            transaction.commit()
            r1['alpha'] = 2
            transaction.get().note(ugly_string)
            transaction.commit()

            info = self._storage.undoInfo()
            self.assertEqual(info[0]['description'], check_string)
            self.assertEqual(info[1]['user_name'], b'/ ' + check_string)
        finally:
            db.close()
Example #32
    def checkPackOldUnreferenced(self):
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['A'] = PersistentMapping()
            B = PersistentMapping()
            r1['A']['B'] = B
            transaction.get().note('add A then add B to A')
            transaction.commit()

            del r1['A']['B']
            transaction.get().note('remove B from A')
            transaction.commit()

            r1['A']['C'] = ''
            transaction.get().note('add C to A')
            transaction.commit()

            now = packtime = time.time()
            while packtime <= now:
                packtime = time.time()
            self._storage.pack(packtime, referencesf)

            # B should be gone, since nothing refers to it.
            self.assertRaises(KeyError, self._storage.load, B._p_oid, '')

        finally:
            db.close()
Example #33
class DbAdapter:
    def __init__(self, path="data.db"):
        self.path = path

    def connect(self):
        self.storage = FileStorage(self.path)
        self.db = DB(self.storage)
        self.conn = self.db.open()
        return self.conn.root()

    def begin_transaction(self):
        transaction.begin()

    def commit(self):
        transaction.commit()

    def rollback(self):
        transaction.abort()

    def disconnect(self):
        self.conn.close()
        self.db.close()
        self.storage.close()
        if os.path.exists(self.path + ".lock"):
            os.remove(self.path + ".lock")
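A short usage sketch for the adapter above (hedged; the 'data.db' path and the 'counter' key are illustrative assumptions, not from the source):
    adapter = DbAdapter('data.db')
    root = adapter.connect()                      # opens storage, DB, connection
    adapter.begin_transaction()
    root['counter'] = root.get('counter', 0) + 1  # illustrative key
    adapter.commit()
    adapter.disconnect()                          # closes everything, drops lock file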
Example #34
    def checkPackBatchLockNoWait(self):
        # Exercise the code in the pack algorithm that attempts to get the
        # commit lock but will sleep if the lock is busy.
        self._storage = self.make_storage(pack_batch_timeout=0)

        adapter = self._storage._adapter
        test_conn, test_cursor = adapter.connmanager.open()

        slept = []
        def sim_sleep(seconds):
            slept.append(seconds)
            adapter.locker.release_commit_lock(test_cursor)
            test_conn.rollback()
            adapter.connmanager.close(test_conn, test_cursor)

        db = DB(self._storage)
        try:
            # add some data to be packed
            c = db.open()
            r = c.root()
            r['alpha'] = PersistentMapping()
            transaction.commit()
            del r['alpha']
            transaction.commit()

            # Pack, with a commit lock held
            now = packtime = time.time()
            while packtime <= now:
                packtime = time.time()
            adapter.locker.hold_commit_lock(test_cursor)
            self._storage.pack(packtime, referencesf, sleep=sim_sleep)

            self.assertTrue(len(slept) > 0)
        finally:
            db.close()
Example #35
def get_db_connection(blob_dir):
    storage = MappingStorage('test')
    blob_storage = BlobStorage(blob_dir, storage)
    db = DB(blob_storage)
    conn = db.open()
    create_app_root(conn)
    return conn
Example #36
    def testRedo(self):
        database = DB(self._storage)
        connection = database.open()
        root = connection.root()
        blob = Blob()

        transaction.begin()
        blob.open('w').write(b('this is state 1'))
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        blob.open('w').write(b('this is state 2'))
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        self.assertEqual(blob.open('r').read(), b('this is state 1'))

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        self.assertEqual(blob.open('r').read(), b('this is state 2'))

        database.close()
Example #37
    def testUndo(self):
        database = DB(self._storage)
        connection = database.open()
        root = connection.root()
        transaction.begin()
        blob = Blob()
        with blob.open('w') as f:
            f.write(b'this is state 1')
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        with blob.open('w') as f:
            f.write(b'this is state 2')
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        with blob.open('r') as f:
            data = f.read()
        self.assertEqual(data, b'this is state 1')

        database.close()
Example #38
    def testSimpleBlobRecovery(self):
        if hasattr(ZODB.interfaces, 'IBlobStorageRestoreable'):
            self.assertTrue(
                ZODB.interfaces.IBlobStorageRestoreable.providedBy(
                    self._storage)
            )
        db = DB(self._storage)
        conn = db.open()
        conn.root()[1] = ZODB.blob.Blob()
        transaction.commit()
        conn.root()[2] = ZODB.blob.Blob()
        conn.root()[2].open('w').write(b('some data'))
        transaction.commit()
        conn.root()[3] = ZODB.blob.Blob()
        conn.root()[3].open('w').write(
            (b('').join(struct.pack(">I", random.randint(0, (1 << 32) - 1))
                        for i in range(random.randint(10000, 20000)))
             )[:-random.randint(1, 4)]
        )
        transaction.commit()
        conn.root()[2] = ZODB.blob.Blob()
        conn.root()[2].open('w').write(b('some other data'))
        transaction.commit()
        self._dst.copyTransactionsFrom(self._storage)
        self.compare(self._storage, self._dst)
Example #39
    def checkNonASCIITransactionMetadata(self):
        # Verify the database stores and retrieves non-ASCII text
        # in transaction metadata.
        ugly_string = ''.join(chr(c) for c in range(256))
        if not isinstance(ugly_string, bytes):
            # Py3
            check_string = ugly_string.encode("latin-1")
        else:
            check_string = ugly_string

        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['alpha'] = 1
            transaction.get().setUser(ugly_string)
            transaction.commit()
            r1['alpha'] = 2
            transaction.get().note(ugly_string)
            transaction.commit()

            info = self._storage.undoInfo()
            self.assertEqual(info[0]['description'], check_string)
            self.assertEqual(info[1]['user_name'], b'/ ' + check_string)
        finally:
            db.close()
Example #40
    def testUndoAfterConsumption(self):
        database = DB(self._storage)
        connection = database.open()
        root = connection.root()
        transaction.begin()
        with open('consume1', 'wb') as file:
            file.write(b'this is state 1')
        blob = Blob()
        blob.consumeFile('consume1')
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        with open('consume2', 'wb') as file:
            file.write(b'this is state 2')
        blob.consumeFile('consume2')
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        with blob.open('r') as file:
            self.assertEqual(file.read(), b'this is state 1')

        database.close()
Example #41
def zodb_root(zodb_storage):
    """Return root object of opened ZODB storage."""
    transaction.abort()
    db = DB(zodb_storage)
    connection = db.open()
    yield connection.root()
    connection.close()
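The generator above is written like a pytest fixture; a hedged sketch of a companion fixture that could supply the assumed zodb_storage argument (names are illustrative, not from the source):
    import pytest
    from ZODB.MappingStorage import MappingStorage

    @pytest.fixture
    def zodb_storage():
        storage = MappingStorage('test')  # in-memory storage for tests
        yield storage
        storage.close()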
Example #42
def get_db_connection(blob_dir):
    storage = MappingStorage('test')
    blob_storage = BlobStorage(blob_dir, storage)
    db = DB(blob_storage)
    conn = db.open()
    create_app_root(conn)
    return conn
Example #43
    def checkNonASCIITransactionMetadata(self):
        # Verify the database stores and retrieves non-ASCII text
        # in transaction metadata.
        ugly_string = ''.join(chr(c) for c in range(256))
        if not isinstance(ugly_string, bytes):
            # Py3
            check_string = ugly_string.encode("latin-1")
        else:
            check_string = ugly_string

        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['alpha'] = 1
            transaction.get().setUser(ugly_string)
            transaction.commit()
            r1['alpha'] = 2
            transaction.get().note(ugly_string)
            transaction.commit()

            info = self._storage.undoInfo()
            self.assertEqual(info[0]['description'], check_string)
            self.assertEqual(info[1]['user_name'], b'/ ' + check_string)
        finally:
            db.close()
Example #44
    def checkNonASCIITransactionMetadata(self):
        # Verify the database stores and retrieves non-ASCII text
        # in transaction metadata.
        ugly_string = ''.join(chr(c) for c in range(256))
        if isinstance(ugly_string, bytes):
            # Always text. Use latin 1 because it can decode any arbitrary
            # bytes.
            ugly_string = ugly_string.decode('latin-1')

        # The storage layer is defined to take bytes (implicitly in
        # older ZODB releases, explicitly in ZODB 5.something), but historically
        # it can accept either text or bytes. However, it always returns bytes.
        check_string = ugly_string.encode("utf-8")

        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['alpha'] = 1
            transaction.get().setUser(ugly_string)
            transaction.commit()
            r1['alpha'] = 2
            transaction.get().note(ugly_string)
            transaction.commit()

            info = self._storage.undoInfo()
            self.assertEqual(info[0]['description'], check_string)
            self.assertEqual(info[1]['user_name'], b'/ ' + check_string)
        finally:
            db.close()
Example #45
    def testDeepCopyCanInvalidate(self):
        """
        Tests regression for invalidation problems related to missing
        readers and writers values in cloned objects (see
        http://mail.zope.org/pipermail/zodb-dev/2008-August/012054.html)
        """
        import ZODB.MappingStorage
        database = DB(ZODB.blob.BlobStorage(
            'blobs', ZODB.MappingStorage.MappingStorage()))
        connection = database.open()
        root = connection.root()
        transaction.begin()
        root['blob'] = Blob()
        transaction.commit()

        stream = StringIO()
        p = Pickler(stream, 1)
        p.dump(root['blob'])
        u = Unpickler(stream)
        stream.seek(0)
        clone = u.load()
        clone._p_invalidate()

        # it should also be possible to open the cloned blob
        # (even though it won't contain the original data)
        clone.open()

        # tearDown
        database.close()
Example #46
    def _open_client(self, location):
        """Open 'location' (a (hostname, port_number) tuple) as a ZEO
        ClientStorage, and then open a ZODB database around that.  Return
        a (database, connection) tuple.
        
        We override dulcinea.database.ObjectDatabase._open_client to support
        username/password.
        """
        host, port = location
        if host == "":
            # If the specified hostname is the empty string, then
            # 'localhost' is used.
            location = ('localhost', port)
            
        site_config = get_config()
        username = site_config.get(self.site, 'zeo-username', fallback='')
        password = site_config.get(self.site, 'zeo-password', fallback='')

        # we use QonClientStorage instead of ClientStorage to:
        #  1. workaround ClientStorage's cache_size bug
        #  2. enable cache instrumentation (if qon.local.CACHE_INSTRUMENTATION is True)
        from qon.cache_logging import QonClientStorage        
        self.storage = QonClientStorage(location,
            var='/var/tmp',
            wait=0,
            cache_size=150*MB,
            username=username,
            password=password)
        
        db = DB(self.storage)
        
        return (db, db.open())
Example #47
    def checkPackGC(self, expect_object_deleted=True):
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['alpha'] = PersistentMapping()
            transaction.commit()

            oid = r1['alpha']._p_oid
            r1['alpha'] = None
            transaction.commit()

            # The object should still exist
            self._storage.load(oid, '')

            # Pack
            now = packtime = time.time()
            while packtime <= now:
                packtime = time.time()
            self._storage.pack(packtime, referencesf)
            self._storage.sync()

            if expect_object_deleted:
                # The object should now be gone
                self.assertRaises(KeyError, self._storage.load, oid, '')
            else:
                # The object should still exist
                self._storage.load(oid, '')
        finally:
            db.close()
        return oid
Example #48
    def testUndoAfterConsumption(self):
        database = DB(self._storage)
        connection = database.open()
        root = connection.root()
        transaction.begin()
        with open('consume1', 'wb') as file:
            file.write(b'this is state 1')
        blob = Blob()
        blob.consumeFile('consume1')
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        with open('consume2', 'wb') as file:
            file.write(b'this is state 2')
        blob.consumeFile('consume2')
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        with blob.open('r') as file:
            self.assertEqual(file.read(), b'this is state 1')

        database.close()
Example #49
    def testUndo(self):
        database = DB(self._storage)
        connection = database.open()
        root = connection.root()
        transaction.begin()
        blob = Blob()
        with blob.open('w') as f:
            f.write(b'this is state 1')
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        with blob.open('w') as f:
            f.write(b'this is state 2')
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        with blob.open('r') as f:
            data = f.read()
        self.assertEqual(data, b'this is state 1')

        database.close()
Example #50
    def checkPackWhileReferringObjectChanges(self):
        # Packing should not remove objects referenced by an
        # object that changes during packing.
        db = DB(self._storage)
        try:
            # add some data to be packed
            c = db.open()
            root = c.root()
            child = PersistentMapping()
            root['child'] = child
            transaction.commit()
            expect_oids = [child._p_oid]

            def inject_changes():
                # Change the database just after the list of objects
                # to analyze has been determined.
                child2 = PersistentMapping()
                root['child2'] = child2
                transaction.commit()
                expect_oids.append(child2._p_oid)

            adapter = self._storage._adapter
            adapter.packundo.on_filling_object_refs = inject_changes
            packtime = time.time()
            self._storage.pack(packtime, referencesf)

            # "The on_filling_object_refs hook should have been called once")
            self.assertEqual(len(expect_oids), 2, expect_oids)

            # Both children should still exist.
            self._storage.load(expect_oids[0], '')
            self._storage.load(expect_oids[1], '')
        finally:
            db.close()
Example #51
    def testDeepCopyCanInvalidate(self):
        """
        Tests regression for invalidation problems related to missing
        readers and writers values in cloned objects (see
        http://mail.zope.org/pipermail/zodb-dev/2008-August/012054.html)
        """
        import ZODB.MappingStorage
        database = DB(
            ZODB.blob.BlobStorage('blobs',
                                  ZODB.MappingStorage.MappingStorage()))
        connection = database.open()
        root = connection.root()
        transaction.begin()
        root['blob'] = Blob()
        transaction.commit()

        stream = StringIO()
        p = Pickler(stream, 1)
        p.dump(root['blob'])
        u = Unpickler(stream)
        stream.seek(0)
        clone = u.load()
        clone._p_invalidate()

        # it should also be possible to open the cloned blob
        # (even though it won't contain the original data)
        clone.open()

        # tearDown
        database.close()