Example #1
    def testRedo(self):
        base_storage = FileStorage(self.storagefile)
        blob_storage = BlobStorage(self.blob_dir, base_storage)
        database = DB(blob_storage)
        connection = database.open()
        root = connection.root()
        blob = Blob()

        transaction.begin()
        blob.open('w').write('this is state 1')
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        blob.open('w').write('this is state 2')
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        self.assertEqual(blob.open('r').read(), 'this is state 1')

        serial = base64.encodestring(blob_storage._tid)

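        # Undoing again reverses the first undo, restoring state 2 (a "redo").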
        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        self.assertEqual(blob.open('r').read(), 'this is state 2')

        database.close()
Example #2
    def testUndoAfterConsumption(self):
        base_storage = FileStorage(self.storagefile)
        blob_storage = BlobStorage(self.blob_dir, base_storage)
        database = DB(blob_storage)
        connection = database.open()
        root = connection.root()
        transaction.begin()
        open('consume1', 'w').write('this is state 1')
        blob = Blob()
        blob.consumeFile('consume1')
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        open('consume2', 'w').write('this is state 2')
        blob.consumeFile('consume2')
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        self.assertEqual(blob.open('r').read(), 'this is state 1')

        database.close()
Example #3
    def checkPackKeepNewObjects(self):
        # Packing should not remove objects created or modified after
        # the pack time, even if they are unreferenced.
        db = DB(self._storage)
        try:
            # add some data to be packed
            c = db.open()
            extra1 = PersistentMapping()
            c.add(extra1)
            extra2 = PersistentMapping()
            c.add(extra2)
            transaction.commit()

            # Choose the pack time
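            # (spin until the wall clock advances, so packtime falls strictly
            # between the first commit and the changes made below)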
            now = packtime = time.time()
            while packtime <= now:
                time.sleep(0.1)
                packtime = time.time()
            while packtime == time.time():
                time.sleep(0.1)

            extra2.foo = "bar"
            extra3 = PersistentMapping()
            c.add(extra3)
            transaction.commit()

            self._storage.pack(packtime, referencesf)

            # extra1 should have been garbage collected
            self.assertRaises(KeyError, self._storage.load, extra1._p_oid, "")
            # extra2 and extra3 should both still exist
            self._storage.load(extra2._p_oid, "")
            self._storage.load(extra3._p_oid, "")
        finally:
            db.close()
Example #4
    def checkPackBatchLockNoWait(self):
        # Exercise the code in the pack algorithm that attempts to get the
        # commit lock but will sleep if the lock is busy.
        self._storage._adapter.packundo.options.pack_batch_timeout = 0
        adapter = self._storage._adapter
        test_conn, test_cursor = adapter.connmanager.open()

        slept = []

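        # The fake sleep releases the commit lock so the pack can proceed
        # instead of blocking; the recorded sleeps prove the retry path ran.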
        def sim_sleep(seconds):
            slept.append(seconds)
            adapter.locker.release_commit_lock(test_cursor)
            test_conn.rollback()
            adapter.connmanager.close(test_conn, test_cursor)

        db = DB(self._storage)
        try:
            # add some data to be packed
            c = db.open()
            r = c.root()
            r['alpha'] = PersistentMapping()
            transaction.commit()
            del r['alpha']
            transaction.commit()

            # Pack, with a commit lock held
            now = packtime = time.time()
            while packtime <= now:
                packtime = time.time()
            adapter.locker.hold_commit_lock(test_cursor)
            self._storage.pack(packtime, referencesf, sleep=sim_sleep)

            self.assertTrue(len(slept) > 0)
        finally:
            db.close()
Example #5
    def testDeepCopyCanInvalidate(self):
        """
        Tests regression for invalidation problems related to missing
        readers and writers values in cloned objects (see
        http://mail.zope.org/pipermail/zodb-dev/2008-August/012054.html)
        """
        import ZODB.MappingStorage
        database = DB(
            ZODB.blob.BlobStorage('blobs',
                                  ZODB.MappingStorage.MappingStorage()))
        connection = database.open()
        root = connection.root()
        transaction.begin()
        root['blob'] = Blob()
        transaction.commit()

        stream = StringIO()
        p = Pickler(stream, 1)
        p.dump(root['blob'])
        u = Unpickler(stream)
        stream.seek(0)
        clone = u.load()
        clone._p_invalidate()

        # it should also be possible to open the cloned blob
        # (even though it won't contain the original data)
        clone.open()

        # tearDown
        database.close()
Example #6
    def checkPackWhileReferringObjectChanges(self):
        # Packing should not remove objects referenced by an
        # object that changes during packing.
        db = DB(self._storage)
        try:
            # add some data to be packed
            c = db.open()
            root = c.root()
            child = PersistentMapping()
            root['child'] = child
            transaction.commit()
            expect_oids = [child._p_oid]

            def inject_changes():
                # Change the database just after the list of objects
                # to analyze has been determined.
                child2 = PersistentMapping()
                root['child2'] = child2
                transaction.commit()
                expect_oids.append(child2._p_oid)

            adapter = self._storage._adapter
            adapter.packundo.on_filling_object_refs = inject_changes
            packtime = time.time()
            self._storage.pack(packtime, referencesf)

            # "The on_filling_object_refs hook should have been called once")
            self.assertEqual(len(expect_oids), 2, expect_oids)

            # Both children should still exist.
            self._storage.load(expect_oids[0], '')
            self._storage.load(expect_oids[1], '')
        finally:
            db.close()
Example #7
File: db.py Project: tommed/qkweb
import os

import transaction
from ZODB.DB import DB
from ZODB.FileStorage import FileStorage


class DbAdapter:
    def __init__(self, path="data.db"):
        self.path = path

    def connect(self):
        self.storage = FileStorage(self.path)
        self.db = DB(self.storage)
        self.conn = self.db.open()
        return self.conn.root()

    def begin_transaction(self):
        transaction.begin()

    def commit(self):
        transaction.commit()

    def rollback(self):
        transaction.abort()

    def disconnect(self):
        self.conn.close()
        self.db.close()
        self.storage.close()
        if os.path.exists(self.path + ".lock"):
            os.remove(self.path + ".lock")
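
A minimal usage sketch for the adapter above (the key and value are illustrative):

    adapter = DbAdapter("data.db")
    root = adapter.connect()
    adapter.begin_transaction()
    root["greeting"] = "hello"
    adapter.commit()
    adapter.disconnect()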
Example #8
    def testRedo(self):
        database = DB(self._storage)
        connection = database.open()
        root = connection.root()
        blob = Blob()

        transaction.begin()
        blob.open('w').write(b('this is state 1'))
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        blob.open('w').write(b('this is state 2'))
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        self.assertEqual(blob.open('r').read(), b('this is state 1'))

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        self.assertEqual(blob.open('r').read(), b('this is state 2'))

        database.close()
Example #9
class Storage(object):

    def __init__(self):
        """Prepares for a functional test case.
        """
        # we prevent any craziness here
        transaction.abort()
        
        storage = DemoStorage("Demo Storage")
        self.db = DB(storage)
        self.connection = None

    def clean(self):
        """Cleans up after a functional test case.
        """
        transaction.abort()
        if self.connection:
            self.connection.close()
            self.connection = None
        self.db.close()

    def close(self):
        if self.connection:
            self.connection.close()
            self.connection = None
        self.db.close()

    def open(self):
        if self.connection:
            self.close()
        self.connection = self.db.open()
        return self.connection.root()
Example #10
    def checkCrossDBInvalidations(self):
        db1 = DB(self.openClientStorage())
        c1 = db1.open()
        r1 = c1.root()

        r1["a"] = MinPO("a")
        transaction.commit()
        self.assertEqual(r1._p_state, 0)  # up-to-date

        db2 = DB(self.openClientStorage())
        r2 = db2.open().root()

        self.assertEqual(r2["a"].value, "a")

        r2["b"] = MinPO("b")
        transaction.commit()

        # Make sure the invalidation is received in the other client.
        # We've had problems with this timing out on "slow" and/or "very
        # busy" machines, so we increase the sleep time on each trip, and
        # are willing to wait quite a long time.
        for i in range(20):
            c1.sync()
            if r1._p_state == -1:
                break
            time.sleep(i / 10.0)
        self.assertEqual(r1._p_state, -1)  # ghost

        r1.keys()  # unghostify
        self.assertEqual(r1._p_serial, r2._p_serial)
        self.assertEqual(r1["b"].value, "b")

        db2.close()
        db1.close()
Example #11
    def checkAutoReconnectOnSync(self):
        # Verify auto-reconnect.
        db = DB(self._storage)
        try:
            c1 = db.open()
            r = c1.root()

            c1._storage._load_conn.close()
            c1._storage.sync()
            # ZODB5 calls sync when a connection is opened. Our monkey
            # patch on a Connection makes sure that works in earlier
            # versions, but we don't have that patch on ZODB5. So test
            # the storage directly. NOTE: The load connection must be open
            # to trigger the actual sync.

            r = c1.root()
            r['alpha'] = 1
            transaction.commit()
            c1.close()

            c1._storage._load_conn.close()
            c1._storage._store_conn.close()

            c2 = db.open()
            self.assertIs(c2, c1)

            r = c2.root()
            self.assertEqual(r['alpha'], 1)
            r['beta'] = PersistentMapping()
            c2.add(r['beta'])
            transaction.commit()
            c2.close()
        finally:
            db.close()
Example #12
    def checkPackOldUnreferenced(self):
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['A'] = PersistentMapping()
            A_B = PersistentMapping()
            r1['A']['B'] = A_B
            transaction.get().note(u'add A then add B to A')
            transaction.commit()

            del r1['A']['B']
            transaction.get().note(u'remove B from A')
            transaction.commit()

            r1['A']['C'] = ''
            transaction.get().note(u'add C (non-persistent) to A')
            transaction.commit()

            packtime = c1._storage.lastTransactionInt()
            self._storage.pack(packtime, referencesf)

            # B should be gone, since nothing refers to it.
            with self.assertRaises(KeyError):
                __traceback_info__ = bytes8_to_int64(A_B._p_oid)
                self._storage.load(A_B._p_oid)

        finally:
            db.close()
Example #13
    def testLargeBlob(self):
        # Large blobs are chunked into multiple pieces, we want to know
        # if they come out the same way they went in.
        db = DB(self._storage)
        conn = db.open()
        blob = conn.root()[1] = ZODB.blob.Blob()
        blob_file = blob.open('w')
        signature = self._random_file(self.testsize, blob_file)
        blob_file.close()
        transaction.commit()
        conn.close()

        # Clear the cache
        for base, _dir, files in os.walk('.'):
            for f in files:
                if f.endswith('.blob'):
                    ZODB.blob.remove_committed(os.path.join(base, f))

        # Re-download blob
        conn = db.open()
        with conn.root()[1].open('r') as blob:
            self.assertEqual(self._md5sum(blob), signature)

        conn.close()
        db.close()
Example #14
    def testSimpleBlobRecovery(self):
        self.assertTrue(
            ZODB.interfaces.IBlobStorageRestoreable.providedBy(self._storage))
        db = DB(self._storage)
        conn = db.open()
        root = conn.root()
        root._p_activate()
        root[1] = ZODB.blob.Blob()
        transaction.commit()
        root[2] = ZODB.blob.Blob()
        with root[2].open('w') as f:
            f.write(b'some data')
        transaction.commit()
        root[3] = ZODB.blob.Blob()
        with root[3].open('w') as f:
            f.write((b''.join(
                struct.pack(">I", random.randint(0, (1 << 32) - 1))
                for i in range(random.randint(10000, 20000)))
                     )[:-random.randint(1, 4)])
        transaction.commit()
        root[2] = ZODB.blob.Blob()
        with root[2].open('w') as f:
            f.write(b'some other data')
        transaction.commit()
        __traceback_info__ = self._storage, self._dst
        self._dst.copyTransactionsFrom(self._storage)

        self.compare(self._storage, self._dst)

        conn.close()
        db.close()
Example #15
    def checkAutoReconnect(self):
        # Verify auto-reconnect
        db = DB(self._storage)
        try:
            c1 = db.open()
            r = c1.root()
            r['alpha'] = 1
            transaction.commit()
            c1.close()

            c1._storage._load_conn.close()
            c1._storage._store_conn.close()
            # ZODB5 implicitly calls sync
            # immediately when a connection is opened;
            # fake that here for older releases.
            c2 = db.open()
            self.assertIs(c2, c1)
            c2.sync()
            r = c2.root()
            self.assertEqual(r['alpha'], 1)
            r['beta'] = PersistentMapping()
            c2.add(r['beta'])
            transaction.commit()
            c2.close()
        finally:
            db.close()
Example #16
    def checkConcurrentUpdates2Storages_emulated(self):
        self._storage = storage1 = self.openClientStorage()
        storage2 = self.openClientStorage()
        db1 = DB(storage1)
        db2 = DB(storage2)

        cn = db1.open()
        tree = cn.root()["tree"] = OOBTree()
        transaction.commit()
        # DM: allow time for invalidations to come in and process them
        time.sleep(0.1)

        # Run two threads that update the BTree
        t1 = StressTask(
            db1,
            1,
            1,
        )
        t2 = StressTask(
            db2,
            2,
            2,
        )
        _runTasks(100, t1, t2)

        cn.sync()
        self._check_tree(cn, tree)
        self._check_threads(tree, t1, t2)

        cn.close()
        db1.close()
        db2.close()
Example #17
    def checkCrossConnectionInvalidation(self):
        # Verify connections see updated state at txn boundaries
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['myobj'] = 'yes'
            c2 = db.open()
            r2 = c2.root()
            self.assertNotIn('myobj', r2)

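            # Drive the two-phase commit by hand so r2's view can be
            # checked both before and after c2.sync().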
            storage = c1._storage
            t = transaction.Transaction()
            t.description = u'invalidation test'
            c1.tpc_begin(t)
            c1.commit(t)
            storage.tpc_vote(storage._transaction)
            storage.tpc_finish(storage._transaction)

            self.assertNotIn('myobj', r2)
            c2.sync()
            self.assertIn('myobj', r2)
            self.assertEqual(r2['myobj'], 'yes')
        finally:
            db.close()
Example #18
    def checkPackOldUnreferenced(self):
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['A'] = PersistentMapping()
            B = PersistentMapping()
            r1['A']['B'] = B
            transaction.get().note('add A then add B to A')
            transaction.commit()

            del r1['A']['B']
            transaction.get().note('remove B from A')
            transaction.commit()

            r1['A']['C'] = ''
            transaction.get().note('add C to A')
            transaction.commit()

            now = packtime = time.time()
            while packtime <= now:
                packtime = time.time()
            self._storage.pack(packtime, referencesf)

            # B should be gone, since nothing refers to it.
            self.assertRaises(KeyError, self._storage.load, B._p_oid, '')

        finally:
            db.close()
Example #19
    def checkNonASCIITransactionMetadata(self):
        # Verify the database stores and retrieves non-ASCII text
        # in transaction metadata.
        ugly_string = ''.join(chr(c) for c in range(256))
        if isinstance(ugly_string, bytes):
            # Always text. Use latin 1 because it can decode any arbitrary
            # bytes.
            ugly_string = ugly_string.decode('latin-1')

        # The storage layer is defined to take bytes (implicitly in
        # older ZODB releases, explicitly in ZODB 5.something), but historically
        # it can accept either text or bytes. However, it always returns bytes
        check_string = ugly_string.encode("utf-8")

        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['alpha'] = 1
            transaction.get().setUser(ugly_string)
            transaction.commit()
            r1['alpha'] = 2
            transaction.get().note(ugly_string)
            transaction.commit()

            info = self._storage.undoInfo()
            self.assertEqual(info[0]['description'], check_string)
            self.assertEqual(info[1]['user_name'], b'/ ' + check_string)
        finally:
            db.close()
Example #20
    def testUndoAfterConsumption(self):
        database = DB(self._storage)
        connection = database.open()
        root = connection.root()
        transaction.begin()
        with open('consume1', 'wb') as file:
            file.write(b'this is state 1')
        blob = Blob()
        blob.consumeFile('consume1')
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        with open('consume2', 'wb') as file:
            file.write(b'this is state 2')
        blob.consumeFile('consume2')
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        with blob.open('r') as file:
            self.assertEqual(file.read(), b'this is state 1')

        database.close()
Example #22
    def checkPackKeepNewObjects(self):
        # Packing should not remove objects created or modified after
        # the pack time, even if they are unreferenced.
        db = DB(self._storage)
        try:
            # add some data to be packed
            c = db.open()
            extra1 = PersistentMapping()
            c.add(extra1)
            extra2 = PersistentMapping()
            c.add(extra2)
            transaction.commit()

            # Choose the pack time
            now = packtime = time.time()
            while packtime <= now:
                time.sleep(0.1)
                packtime = time.time()
            while packtime == time.time():
                time.sleep(0.1)

            extra2.foo = 'bar'
            extra3 = PersistentMapping()
            c.add(extra3)
            transaction.commit()

            self._storage.pack(packtime, referencesf)

            # extra1 should have been garbage collected
            self.assertRaises(KeyError, self._storage.load, extra1._p_oid, '')
            # extra2 and extra3 should both still exist
            self._storage.load(extra2._p_oid, '')
            self._storage.load(extra3._p_oid, '')
        finally:
            db.close()
Example #23
    def checkNonASCIITransactionMetadata(self):
        # Verify the database stores and retrieves non-ASCII text
        # in transaction metadata.
        ugly_string = ''.join(chr(c) for c in range(256))
        if not isinstance(ugly_string, bytes):
            # Py3
            check_string = ugly_string.encode("latin-1")
        else:
            check_string = ugly_string

        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['alpha'] = 1
            transaction.get().setUser(ugly_string)
            transaction.commit()
            r1['alpha'] = 2
            transaction.get().note(ugly_string)
            transaction.commit()

            info = self._storage.undoInfo()
            self.assertEqual(info[0]['description'], check_string)
            self.assertEqual(info[1]['user_name'], b'/ ' + check_string)
        finally:
            db.close()
Example #24
    def checkPackGC(self, expect_object_deleted=True, close=True):
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['alpha'] = PersistentMapping()
            transaction.commit()

            oid = r1['alpha']._p_oid
            r1['alpha'] = None
            transaction.commit()

            # The object should still exist
            self._storage.load(oid, '')

            # Pack
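            # (spin until the wall clock advances past the last commit)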
            now = packtime = time.time()
            while packtime <= now:
                packtime = time.time()
            self._storage.pack(packtime, referencesf)
            self._storage.sync()

            if expect_object_deleted:
                # The object should now be gone
                self.assertRaises(KeyError, self._storage.load, oid, '')
            else:
                # The object should still exist
                self._storage.load(oid, '')
        finally:
            if close:
                db.close()
        return oid
Example #27
    def test_pack_with_1_day(self):
        from ZODB.DB import DB
        from ZODB.FileStorage import FileStorage
        from ZODB.POSException import POSKeyError
        import time
        import transaction
        from relstorage.zodbpack import main

        storage = FileStorage(self.db_fn, create=True)
        db = DB(storage)
        conn = db.open()
        conn.root()['x'] = 1
        transaction.commit()
        oid = b('\0' * 8)
        state, serial = storage.load(oid, b(''))
        time.sleep(0.1)
        conn.root()['x'] = 2
        transaction.commit()
        conn.close()
        self.assertEqual(state, storage.loadSerial(oid, serial))
        db.close()
        storage = None

        main(['', '--days=1', self.cfg_fn])

        # packing should not have removed the old state.
        storage = FileStorage(self.db_fn)
        self.assertEqual(state, storage.loadSerial(oid, serial))
        storage.close()
Example #28
    def xxxcheckConcurrentUpdatesInVersions(self):
        self._storage = storage1 = self.openClientStorage()
        db1 = DB(storage1)
        db2 = DB(self.openClientStorage())
        stop = threading.Event()

        cn = db1.open()
        tree = cn.root()["tree"] = OOBTree()
        transaction.commit()
        cn.close()

        # Run three threads that update the BTree.
        # Two of the threads share a single storage so that it
        # is possible for both threads to read the same object
        # at the same time.

        cd = {}
        t1 = VersionStressThread(db1, stop, 1, cd, 1, 3)
        t2 = VersionStressThread(db2, stop, 2, cd, 2, 3, 0.01)
        t3 = VersionStressThread(db2, stop, 3, cd, 3, 3, 0.01)
        self.go(stop, cd, t1, t2, t3)

        while db1.lastTransaction() != db2.lastTransaction():
            db1._storage.sync()
            db2._storage.sync()

        cn = db1.open()
        tree = cn.root()["tree"]
        self._check_tree(cn, tree)
        self._check_threads(tree, t1, t2, t3)

        cn.close()
        db1.close()
        db2.close()
Example #29
class BlobStorage(object):

    def __init__(self):
        """Prepares for a functional test case.
        """
        transaction.abort()

        storage = DemoStorage("Demo Storage")

        if not IBlobStorage.providedBy(storage):
            raise RuntimeError

        self.db = DB(storage)
        self.connection = None

    def clean(self):
        """Cleans up after a functional test case.
        """
        transaction.abort()
        if self.connection:
            self.connection.close()
            self.connection = None
        self.db.close()

    def close(self):
        if self.connection:
            self.connection.close()
            self.connection = None
        self.db.close()

    def open(self):
        if self.connection:
            self.close()
        self.connection = self.db.open()
        return self.connection.root()
Example #30
 def test_storage_has_data(self):
     from relstorage.zodbconvert import storage_has_data
     src = FileStorage(self.srcfile, create=True)
     self.assertFalse(storage_has_data(src))
     db = DB(src)  # add the root object
     db.close()
     self.assertTrue(storage_has_data(src))
Example #31
    def testUndo(self):
        database = DB(self._storage)
        connection = database.open()
        root = connection.root()
        transaction.begin()
        blob = Blob()
        with blob.open('w') as f:
            f.write(b'this is state 1')
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        with blob.open('w') as f:
            f.write(b'this is state 2')
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        with blob.open('r') as f:
            data = f.read()
        self.assertEqual(data, b'this is state 1')

        database.close()
Example #32
    def test_pack_defaults(self):
        from ZODB.DB import DB
        from ZODB.FileStorage import FileStorage
        from ZODB.POSException import POSKeyError
        import time
        import transaction
        from relstorage.zodbpack import main

        storage = FileStorage(self.db_fn, create=True)
        db = DB(storage)
        conn = db.open()
        conn.root()['x'] = 1
        transaction.commit()
        oid = b'\0' * 8
        state, serial = storage.load(oid, '')
        time.sleep(0.1)
        conn.root()['x'] = 2
        transaction.commit()
        conn.close()
        self.assertEqual(state, storage.loadSerial(oid, serial))
        db.close()
        storage = None

        main(['', self.cfg_fn])

        # packing should have removed the old state.
        storage = FileStorage(self.db_fn)
        self.assertRaises(POSKeyError, storage.loadSerial, oid, serial)
        storage.close()
Example #33
    def testLargeBlob(self):
        # Large blobs are chunked into multiple pieces, we want to know
        # if they come out the same way they went in.
        db = DB(self._storage)
        conn = db.open()
        blob = conn.root()[1] = ZODB.blob.Blob()
        size = sizeof_fmt(self.testsize)
        self._log('Creating %s blob file' % size)
        blob_file = blob.open('w')
        signature = random_file(self.testsize, blob_file)
        blob_file.close()
        self._log('Committing %s blob file' % size)
        transaction.commit()
        conn.close()

        # Clear the cache
        for base, _dir, files in os.walk('.'):
            for f in files:
                if f.endswith('.blob'):
                    ZODB.blob.remove_committed(os.path.join(base, f))

        # Re-download blob
        self._log('Caching %s blob file' % size)
        conn = db.open()
        with conn.root()[1].open('r') as blob:
            self._log('Creating signature for %s blob cache' % size)
            self.assertEqual(md5sum(blob), signature)

        conn.close()
        db.close()
Example #35
    def checkConcurrentUpdates2Storages(self):
        self._storage = storage1 = self.openClientStorage()
        db1 = DB(storage1)
        storage2 = self.openClientStorage()
        db2 = DB(storage2)
        stop = threading.Event()

        cn = db1.open()
        tree = cn.root()["tree"] = OOBTree()
        transaction.commit()
        cn.close()

        # Run two threads that update the BTree
        cd = {}
        t1 = self.StressThread(self, db1, stop, 1, cd, 1)
        t2 = self.StressThread(self, db2, stop, 2, cd, 2)
        self.go(stop, cd, t1, t2)

        while db1.lastTransaction() != db2.lastTransaction():
            db1._storage.sync()
            db2._storage.sync()

        cn = db1.open()
        tree = cn.root()["tree"]
        self._check_tree(cn, tree)
        self._check_threads(tree, t1, t2)

        cn.close()
        db1.close()
        db2.close()
Example #36
    def testDeepCopyCanInvalidate(self):
        """
        Tests regression for invalidation problems related to missing
        readers and writers values in cloned objects (see
        http://mail.zope.org/pipermail/zodb-dev/2008-August/012054.html)
        """
        import ZODB.MappingStorage
        database = DB(ZODB.blob.BlobStorage(
            'blobs', ZODB.MappingStorage.MappingStorage()))
        connection = database.open()
        root = connection.root()
        transaction.begin()
        root['blob'] = Blob()
        transaction.commit()

        stream = StringIO()
        p = Pickler(stream, 1)
        p.dump(root['blob'])
        u = Unpickler(stream)
        stream.seek(0)
        clone = u.load()
        clone._p_invalidate()

        # it should also be possible to open the cloned blob
        # (even though it won't contain the original data)
        clone.open()

        # tearDown
        database.close()
Example #37
    def checkConcurrentUpdates2StoragesMT(self):
        self._storage = storage1 = self.openClientStorage()
        db1 = DB(storage1)
        db2 = DB(self.openClientStorage())
        stop = threading.Event()

        cn = db1.open()
        tree = cn.root()["tree"] = OOBTree()
        transaction.commit()
        cn.close()

        # Run three threads that update the BTree.
        # Two of the threads share a single storage so that it
        # is possible for both threads to read the same object
        # at the same time.

        cd = {}
        t1 = self.StressThread(self, db1, stop, 1, cd, 1, 3)
        t2 = self.StressThread(self, db2, stop, 2, cd, 2, 3, 0.01)
        t3 = self.StressThread(self, db2, stop, 3, cd, 3, 3, 0.01)
        self.go(stop, cd, t1, t2, t3)

        while db1.lastTransaction() != db2.lastTransaction():
            time.sleep(.1)

        time.sleep(.1)
        cn = db1.open()
        tree = cn.root()["tree"]
        self._check_tree(cn, tree)
        self._check_threads(tree, t1, t2, t3)

        cn.close()
        db1.close()
        db2.close()
Example #38
    def checkPackGC(self, expect_object_deleted=True):
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['alpha'] = PersistentMapping()
            transaction.commit()

            oid = r1['alpha']._p_oid
            r1['alpha'] = None
            transaction.commit()

            # The object should still exist
            self._storage.load(oid, '')

            # Pack
            now = packtime = time.time()
            while packtime <= now:
                packtime = time.time()
            self._storage.pack(packtime, referencesf)
            self._storage.sync()

            if expect_object_deleted:
                # The object should now be gone
                self.assertRaises(KeyError, self._storage.load, oid, '')
            else:
                # The object should still exist
                self._storage.load(oid, '')
        finally:
            db.close()
        return oid
Example #41
 def testSimpleBlobRecovery(self):
     self.assertTrue(
         ZODB.interfaces.IBlobStorageRestoreable.providedBy(self._storage)
         )
     db = DB(self._storage)
     conn = db.open()
     conn.root()[1] = ZODB.blob.Blob()
     transaction.commit()
     conn.root()[2] = ZODB.blob.Blob()
     with conn.root()[2].open('w') as file:
         file.write(b'some data')
     transaction.commit()
     conn.root()[3] = ZODB.blob.Blob()
     with conn.root()[3].open('w') as file:
         file.write(
             (b''.join(struct.pack(">I", random.randint(0, (1<<32)-1))
                      for i in range(random.randint(10000,20000)))
              )[:-random.randint(1,4)]
             )
     transaction.commit()
     conn.root()[2] = ZODB.blob.Blob()
     with conn.root()[2].open('w') as file:
         file.write(b'some other data')
     transaction.commit()
     self._dst.copyTransactionsFrom(self._storage)
     self.compare(self._storage, self._dst)
     db.close()
Example #43
    def check_record_iternext(self):
        from ZODB.DB import DB

        db = DB(self._storage)
        conn = db.open()
        conn.root()['abc'] = MinPO('abc')
        conn.root()['xyz'] = MinPO('xyz')
        transaction.commit()

        # Ensure it's all on disk.
        db.close()
        self._storage.close()

        self.open()

        key = None
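        # The database holds three objects (the root plus the two MinPOs),
        # so record_iternext should walk oids 0, 1 and 2 in order.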
        for x in ('\000', '\001', '\002'):
            oid, tid, data, next_oid = self._storage.record_iternext(key)
            self.assertEqual(oid, ('\000' * 7) + x)
            key = next_oid
            expected_data, expected_tid = self._storage.load(oid, '')
            self.assertEqual(expected_data, data)
            self.assertEqual(expected_tid, tid)
            if x == '\002':
                self.assertEqual(next_oid, None)
            else:
                self.assertNotEqual(next_oid, None)
Example #44
    def checkConcurrentUpdates2Storages(self):
        self._storage = storage1 = self.openClientStorage()
        storage2 = self.openClientStorage()
        db1 = DB(storage1)
        db2 = DB(storage2)
        stop = threading.Event()

        cn = db1.open()
        tree = cn.root()["tree"] = OOBTree()
        transaction.commit()
        cn.close()

        # Run two threads that update the BTree
        cd = {}
        t1 = self.StressThread(db1, stop, 1, cd, 1)
        t2 = self.StressThread(db2, stop, 2, cd, 2)
        self.go(stop, cd, t1, t2)

        while db1.lastTransaction() != db2.lastTransaction():
            db1._storage.sync()
            db2._storage.sync()

        cn = db1.open()
        tree = cn.root()["tree"]
        self._check_tree(cn, tree)
        self._check_threads(tree, t1, t2)

        cn.close()
        db1.close()
        db2.close()
Example #46
    def test_pack_with_1_day(self):
        from ZODB.DB import DB
        from ZODB.FileStorage import FileStorage
        import time
        import transaction
        from relstorage.zodbpack import main

        storage = FileStorage(self.db_fn, create=True)
        db = DB(storage)
        conn = db.open()
        conn.root()['x'] = 1
        transaction.commit()
        oid = b'\0' * 8
        state, serial = storage.load(oid, '')
        time.sleep(0.1)
        conn.root()['x'] = 2
        transaction.commit()
        conn.close()
        self.assertEqual(state, storage.loadSerial(oid, serial))
        db.close()
        storage = None

        main(['', '--days=1', self.cfg_fn])

        # packing should not have removed the old state.
        storage = FileStorage(self.db_fn)
        self.assertEqual(state, storage.loadSerial(oid, serial))
        storage.close()
Example #47
    def checkPackBatchLockNoWait(self):
        # Exercise the code in the pack algorithm that attempts to get the
        # commit lock but will sleep if the lock is busy.
        self._storage = self.make_storage(pack_batch_timeout=0)

        adapter = self._storage._adapter
        test_conn, test_cursor = adapter.connmanager.open()

        slept = []
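        # The fake sleep releases the commit lock so the pack can proceed.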
        def sim_sleep(seconds):
            slept.append(seconds)
            adapter.locker.release_commit_lock(test_cursor)
            test_conn.rollback()
            adapter.connmanager.close(test_conn, test_cursor)

        db = DB(self._storage)
        try:
            # add some data to be packed
            c = db.open()
            r = c.root()
            r['alpha'] = PersistentMapping()
            transaction.commit()
            del r['alpha']
            transaction.commit()

            # Pack, with a commit lock held
            now = packtime = time.time()
            while packtime <= now:
                packtime = time.time()
            adapter.locker.hold_commit_lock(test_cursor)
            self._storage.pack(packtime, referencesf, sleep=sim_sleep)

            self.assertTrue(len(slept) > 0)
        finally:
            db.close()
Example #48
    def checkCrossConnectionInvalidation(self):
        # Verify connections see updated state at txn boundaries
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['myobj'] = 'yes'
            c2 = db.open()
            r2 = c2.root()
            self.assertNotIn('myobj', r2)

            storage = c1._storage
            t = transaction.Transaction()
            t.description = 'invalidation test'
            storage.tpc_begin(t)
            c1.commit(t)
            storage.tpc_vote(t)
            storage.tpc_finish(t)

            self.assertNotIn('myobj', r2)
            c2.sync()
            self.assertIn('myobj', r2)
            self.assertEqual(r2['myobj'], 'yes')
        finally:
            db.close()
Example #50
    def checkPackWhileReferringObjectChanges(self):
        # Packing should not remove objects referenced by an
        # object that changes during packing.
        db = DB(self._storage)
        try:
            # add some data to be packed
            c = db.open()
            root = c.root()
            child = PersistentMapping()
            root['child'] = child
            transaction.commit()
            expect_oids = [child._p_oid]

            def inject_changes():
                # Change the database just after the list of objects
                # to analyze has been determined.
                child2 = PersistentMapping()
                root['child2'] = child2
                transaction.commit()
                expect_oids.append(child2._p_oid)

            adapter = self._storage._adapter
            adapter.packundo.on_filling_object_refs = inject_changes
            packtime = time.time()
            self._storage.pack(packtime, referencesf)

            self.assertEqual(
                len(expect_oids), 2,
                "The on_filling_object_refs hook should have been called once")
            # Both children should still exist.
            self._storage.load(expect_oids[0], '')
            self._storage.load(expect_oids[1], '')
        finally:
            db.close()
Example #51
    def checkCrossDBInvalidations(self):
        db1 = DB(self.openClientStorage())
        c1 = db1.open()
        r1 = c1.root()

        r1["a"] = MinPO("a")
        transaction.commit()
        self.assertEqual(r1._p_state, 0) # up-to-date

        db2 = DB(self.openClientStorage())
        r2 = db2.open().root()

        self.assertEqual(r2["a"].value, "a")

        r2["b"] = MinPO("b")
        transaction.commit()

        # Make sure the invalidation is received in the other client.
        # We've had problems with this timing out on "slow" and/or "very
        # busy" machines, so we increase the sleep time on each trip, and
        # are willing to wait quite a long time.
        for i in range(20):
            c1.sync()
            if r1._p_state == -1:
                break
            time.sleep(i / 10.0)
        self.assertEqual(r1._p_state, -1) # ghost

        r1.keys() # unghostify
        self.assertEqual(r1._p_serial, r2._p_serial)
        self.assertEqual(r1["b"].value, "b")

        db2.close()
        db1.close()
Пример #52
0
    def checkHistoricalConnection(self):
        import persistent
        import ZODB.POSException
        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        root['first'] = persistent.mapping.PersistentMapping(count=0)
        transaction.commit()

        time_of_first_transaction = conn._storage.lastTransaction()

        root['second'] = persistent.mapping.PersistentMapping()
        root['first']['count'] += 1
        transaction.commit()

        transaction1 = transaction.TransactionManager(explicit=True)

        historical_conn = db.open(transaction_manager=transaction1,
                                  at=time_of_first_transaction)

        eq = self.assertEqual

        # regular connection sees present:

        eq(sorted(conn.root().keys()), ['first', 'second'])
        eq(conn.root()['first']['count'], 1)

        # historical connection sees past:
        transaction1.begin()
        eq(sorted(historical_conn.root().keys()), ['first'])
        eq(historical_conn.root()['first']['count'], 0)

        # Can't change history:

        historical_conn.root()['first']['count'] += 1
        eq(historical_conn.root()['first']['count'], 1)
        self.assertRaises(ZODB.POSException.ReadOnlyHistoryError,
                          transaction1.commit)
        transaction1.abort()
        eq(historical_conn.root()['first']['count'], 0)

        # Making a change in the present
        root['third'] = 3
        transaction.commit()

        # Is also not reflected in the past, even after explicit sync,
        transaction1.begin()
        eq(sorted(historical_conn.root().keys()), ['first'])
        eq(historical_conn.root()['first']['count'], 0)
        # Since we cannot change anything, we cannot join a transaction either.
        # The afterCompletion call is never invoked.
        historical_conn._storage._storage.afterCompletion = lambda: self.fail(
            "Not called")
        transaction1.commit()

        historical_conn.close()
        conn.close()
        db.close()
Example #53
 def _check_nested(storage):
     db4 = DB(storage)
     txm4 = transaction.TransactionManager(True)
     conn4 = db4.open(txm4)
     txm4.begin()
     self.assertEqual(conn4.root.myobj1.key['foo'], 'bar')
     conn4.close()
     db4.close()
Example #54
 def checkClosingNestedDatabasesWorks(self):
     # This tests for the error described in
     # https://github.com/zopefoundation/ZODB/issues/45
     db1 = DB(self._storage)
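     # Create a second database that shares db1's multi-database map.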
     db2 = DB(None, databases=db1.databases, database_name='2')
     db1.open().get_connection('2')
     db1.close()
     db2.close()
Example #56
import transaction
from ZEO.ClientStorage import ClientStorage
from ZODB.DB import DB


def setSystem(system):
	addr = 'localhost', 9001
	storage = ClientStorage(addr)
	db = DB(storage)
	connection = db.open()
	root = connection.root()
	root["system"] = system
	transaction.commit()
	db.close()
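
A call sketch, assuming a ZEO server is listening on localhost:9001 (the payload is illustrative):

    setSystem({"status": "ok"})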
Example #57
    def checkBackwardTimeTravelWithRevertWhenStale(self):
        # If revert_when_stale is true, when the database
        # connection is stale (such as through failover to an
        # asynchronous slave that is not fully up to date), the poller
        # should notice that backward time travel has occurred and
        # invalidate all objects that have changed in the interval.
        self._storage = self.make_storage(revert_when_stale=True)

        import os
        import shutil
        import tempfile
        from ZODB.FileStorage import FileStorage

        db = DB(self._storage)
        try:
            transaction.begin()
            c = db.open()
            r = c.root()
            r["alpha"] = PersistentMapping()
            transaction.commit()

            # To simulate failover to an out of date async slave, take
            # a snapshot of the database at this point, change some
            # object, then restore the database to its earlier state.

            d = tempfile.mkdtemp()
            try:
                transaction.begin()
                fs = FileStorage(os.path.join(d, "Data.fs"))
                fs.copyTransactionsFrom(c._storage)

                r["beta"] = PersistentMapping()
                transaction.commit()
                self.assertTrue("beta" in r)

                c._storage.zap_all(reset_oid=False, slow=True)
                c._storage.copyTransactionsFrom(fs)

                fs.close()
            finally:
                shutil.rmtree(d)

            # r should still be in the cache.
            self.assertTrue("beta" in r)

            # Now sync, which will call poll_invalidations().
            c.sync()

            # r should have been invalidated
            self.assertEqual(r._p_changed, None)

            # r should be reverted to its earlier state.
            self.assertFalse("beta" in r)

        finally:
            db.close()
Example #58
 def get_persistent_settings(self):
     uri = self.settings['zodbconn.uri']
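     # resolve_uri (e.g. zodburi's) turns the URI into a storage factory
     # plus DB keyword arguments.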
     storage_factory, dbkw = resolve_uri(uri)
     db = DB(storage_factory(), **dbkw)
     try:
         conn = db.open()
         home = self.find_home(conn.root())
         return home['settings']
     finally:
         db.close()
Example #59
    def checkCachePolling(self):
        # NOTE: This test still sets poll_interval, a deprecated
        # option that does nothing. We keep it around to verify that
        # this scenario still works either way.
        self._storage = self.make_storage(
            poll_interval=3600, share_local_cache=False)

        db = DB(self._storage)
        try:
            # Set up the database.
            tm1 = transaction.TransactionManager()
            c1 = db.open(transaction_manager=tm1)
            r1 = c1.root()
            r1['obj'] = obj1 = PersistentMapping({'change': 0})
            tm1.commit()

            # Load and change the object in an independent connection.
            tm2 = transaction.TransactionManager()
            c2 = db.open(transaction_manager=tm2)
            r2 = c2.root()
            r2['obj']['change'] = 1
            tm2.commit()
            # Now c2 has delta_after0.
            self.assertEqual(len(c2._storage._cache.delta_after0), 1)
            c2.close()

            # Change the object in the original connection.
            c1.sync()
            obj1['change'] = 2
            tm1.commit()

            # Close the database connection to c2.
            c2._storage._drop_load_connection()

            # Make the database connection to c2 reopen without polling.
            c2._storage.load(b'\0' * 8, '')
            self.assertTrue(c2._storage._load_transaction_open)

            # Open a connection, which should be the same connection
            # as c2.
            c3 = db.open(transaction_manager=tm2)
            self.assertTrue(c3 is c2)
            self.assertEqual(len(c2._storage._cache.delta_after0), 1)

            # Clear the caches (but not delta_after*)
            c3._resetCache()
            for client in c3._storage._cache.clients_local_first:
                client.flush_all()

            obj3 = c3.root()['obj']
            # Should have loaded the new object.
            self.assertEqual(obj3['change'], 2)

        finally:
            db.close()