Example 1
    def testRedo(self):
        database = DB(self._storage)
        connection = database.open()
        root = connection.root()
        blob = Blob()

        transaction.begin()
        blob.open('w').write(b('this is state 1'))
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        blob.open('w').write(b('this is state 2'))
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        self.assertEqual(blob.open('r').read(), b('this is state 1'))

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        self.assertEqual(blob.open('r').read(), b('this is state 2'))

        database.close()
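
Every example on this page writes byte strings through a b() helper instead of using bytes literals, so the same code runs on Python 2 and Python 3. The real helper lives in the package's compatibility module; what follows is only a minimal sketch of what such a shim typically looks like (names and the default encoding are illustrative):

    import sys

    if sys.version_info[0] >= 3:
        def b(s, encoding='ascii'):
            # Python 3: encode text to bytes; leave bytes untouched.
            return s if isinstance(s, bytes) else s.encode(encoding)
    else:
        def b(s, encoding='ascii'):
            # Python 2: str is already a byte string.
            return s
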
Example 2
 def testSimpleBlobRecovery(self):
     if hasattr(ZODB.interfaces, 'IBlobStorageRestoreable'):
         self.assert_(
             ZODB.interfaces.IBlobStorageRestoreable.providedBy(
                 self._storage)
             )
     db = DB(self._storage)
     conn = db.open()
     conn.root()[1] = ZODB.blob.Blob()
     transaction.commit()
     conn.root()[2] = ZODB.blob.Blob()
     conn.root()[2].open('w').write(b('some data'))
     transaction.commit()
     conn.root()[3] = ZODB.blob.Blob()
     conn.root()[3].open('w').write(
         (b('').join(struct.pack(">I", random.randint(0, (1<<32)-1))
                  for i in range(random.randint(10000,20000)))
          )[:-random.randint(1,4)]
         )
     transaction.commit()
     conn.root()[2] = ZODB.blob.Blob()
     conn.root()[2].open('w').write(b('some other data'))
     transaction.commit()
     self._dst.copyTransactionsFrom(self._storage)
     self.compare(self._storage, self._dst)
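
copyTransactionsFrom is the standard ZODB API for replaying one storage's committed history into another; the compare() call then checks the copy transaction by transaction. A standalone sketch of the same copy step between two plain file storages (paths are illustrative):

    from ZODB.FileStorage import FileStorage

    src = FileStorage('source.fs', read_only=True)
    dst = FileStorage('copy.fs', create=True)
    dst.copyTransactionsFrom(src)  # replays every committed transaction
    src.close()
    dst.close()
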
Example 3
    def test_pack_with_1_day(self):
        from ZODB.DB import DB
        from ZODB.FileStorage import FileStorage
        from ZODB.POSException import POSKeyError
        import time
        import transaction
        from relstorage.zodbpack import main

        storage = FileStorage(self.db_fn, create=True)
        db = DB(storage)
        conn = db.open()
        conn.root()['x'] = 1
        transaction.commit()
        oid = b('\0' * 8)
        state, serial = storage.load(oid, b(''))
        time.sleep(0.1)
        conn.root()['x'] = 2
        transaction.commit()
        conn.close()
        self.assertEqual(state, storage.loadSerial(oid, serial))
        db.close()
        storage = None

        main(['', '--days=1', self.cfg_fn])

        # packing should not have removed the old state.
        storage = FileStorage(self.db_fn)
        self.assertEqual(state, storage.loadSerial(oid, serial))
        storage.close()
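
main(['', '--days=1', self.cfg_fn]) drives relstorage's zodbpack entry point against a ZConfig file at self.cfg_fn, a fixture created elsewhere in the test case. The exact schema comes from zodbpack itself; the following is only a plausible fixture sketch, assuming it accepts a storage section like the ones written in Example 21:

    import os
    import tempfile

    # Hypothetical fixture: a minimal ZConfig file naming the storage to pack.
    fd, cfg_fn = tempfile.mkstemp()
    os.write(fd, b"""
    <filestorage>
        path Data.fs
    </filestorage>
    """)
    os.close(fd)
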
Example 4
    def _transaction_iterator(self, cursor):
        """Iterate over a list of transactions returned from the database.

        Each row begins with (tid, username, description, extension)
        and may have other columns.
        """
        use_base64 = self.use_base64
        for row in cursor:
            tid, username, description, ext = row[:4]

            if username is None:
                username = b('')
            else:
                username = decode_bytes_param(username, use_base64)

            if description is None:
                description = b('')
            else:
                description = decode_bytes_param(description, use_base64)

            if ext is None:
                ext = b('')
            else:
                ext = decode_bytes_param(ext, use_base64)

            yield (tid, username, description, ext) + tuple(row[4:])
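
decode_bytes_param normalizes driver-specific values (buffers, memoryviews, base64 text) back to plain bytes. The real helper is database-driver aware; this is a minimal sketch under the assumption that use_base64 means the column holds base64-encoded text:

    import base64

    def decode_bytes_param(param, use_base64):
        # Hypothetical sketch: normalize driver output to bytes first,
        # then undo the base64 encoding when the adapter uses it.
        value = bytes(param)
        return base64.b64decode(value) if use_base64 else value
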
Example 5
 def test_write_and_read_limited(self):
     t = self.getClass()()
     t.write(b('abc'))
     self.assertEqual(t.tell(), 3)
     t.seek(0)
     self.assertEqual(t.tell(), 0)
     self.assertEqual(t.read(2), b('ab'))
     self.assertEqual(t.tell(), 2)
Example 6
 def test_restoreBlob_shared(self):
     fn = os.path.join(self.blob_dir, 'newblob')
     write_file(fn, b('here a blob'))
     obj = self._make_default()
     obj.restoreBlob(None, test_oid, test_tid, fn)
     self.assertFalse(os.path.exists(fn))
     target_fn = obj.fshelper.getBlobFilename(test_oid, test_tid)
     self.assertEqual(read_file(target_fn), b('here a blob'))
Example 7
 def test_store_temp(self):
     c = self._makeOne()
     c.tpc_begin()
     c.store_temp(2, b('abc'))
     c.store_temp(1, b('def'))
     c.store_temp(2, b('ghi'))
     self.assertEqual(c.queue_contents, {1: (3, 6), 2: (6, 9)})
     c.queue.seek(0)
     self.assertEqual(c.queue.read(), b('abcdefghi'))
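
The assertions pin down the bookkeeping: each store_temp appends the state to one shared queue file and records (start, end) offsets per oid, and a second store for the same oid simply records fresh offsets, leaving the stale bytes in place (oid 2 ends up at (6, 9), not (0, 3)). A self-contained sketch of that behavior:

    from io import BytesIO

    class TempQueueSketch(object):
        # Hypothetical sketch of the temp-queue bookkeeping exercised above.
        def tpc_begin(self):
            self.queue = BytesIO()
            self.queue_contents = {}

        def store_temp(self, oid_int, state):
            self.queue.seek(0, 2)          # always append at the end
            start = self.queue.tell()
            self.queue.write(state)
            self.queue_contents[oid_int] = (start, self.queue.tell())
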
Example 8
 def test_vote(self):
     obj = self._make_default()
     d = obj.fshelper.getPathForOID(test_oid, create=True)
     fn1 = os.path.join(d, 'newblob')
     write_file(fn1, b('here a blob'))
     obj._txn_blobs = {test_oid: fn1}
     obj.vote(test_tid)
     fn2 = obj.fshelper.getBlobFilename(test_oid, test_tid)
     self.assertEqual(read_file(fn2), b('here a blob'))
Example 9
 def checkPreventOIDOverlap(self):
     # Store an object with a particular OID, then verify that
     # OID is not reused.
     data = b('mydata')
     oid1 = b('\0' * 7 + '\x0f')
     self._dostoreNP(oid1, data=data)
     oid2 = self._storage.new_oid()
     self.assert_(
         oid1 < oid2,
         'old OID %r should be less than new OID %r' % (oid1, oid2))
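
OIDs are 8-byte big-endian integers, so the ordering check is just a byte-string comparison of their encodings. A quick illustration with ZODB.utils:

    from ZODB.utils import u64, p64

    # b('\0' * 7 + '\x0f') is the 8-byte encoding of 15...
    assert u64(b'\x00' * 7 + b'\x0f') == 15
    # ...so after storing it, new_oid() must hand out something that
    # compares greater, e.g. the encoding of 16 or beyond.
    assert p64(16) > p64(15)
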
Example 10
 def test_copy_undone_shared(self):
     obj = self._make_default()
     copied = [(1, 1), (11, 1)]
     fn = obj.fshelper.getBlobFilename(test_oid, test_oid)
     os.makedirs(os.path.dirname(fn))
     write_file(fn, b('blob here'))
     obj.copy_undone(copied, test_tid)
     self.assertTrue(obj.txn_has_blobs)
     fn2 = obj.fshelper.getBlobFilename(test_oid, test_tid)
     self.assertEqual(read_file(fn2), b('blob here'))
Example 11
 def fdata():
     seed = "1092384956781341341234656953214543219"
     # Just use this module as the source of our data
     words = open(__file__, "rb").read().replace(b("\n"), b('')).split()
     w = collections.deque(words)
     s = collections.deque(seed)
     while True:
         yield b(' ').join(list(w)[0:1024])
         w.rotate(int(s[0]))
         s.rotate(1)
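
fdata is an endless generator of word blocks: it joins the first 1024 words of this very test module, then rotates the word deque by a digit taken from the seed so the next block starts elsewhere. deque.rotate does the heavy lifting; for reference:

    import collections

    d = collections.deque([b'a', b'b', b'c'])
    d.rotate(1)                 # rotate right: the last item moves to the front
    assert list(d) == [b'c', b'a', b'b']
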
Example 12
 def test_load_using_checkpoint0_hit(self):
     from relstorage.tests.fakecache import data
     from ZODB.utils import p64
     adapter = MockAdapter()
     c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
     c.current_tid = 60
     c.checkpoints = (50, 40)
     data['myprefix:state:50:2'] = p64(45) + b('xyz')
     res = c.load(None, 2)
     self.assertEqual(res, (b('xyz'), 45))
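
The cache tests all rely on the same value layout: the first eight bytes of a cached entry are the committed tid (p64), the remainder is the object state, and the key is '<prefix>:state:<tid>:<oid>'. Decoding a hit is then just a split:

    from ZODB.utils import p64, u64

    def decode_cache_value(value):
        # First 8 bytes: big-endian tid; the rest: pickled object state.
        return value[8:], u64(value[:8])

    assert decode_cache_value(p64(45) + b'xyz') == (b'xyz', 45)
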
Example 13
 def test_load_using_checkpoint1_miss(self):
     from relstorage.tests.fakecache import data
     from ZODB.utils import p64
     adapter = MockAdapter()
     c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
     c.current_tid = 60
     c.checkpoints = (50, 40)
     adapter.mover.data[2] = (b('123'), 35)
     res = c.load(None, 2)
     self.assertEqual(res, (b('123'), 35))
     self.assertEqual(data.get('myprefix:state:50:2'), p64(35) + b('123'))
Example 14
    def test_restoreBlob_unshared(self):
        if not support_blob_cache:
            return

        fn = os.path.join(self.blob_dir, 'newblob')
        write_file(fn, b('here a blob'))
        obj = self._make_default(shared=False)
        obj.restoreBlob(None, test_oid, test_tid, fn)
        self.assertEqual(self.uploaded[:2], (1, 2))
        target_fn = self.uploaded[2]
        self.assertEqual(read_file(target_fn), b('here a blob'))
Example 15
 def test_load_using_delta_after0_miss(self):
     from relstorage.tests.fakecache import data
     from ZODB.utils import p64
     adapter = MockAdapter()
     c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
     c.current_tid = 60
     c.checkpoints = (50, 40)
     c.delta_after0[2] = 55
     adapter.mover.data[2] = (b('abc'), 55)
     res = c.load(None, 2)
     self.assertEqual(res, (b('abc'), 55))
Example 16
 def test_send_queue_small(self):
     from relstorage.tests.fakecache import data
     from ZODB.utils import p64
     c = self._makeOne()
     c.tpc_begin()
     c.store_temp(2, b('abc'))
     c.store_temp(3, b('def'))
     tid = p64(55)
     c.send_queue(tid)
     self.assertEqual(data, {
         'myprefix:state:55:2': tid + b('abc'),
         'myprefix:state:55:3': tid + b('def'),
         })
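
send_queue is the flush side of the store_temp bookkeeping shown in Example 7: at commit time, every queued state is pushed to the shared cache under the new tid, with the same eight-byte-tid-plus-state value layout. A dict-backed sketch consistent with the expected keys:

    from ZODB.utils import u64

    def send_queue_sketch(queue, queue_contents, tid, cache, prefix='myprefix'):
        # Hypothetical sketch: flush every queued state to the shared cache,
        # keyed by the commit tid and oid, with the tid prefixed to the value.
        tid_int = u64(tid)
        for oid_int, (start, end) in queue_contents.items():
            queue.seek(start)
            state = queue.read(end - start)
            cache['%s:state:%d:%d' % (prefix, tid_int, oid_int)] = tid + state
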
Example 17
 def test_convert_to_temporary_file(self):
     t = self.getClass()(threshold=4)
     try:
         self.assertEqual(t._threshold, 4)
         t.write(b('abc'))
         self.assertEqual(t._threshold, 4)
         t.write(b('d'))
         self.assertEqual(t._threshold, 0)
         t.write(b('e'))
         t.seek(0)
         self.assertEqual(t.read(), b('abcde'))
     finally:
         t.close()
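
The class under test behaves like tempfile.SpooledTemporaryFile: writes stay in memory until the running size reaches the threshold, then the buffer rolls over into a real temporary file and _threshold drops to 0, exactly as the assertions trace. A minimal sketch of that rollover logic (class name and attributes are illustrative):

    import tempfile
    from io import BytesIO

    class SpooledBufferSketch(object):
        def __init__(self, threshold=10 * 1024 * 1024):
            self._threshold = threshold
            self._f = BytesIO()

        def __getattr__(self, name):
            # Delegate read/seek/tell/close to the current backing file.
            return getattr(self._f, name)

        def write(self, data):
            f = self._f
            if self._threshold and f.tell() + len(data) >= self._threshold:
                # Crossed the threshold: migrate to a real temporary file.
                tmp = tempfile.TemporaryFile()
                tmp.write(f.getvalue())
                tmp.seek(f.tell())
                self._f, self._threshold = tmp, 0
                f = tmp
            f.write(data)
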
Example 18
 def test_after_pack_shared_without_history(self):
     obj = self._make_default(keep_history=False)
     fn = obj.fshelper.getBlobFilename(test_oid, test_tid)
     os.makedirs(os.path.dirname(fn))
     write_file(fn, b('blob here'))
     obj.after_pack(1, 2)
     self.assertFalse(os.path.exists(fn))
Example 19
 def test_overwrite_during_conversion(self):
     t = self.getClass()(threshold=4)
     try:
         t.write(b('abc'))
         self.assertEqual(t._threshold, 4)
         t.seek(1)
         t.write(b('0'))
         self.assertEqual(t._threshold, 4)
         t.write(b('1'))
         self.assertEqual(t._threshold, 4)
         t.write(b('23'))
         self.assertEqual(t._threshold, 0)
         t.seek(0)
         self.assertEqual(t.read(), b('a0123'))
     finally:
         t.close()
Example 20
 def test_loadBlob_shared_exists(self):
     obj = self._make_default()
     fn = obj.fshelper.getBlobFilename(test_oid, test_tid)
     os.makedirs(os.path.dirname(fn))
     write_file(fn, b('blob here'))
     res = obj.loadBlob(None, test_oid, test_tid)
     self.assertEqual(fn, res)
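
In shared-blob-dir mode, loadBlob only has to compute where the committed blob file must live and check that it exists; the directory layout is delegated to ZODB's FilesystemHelper. For reference (the directory is illustrative):

    from ZODB.blob import FilesystemHelper
    from ZODB.utils import p64

    helper = FilesystemHelper('/tmp/blobs')   # hypothetical blob directory
    helper.create()
    # Blob files live under a per-OID directory, named by the committing tid:
    path = helper.getBlobFilename(p64(1), p64(2))
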
Example 21
    def setUp(self):
        import os
        import tempfile

        fd, self.srcfile = tempfile.mkstemp()
        os.close(fd)
        os.remove(self.srcfile)

        fd, self.destfile = tempfile.mkstemp()
        os.close(fd)
        os.remove(self.destfile)

        cfg = b(
            """
        <filestorage source>
            path %s
        </filestorage>
        <filestorage destination>
            path %s
        </filestorage>
        """
            % (self.srcfile, self.destfile),
            "UTF-8",
        )

        fd, self.cfgfile = tempfile.mkstemp()
        os.write(fd, cfg)
        os.close(fd)
Example 22
 def check16MObject(self):
     # Store 16 * 1024 * 1024 bytes in an object, then retrieve it
     data = b('a 16 byte string') * (1024 * 1024)
     oid = self._storage.new_oid()
     self._dostoreNP(oid, data=data)
     got, serialno = self._storage.load(oid, '')
     self.assertEqual(len(got), len(data))
     self.assertEqual(got, data)
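
The literal is chosen so the arithmetic is exact: 'a 16 byte string' really is 16 bytes long, so repeating it 1024 * 1024 times yields exactly 16 MiB:

    data = b'a 16 byte string' * (1024 * 1024)
    assert len(b'a 16 byte string') == 16
    assert len(data) == 16 * 1024 * 1024      # exactly 16 MiB
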
Example 23
    def test_openCommittedBlobFile_as_file(self):
        if not support_blob_cache:
            return

        obj = self._make_default(shared=False)
        f = obj.openCommittedBlobFile(None, test_oid, test_tid)
        self.assertEqual(f.fileno().__class__, int)
        self.assertEqual(f.read(), b('blob here'))
Example 24
 def setUp(self):
     import os
     import tempfile
     fd, self.fn = tempfile.mkstemp()
     os.write(
         fd,
         b("# Replicas\n\nexample.com:1234\nlocalhost:4321\n"
           "\nlocalhost:9999\n"))
     os.close(fd)
Example 25
    def test_loadBlob_unshared_exists(self):
        if not support_blob_cache:
            return

        obj = self._make_default(shared=False)
        fn = obj.fshelper.getBlobFilename(test_oid, test_tid)
        os.makedirs(os.path.dirname(fn))
        write_file(fn, b('blob here'))
        res = obj.loadBlob(None, test_oid, test_tid)
        self.assertEqual(fn, res)
Example 26
 def check99X1900Objects(self):
     # Store 99 objects each with 1900 bytes.  This is intended
     # to exercise possible buffer overfilling that the batching
     # code might cause.
     import transaction
     data = b('0123456789012345678') * 100
     t = transaction.Transaction()
     self._storage.tpc_begin(t)
     oids = []
     for i in range(99):
         oid = self._storage.new_oid()
         self._storage.store(oid, b('\0' * 8), data, '', t)
         oids.append(oid)
     self._storage.tpc_vote(t)
     self._storage.tpc_finish(t)
     for oid in oids:
         got, serialno = self._storage.load(oid, '')
         self.assertEqual(len(got), len(data))
         self.assertEqual(got, data)
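
The test drives the storage's two-phase-commit API by hand rather than going through a DB/Connection; a previous serial of eight zero bytes marks each store as a brand-new object. A standalone outline of the same sequence against a throwaway FileStorage (path illustrative):

    import transaction
    from ZODB.FileStorage import FileStorage

    storage = FileStorage('tpc-demo.fs', create=True)
    t = transaction.Transaction()
    storage.tpc_begin(t)
    oid = storage.new_oid()
    # '\0' * 8 as the previous serial: this oid has never been stored before.
    storage.store(oid, b'\x00' * 8, b'raw object state', '', t)
    storage.tpc_vote(t)
    storage.tpc_finish(t)
    storage.close()
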
Example 27
 def test_abort(self):
     obj = self._make_default()
     d = obj.fshelper.getPathForOID(test_oid, create=True)
     fn1 = os.path.join(d, 'newblob')
     write_file(fn1, b('here a blob'))
     obj._txn_blobs = {test_oid: fn1}
     obj.abort()
     fn2 = obj.fshelper.getBlobFilename(test_oid, test_tid)
     self.assertFalse(os.path.exists(fn1))
     self.assertFalse(os.path.exists(fn2))
Example 28
    def oracle_download_blob(self, cursor, oid, tid, filename):
        """Download a blob into a file."""
        stmt = """
        SELECT chunk
        FROM blob_chunk
        WHERE zoid = :1
            AND tid = :2
        ORDER BY chunk_num
        """

        f = None
        bytecount = 0
        # Current versions of cx_Oracle only support offsets up
        # to sys.maxint or 4GB, whichever comes first.
        maxsize = min(sys.maxint, 1 << 32)
        try:
            cursor.execute(stmt, (oid, tid))
            while True:
                try:
                    blob, = cursor.fetchone()
                except TypeError:
                    # No more chunks.  Note: if there are no chunks at
                    # all, then this method should not write a file.
                    break

                if f is None:
                    f = open(filename, 'wb')
                # round off the chunk-size to be a multiple of the oracle
                # blob chunk size to maximize performance
                read_chunk_size = int(
                    max(
                        round(1.0 * self.blob_chunk_size /
                              blob.getchunksize()), 1) * blob.getchunksize())
                offset = 1  # Oracle still uses 1-based indexing.
                reader = iter(lambda: blob.read(offset, read_chunk_size),
                              b(''))
                for read_chunk in reader:
                    f.write(read_chunk)
                    bytecount += len(read_chunk)
                    offset += len(read_chunk)
                    if offset > maxsize:
                        # We have already read the maximum we can store
                        # so we can assume we are done. If we do not break
                        # off here, cx_Oracle will throw an overflow
                        # exception anyway.
                        break
        except:
            if f is not None:
                f.close()
                os.remove(filename)
            raise

        if f is not None:
            f.close()
        return bytecount
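
The read loop leans on the two-argument iter(callable, sentinel) form: blob.read is called repeatedly, and the loop ends as soon as it returns the empty-bytes sentinel. The same pattern in isolation:

    chunks = iter([b'ab', b'cd', b''])
    for chunk in iter(lambda: next(chunks), b''):
        # yields b'ab', then b'cd'; the empty read acts as the sentinel
        print(chunk)
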
Example 29
    def test_openCommittedBlobFile_as_blobfile(self):
        if not support_blob_cache:
            return

        obj = self._make_default(shared=False)
        from ZODB.blob import Blob
        from ZODB.blob import BlobFile
        blob = Blob()
        f = obj.openCommittedBlobFile(None, test_oid, test_tid, blob=blob)
        self.assertEqual(f.__class__, BlobFile)
        self.assertEqual(f.read(), b('blob here'))
Example 30
    def test_bucket_sizes_with_compression(self):
        c = self._makeOne(cache_local_compression='zlib')
        c._bucket_limit = 21 * 2 + 1
        c.flush_all()

        c.set('k0', b('01234567') * 10)
        self.assertEqual(c._bucket0.size, 21)
        self.assertEqual(c._bucket1.size, 0)

        c.set('k1', b('76543210') * 10)
        self.assertEqual(c._bucket0.size, 21 * 2)
        self.assertEqual(c._bucket1.size, 0)

        c.set('k2', b('abcdefgh') * 10)
        self.assertEqual(c._bucket0.size, 21)
        self.assertEqual(c._bucket1.size, 21 * 2)

        v = c.get('k0')
        self.assertEqual(v, b('01234567') * 10)
        self.assertEqual(c._bucket0.size, 21 * 2)
        self.assertEqual(c._bucket1.size, 21)

        v = c.get('k1')
        self.assertEqual(v, b('76543210') * 10)
        self.assertEqual(c._bucket0.size, 21)
        self.assertEqual(c._bucket1.size, 21 * 2)

        v = c.get('k2')
        self.assertEqual(v, b('abcdefgh') * 10)
        self.assertEqual(c._bucket0.size, 21 * 2)
        self.assertEqual(c._bucket1.size, 21)
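
The expected sizes trace a two-generation local cache: new entries fill bucket0 until the limit would be exceeded, at which point bucket0 ages into bucket1 (discarding the old bucket1), and a hit in bucket1 promotes the entry back into bucket0. A dict-based sketch that reproduces the same size trace (sizes here are plain byte lengths; the real cache tracks compressed sizes):

    class TwoBucketCacheSketch(object):
        # Hypothetical sketch of the generational local cache exercised above.
        def __init__(self, bucket_limit):
            self._bucket_limit = bucket_limit
            self._bucket0 = {}      # newest generation
            self._bucket1 = {}      # previous generation

        def _size(self, bucket):
            return sum(len(v) for v in bucket.values())

        def set(self, key, value):
            if self._size(self._bucket0) + len(value) > self._bucket_limit:
                # Rotate generations: bucket0 ages into bucket1.
                self._bucket1, self._bucket0 = self._bucket0, {}
            self._bucket0[key] = value

        def get(self, key):
            value = self._bucket0.get(key)
            if value is None and key in self._bucket1:
                # Promote a second-generation hit back into bucket0.
                value = self._bucket1.pop(key)
                self.set(key, value)
            return value
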
Example 31
    def checkCachePolling(self):
        self._storage = self.make_storage(poll_interval=3600,
                                          share_local_cache=False)

        db = DB(self._storage)
        try:
            # Set up the database.
            tm1 = transaction.TransactionManager()
            c1 = db.open(transaction_manager=tm1)
            r1 = c1.root()
            r1['obj'] = obj1 = PersistentMapping({'change': 0})
            tm1.commit()

            # Load and change the object in an independent connection.
            tm2 = transaction.TransactionManager()
            c2 = db.open(transaction_manager=tm2)
            r2 = c2.root()
            r2['obj']['change'] = 1
            tm2.commit()
            # Now c2 has delta_after0.
            self.assertEqual(len(c2._storage._cache.delta_after0), 1)
            c2.close()

            # Change the object in the original connection.
            c1.sync()
            obj1['change'] = 2
            tm1.commit()

            # Close the database connection to c2.
            c2._storage._drop_load_connection()

            # Make the database connection to c2 reopen without polling.
            c2._storage.load(b('\0' * 8), '')
            self.assertTrue(c2._storage._load_transaction_open)

            # Open a connection, which should be the same connection
            # as c2.
            c3 = db.open(transaction_manager=tm2)
            self.assertTrue(c3 is c2)
            self.assertEqual(len(c2._storage._cache.delta_after0), 1)

            # Clear the caches (but not delta_after*)
            c3._resetCache()
            for client in c3._storage._cache.clients_local_first:
                client.flush_all()

            obj3 = c3.root()['obj']
            # Should have loaded the new object.
            self.assertEqual(obj3['change'], 2)

        finally:
            db.close()
Example 32
    def testRedoOfCreation(self):
        database = DB(self._storage)
        connection = database.open()
        root = connection.root()
        blob = Blob()

        transaction.begin()
        blob.open('w').write(b('this is state 1'))
        root['blob'] = blob
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        self.assertRaises(KeyError, root.__getitem__, 'blob')

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        self.assertEqual(blob.open('r').read(), b('this is state 1'))

        database.close()