Example #1
0
 def test_after_tpc_finish(self):
     # Smoke test: calling after_tpc_finish twice with the same tid
     # must not raise (the post-commit cache update is idempotent here).
     from relstorage.tests.fakecache import data
     from ZODB.utils import p64
     c = self._makeOne()
     c.tpc_begin()
     c.after_tpc_finish(p64(55))
     c.after_tpc_finish(p64(55))
Example #2
0
    def testBogusObject(self):
        """Storing invalid objects in the pickle cache must raise.

        Exercises the type / _p_oid / _p_jar validation performed on
        __setitem__, then checks for leaked references via the global
        refcount of None (CPython-specific).
        """
        def add(key, obj):
            self.cache[key] = obj

        nones = sys.getrefcount(None)

        key = p64(2)
        # value isn't persistent
        self.assertRaises(TypeError, add, key, 12)

        o = StubObject()
        # o._p_oid == None
        self.assertRaises(TypeError, add, key, o)

        o._p_oid = p64(3)
        self.assertRaises(ValueError, add, key, o)

        o._p_oid = key
        # o._p_jar == None
        self.assertRaises(Exception, add, key, o)

        o._p_jar = self.jar
        self.cache[key] = o
        # make sure it can be added multiple times
        self.cache[key] = o

        # same object, different keys
        self.assertRaises(ValueError, add, p64(0), o)

        if sys.gettrace() is None:
            # 'coverage' keeps track of coverage information in a data
            # structure that adds a new reference to None for each executed
            # line of code, which interferes with this test.  So check it
            # only if we're running without coverage tracing.
            self.assertEqual(sys.getrefcount(None), nones)
 def test_KnownConstants(self):
     """p64/u64/U64 round-trip known 64-bit values."""
     cases = ((1, b"\000\000\000\000\000\000\000\001"),
              (1 << 32, b"\000\000\000\001\000\000\000\000"))
     for number, packed in cases:
         self.assertEqual(packed, p64(number))
         self.assertEqual(u64(packed), number)
         self.assertEqual(U64(packed), number)
Example #4
0
    def copyToPacktime(self):
        """Copy transactions before the pack position to the output file.

        Walks the input file up to ``self.gc.packpos``, copying each
        transaction's surviving data records, and patches the transaction
        length into both the header and the trailing redundant-length
        slot of each copied transaction.

        Returns:
            (pos, new_pos): the input position reached (the pack
            position) and the corresponding write position in the
            output file.
        """
        # NOTE: the original declared ``offset = 0L`` ("space freed by
        # packing") but never read or updated it; the dead local has
        # been removed.
        pos = self._metadata_size
        new_pos = pos

        while pos < self.gc.packpos:
            th = self._read_txn_header(pos)
            new_tpos, pos = self.copyDataRecords(pos, th)

            if new_tpos:
                new_pos = self._tfile.tell() + 8
                tlen = new_pos - new_tpos - 8
                # Update the transaction length
                self._tfile.seek(new_tpos + 8)
                self._tfile.write(p64(tlen))
                self._tfile.seek(new_pos - 8)
                self._tfile.write(p64(tlen))

            # Verify the redundant transaction length stored after the
            # transaction's data records in the input file.
            tlen = self._read_num(pos)
            if tlen != th.tlen:
                self.fail(
                    pos, "redundant transaction length does not "
                    "match initial transaction length: %d != %d", tlen,
                    th.tlen)
            pos += 8

        return pos, new_pos
Example #5
0
 def checkKnownConstants(self):
     # Round-trip known 64-bit values through p64/u64/U64.
     # (Python 2 era: native-str constants and long literals.)
     self.assertEquals("\000\000\000\000\000\000\000\001", p64(1))
     self.assertEquals("\000\000\000\001\000\000\000\000", p64(1L<<32))
     self.assertEquals(u64("\000\000\000\000\000\000\000\001"), 1)
     self.assertEquals(U64("\000\000\000\000\000\000\000\001"), 1)
     self.assertEquals(u64("\000\000\000\001\000\000\000\000"), 1L<<32)
     self.assertEquals(U64("\000\000\000\001\000\000\000\000"), 1L<<32)
Example #6
0
    def render(self):
        """Render the object view, handling CANCEL and ROLLBACK requests.

        Returns the rendered template string, or '' after issuing a
        redirect back to this view.
        """
        self._started = time.time()
        pruneTruncations()
        self.obj = self.selectObjectToView()
        # Not using IObjectHistory(self.obj) because LP#1185175
        self.history = ZodbObjectHistory(self.obj)
        self.latest = True
        if self.request.get('tid'):
            # An explicit tid selects a historical state; int(..., 0)
            # accepts hex ('0x...') or decimal input.
            self.state = ZodbObjectState(self.obj,
                                         p64(int(self.request['tid'], 0)),
                                         _history=self.history)
            self.latest = False
        else:
            self.state = ZodbObjectState(self.obj, _history=self.history)

        if 'CANCEL' in self.request:
            self._redirectToSelf()
            return ''

        if 'ROLLBACK' in self.request:
            rtid = p64(int(self.request['rtid'], 0))
            self.requestedState = self._tidToTimestamp(rtid)
            if self.request.get('confirmed') == '1':
                self.history.rollback(rtid)
                transaction.get().note(u'Rollback to old state %s' %
                                       self.requestedState)
                self.made_changes = True
                self._redirectToSelf()
                return ''
            # will show confirmation prompt
            return self.confirmation_template()

        return self.template()
Example #7
0
    def render(self):
        """Render the object view, handling CANCEL and ROLLBACK requests.

        Returns the rendered template string, or '' after issuing a
        redirect back to this view.
        """
        self._started = time.time()
        pruneTruncations()
        self.obj = self.selectObjectToView()
        # Not using IObjectHistory(self.obj) because LP#1185175
        self.history = ZodbObjectHistory(self.obj)
        self.latest = True
        if self.request.get('tid'):
            # An explicit tid selects a historical state; int(..., 0)
            # accepts hex ('0x...') or decimal input.
            self.state = ZodbObjectState(self.obj,
                                         p64(int(self.request['tid'], 0)),
                                         _history=self.history)
            self.latest = False
        else:
            self.state = ZodbObjectState(self.obj, _history=self.history)

        if 'CANCEL' in self.request:
            self._redirectToSelf()
            return ''

        if 'ROLLBACK' in self.request:
            rtid = p64(int(self.request['rtid'], 0))
            self.requestedState = self._tidToTimestamp(rtid)
            if self.request.get('confirmed') == '1':
                self.history.rollback(rtid)
                transaction.get().note(u'Rollback to old state %s'
                                       % self.requestedState)
                self.made_changes = True
                self._redirectToSelf()
                return ''
            # will show confirmation prompt
            return self.confirmation_template()

        return self.template()
Example #8
0
 def test_after_tpc_finish(self):
     # Smoke test: calling after_tpc_finish twice with the same tid
     # must not raise (the post-commit cache update is idempotent here).
     from relstorage.tests.fakecache import data
     from ZODB.utils import p64
     c = self._makeOne()
     c.tpc_begin()
     c.after_tpc_finish(p64(55))
     c.after_tpc_finish(p64(55))
Example #9
0
    def checkBogusObject(self):
        """Storing invalid objects in the pickle cache must raise.

        NOTE(review): the trailing refcount-of-None assertion is
        CPython-specific and fails under coverage tracing; newer
        variants of this test guard on sys.gettrace().
        """
        def add(key, obj):
            self.cache[key] = obj

        nones = sys.getrefcount(None)

        key = p64(2)
        # value isn't persistent
        self.assertRaises(TypeError, add, key, 12)

        o = StubObject()
        # o._p_oid == None
        self.assertRaises(TypeError, add, key, o)

        o._p_oid = p64(3)
        self.assertRaises(ValueError, add, key, o)

        o._p_oid = key
        # o._p_jar == None
        self.assertRaises(Exception, add, key, o)

        o._p_jar = self.jar
        self.cache[key] = o
        # make sure it can be added multiple times
        self.cache[key] = o

        # same object, different keys
        self.assertRaises(ValueError, add, p64(0), o)

        self.assertEqual(sys.getrefcount(None), nones)
Example #10
0
 def test_KnownConstants(self):
     """p64/u64/U64 agree on known 64-bit packed values."""
     for number, packed in ((1, b"\000\000\000\000\000\000\000\001"),
                            (1 << 32, b"\000\000\000\001\000\000\000\000")):
         self.assertEqual(packed, p64(number))
         self.assertEqual(u64(packed), number)
         self.assertEqual(U64(packed), number)
Example #11
0
    def after_pack(self, oid_int, tid_int):
        """Called after an object state has been removed by packing.

        Removes the corresponding blob file.
        """
        if not self.shared_blob_dir:
            # Not necessary
            return

        oid = p64(oid_int)
        tid = p64(tid_int)
        fn = self.fshelper.getBlobFilename(oid, tid)
        if self.adapter.keep_history:
            # remove only the revision just packed
            if os.path.exists(fn):
                ZODB.blob.remove_committed(fn)
                dirname = os.path.dirname(fn)
                # Drop the per-oid directory once no revisions remain.
                if not _has_files(dirname):
                    ZODB.blob.remove_committed_dir(dirname)
        else:
            # remove all revisions
            dirname = os.path.dirname(fn)
            if os.path.exists(dirname):
                for name in os.listdir(dirname):
                    ZODB.blob.remove_committed(os.path.join(dirname, name))
                ZODB.blob.remove_committed_dir(dirname)
Example #12
0
    def copy_undone(self, copied, tid):
        """After an undo operation, copy the matching blobs forward.

        The copied parameter is a list of (integer oid, integer tid).
        """
        if not self.shared_blob_dir:
            # Not necessary
            return

        for oid_int, old_tid_int in copied:
            oid = p64(oid_int)
            old_tid = p64(old_tid_int)
            orig_fn = self.fshelper.getBlobFilename(oid, old_tid)
            if not os.path.exists(orig_fn):
                # not a blob
                continue

            new_fn = self.fshelper.getBlobFilename(oid, tid)
            # Blobs are binary data: the source must be opened in binary
            # mode (the original used 'r', which corrupts data where
            # newline translation applies and fails on Python 3).
            # Context managers also guarantee the handles are closed
            # even if the copy raises.
            with open(orig_fn, 'rb') as orig:
                with open(new_fn, 'wb') as new:
                    ZODB.utils.cp(orig, new)

            self._add_blob_to_transaction(oid, new_fn)
Example #13
0
    def testBogusObject(self):
        """Storing invalid objects in the pickle cache must raise.

        The trailing refcount-of-None check detects reference leaks; it
        is skipped when a trace function (e.g. coverage) is installed,
        since tracing itself creates references to None.
        """
        def add(key, obj):
            self.cache[key] = obj

        nones = sys.getrefcount(None)

        key = p64(2)
        # value isn't persistent
        self.assertRaises(TypeError, add, key, 12)

        o = StubObject()
        # o._p_oid == None
        self.assertRaises(TypeError, add, key, o)

        o._p_oid = p64(3)
        self.assertRaises(ValueError, add, key, o)

        o._p_oid = key
        # o._p_jar == None
        self.assertRaises(Exception, add, key, o)

        o._p_jar = self.jar
        self.cache[key] = o
        # make sure it can be added multiple times
        self.cache[key] = o

        # same object, different keys
        self.assertRaises(ValueError, add, p64(0), o)

        if sys.gettrace() is None:
            # 'coverage' keeps track of coverage information in a data
            # structure that adds a new reference to None for each executed
            # line of code, which interferes with this test.  So check it
            # only if we're running without coverage tracing.
            self.assertEqual(sys.getrefcount(None), nones)
Example #14
0
    def copy_undone(self, copied, tid):
        """After an undo operation, copy the matching blobs forward.

        The copied parameter is a list of (integer oid, integer tid).
        """
        if not self.shared_blob_dir:
            # Not necessary
            return

        for oid_int, old_tid_int in copied:
            oid = p64(oid_int)
            old_tid = p64(old_tid_int)
            orig_fn = self.fshelper.getBlobFilename(oid, old_tid)
            if not os.path.exists(orig_fn):
                # not a blob
                continue

            new_fn = self.fshelper.getBlobFilename(oid, tid)
            # Use context managers so both handles are closed even if
            # the copy raises (the original leaked them on error).
            with open(orig_fn, 'rb') as orig:
                with open(new_fn, 'wb') as new:
                    ZODB.utils.cp(orig, new)

            self._add_blob_to_transaction(oid, new_fn)
Example #15
0
    def after_pack(self, oid_int, tid_int):
        """Called after an object state has been removed by packing.

        Removes the corresponding blob file.
        """
        if not self.shared_blob_dir:
            # Not necessary
            return

        oid = p64(oid_int)
        tid = p64(tid_int)
        fn = self.fshelper.getBlobFilename(oid, tid)
        if self.adapter.keep_history:
            # remove only the revision just packed
            if os.path.exists(fn):
                ZODB.blob.remove_committed(fn)
                dirname = os.path.dirname(fn)
                # Drop the per-oid directory once no revisions remain.
                if not _has_files(dirname):
                    ZODB.blob.remove_committed_dir(dirname)
        else:
            # remove all revisions
            dirname = os.path.dirname(fn)
            if os.path.exists(dirname):
                for name in os.listdir(dirname):
                    ZODB.blob.remove_committed(os.path.join(dirname, name))
                ZODB.blob.remove_committed_dir(dirname)
Example #16
0
    def copyToPacktime(self):
        """Copy transactions before the pack position to the output file.

        Returns (pos, new_pos): the input position reached (the pack
        position) and the corresponding output write position.
        """
        # NOTE(review): `offset` is never updated or read below.
        offset = 0L  # the amount of space freed by packing
        pos = self._metadata_size
        new_pos = pos

        while pos < self.gc.packpos:
            th = self._read_txn_header(pos)
            new_tpos, pos = self.copyDataRecords(pos, th)

            if new_tpos:
                # Patch the transaction length into both the header and
                # the trailing redundant-length slot of the output copy.
                new_pos = self._tfile.tell() + 8
                tlen = new_pos - new_tpos - 8
                # Update the transaction length
                self._tfile.seek(new_tpos + 8)
                self._tfile.write(p64(tlen))
                self._tfile.seek(new_pos - 8)
                self._tfile.write(p64(tlen))


            # Verify the redundant transaction length in the input file.
            tlen = self._read_num(pos)
            if tlen != th.tlen:
                self.fail(pos, "redundant transaction length does not "
                          "match initial transaction length: %d != %d",
                          tlen, th.tlen)
            pos += 8

        return pos, new_pos
Example #17
0
    def checkBogusObject(self):
        """Storing invalid objects in the pickle cache must raise.

        NOTE(review): the trailing refcount-of-None assertion is
        CPython-specific and fails under coverage tracing; newer
        variants of this test guard on sys.gettrace().
        """
        def add(key, obj):
            self.cache[key] = obj

        nones = sys.getrefcount(None)

        key = p64(2)
        # value isn't persistent
        self.assertRaises(TypeError, add, key, 12)

        o = StubObject()
        # o._p_oid == None
        self.assertRaises(TypeError, add, key, o)

        o._p_oid = p64(3)
        self.assertRaises(ValueError, add, key, o)

        o._p_oid = key
        # o._p_jar == None
        self.assertRaises(Exception, add, key, o)

        o._p_jar = self.jar
        self.cache[key] = o
        # make sure it can be added multiple times
        self.cache[key] = o

        # same object, different keys
        self.assertRaises(ValueError, add, p64(0), o)

        self.assertEqual(sys.getrefcount(None), nones)
Example #18
0
    def locate(self, path):
        """Resolve a '/'-separated path (optionally rooted at a hex oid)
        to a persistent object.

        Returns a dict with ``oid``/``url`` on success, or ``error``
        plus the last persistent ancestor reached
        (``partial_oid``/``partial_path``/``partial_url``) on failure.
        """
        not_found = object() # marker

        # our current position
        #   partial -- path of the last _persistent_ object
        #   here -- path of the last object traversed
        #   oid -- oid of the last _persistent_ object
        #   obj -- last object traversed
        partial = here = '/'
        oid = self.getRootOid()
        obj = self.jar.get(p64(oid))

        steps = path.split('/')

        if steps and steps[0]:
            # 0x1234/sub/path -> start traversal at oid 0x1234
            try:
                oid = int(steps[0], 0)
            except ValueError:
                pass
            else:
                partial = here = hex(oid)
                try:
                    obj = self.jar.get(p64(oid))
                except KeyError:
                    oid = self.getRootOid()
                    return dict(error='Not found: %s' % steps[0],
                                partial_oid=oid,
                                partial_path='/',
                                partial_url=self.getUrl(oid))
                steps = steps[1:]

        for step in steps:
            if not step:
                continue
            if not here.endswith('/'):
                here += '/'
            here += step
            # Prefer item access; fall back to attribute access.
            try:
                child = obj[step]
            except Exception:
                child = getattr(obj, step, not_found)
                if child is not_found:
                    return dict(error='Not found: %s' % here,
                                partial_oid=oid,
                                partial_path=partial,
                                partial_url=self.getUrl(oid))
            obj = child
            if isinstance(obj, Persistent):
                partial = here
                oid = u64(obj._p_oid)
        if not isinstance(obj, Persistent):
            return dict(error='Not persistent: %s' % here,
                        partial_oid=oid,
                        partial_path=partial,
                        partial_url=self.getUrl(oid))
        return dict(oid=oid,
                    url=self.getUrl(oid))
Example #19
0
    def locate(self, path):
        """Resolve a '/'-separated path (optionally rooted at a hex oid)
        to a persistent object.

        Returns a dict with ``oid``/``url`` on success, or ``error``
        plus the last persistent ancestor reached
        (``partial_oid``/``partial_path``/``partial_url``) on failure.
        """
        not_found = object() # marker

        # our current position
        #   partial -- path of the last _persistent_ object
        #   here -- path of the last object traversed
        #   oid -- oid of the last _persistent_ object
        #   obj -- last object traversed
        partial = here = '/'
        oid = self.getRootOid()
        obj = self.jar.get(p64(oid))

        steps = path.split('/')

        if steps and steps[0]:
            # 0x1234/sub/path -> start traversal at oid 0x1234
            try:
                oid = int(steps[0], 0)
            except ValueError:
                pass
            else:
                partial = here = hex(oid)
                try:
                    obj = self.jar.get(p64(oid))
                except KeyError:
                    oid = self.getRootOid()
                    return dict(error='Not found: %s' % steps[0],
                                partial_oid=oid,
                                partial_path='/',
                                partial_url=self.getUrl(oid))
                steps = steps[1:]

        for step in steps:
            if not step:
                continue
            if not here.endswith('/'):
                here += '/'
            here += step
            # Prefer item access; fall back to attribute access.
            try:
                child = obj[step]
            except Exception:
                child = getattr(obj, step, not_found)
                if child is not_found:
                    return dict(error='Not found: %s' % here,
                                partial_oid=oid,
                                partial_path=partial,
                                partial_url=self.getUrl(oid))
            obj = child
            if isinstance(obj, Persistent):
                partial = here
                oid = u64(obj._p_oid)
        if not isinstance(obj, Persistent):
            return dict(error='Not persistent: %s' % here,
                        partial_oid=oid,
                        partial_path=partial,
                        partial_url=self.getUrl(oid))
        return dict(oid=oid,
                    url=self.getUrl(oid))
Example #20
0
    def test_p64_bad_object(self):
        """p64 raises ValueError for values that overflow 64 bits."""
        too_big = 2 ** 65
        with self.assertRaises(ValueError) as exc:
            p64(too_big)

        # The struct.error details vary across versions and
        # implementations; the offending value is always the last arg.
        self.assertEqual(exc.exception.args[-1], too_big)
Example #21
0
    def test_p64_bad_object(self):
        """p64 must raise ValueError (with the bad value) on overflow."""
        with self.assertRaises(ValueError) as exc:
            p64(2 ** 65)

        e = exc.exception
        # The args will be whatever the struct.error args were,
        # which vary from version to version and across implementations,
        # followed by the bad value
        self.assertEqual(e.args[-1], 2 ** 65)
Example #22
0
 def test_load_using_checkpoint1_hit(self):
     # A state cached at checkpoint1 (tid 40) should satisfy a load and
     # be promoted to the checkpoint0 (tid 50) key afterwards.
     from relstorage.tests.fakecache import data
     from ZODB.utils import p64
     adapter = MockAdapter()
     c = self.getClass()(adapter, MockOptionsWithFakeCache())
     c.current_tid = 60
     c.checkpoints = (50, 40)
     data['myprefix:state:40:2'] = p64(35) + '123'
     res = c.load(None, 2)
     self.assertEqual(res, ('123', 35))
     self.assertEqual(data.get('myprefix:state:50:2'), p64(35) + '123')
Example #23
0
 def test_after_tpc_finish(self):
     # Each after_tpc_finish must increment the commit counter by
     # exactly one, even for a repeated tid.
     from relstorage.tests.fakecache import data
     from ZODB.utils import p64
     c = self._makeOne()
     c.tpc_begin()
     c.after_tpc_finish(p64(55))
     count = data['myprefix:commits']
     self.assert_(count > 0)
     c.after_tpc_finish(p64(55))
     newcount = data['myprefix:commits']
     self.assert_(newcount == count + 1)
Example #24
0
 def test_load_using_delta_after1_hit(self):
     # A tid recorded in delta_after1 should satisfy a load and the
     # state should be promoted to the checkpoint0 (tid 50) key.
     # NOTE(review): unlike the sibling variant, p64 is not imported
     # locally here -- presumably imported at module level; verify.
     from relstorage.tests.fakecache import data
     adapter = MockAdapter()
     c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
     c.current_tid = 60
     c.checkpoints = (50, 40)
     c.delta_after1[2] = 45
     data['myprefix:state:45:2'] = p64(45) + b'abc'
     res = c.load(None, 2)
     self.assertEqual(res, (b'abc', 45))
     self.assertEqual(data.get('myprefix:state:50:2'), p64(45) + b'abc')
Example #25
0
    def testItems(self):
        # Both iteritems() and items() must yield the same sorted pairs
        # (p64(i*1000), i*1000+1) for the 200-entry fixture index.
        items = list(self.index.iteritems())
        items.sort()

        for i, item in enumerate(items):
            self.assertEqual(item, (p64(i * 1000), (i * 1000L + 1)))

        items = self.index.items()
        items.sort()

        for i, item in enumerate(items):
            self.assertEqual(item, (p64(i * 1000), (i * 1000L + 1)))
Example #26
0
    def testItems(self):
        # Both iteritems() and items() must yield the same sorted pairs
        # (p64(i*1000), i*1000+1) for the 200-entry fixture index.
        items = list(self.index.iteritems())
        items.sort()

        for i, item in enumerate(items):
            self.assertEqual(item, (p64(i * 1000), (i * 1000L + 1)))

        items = self.index.items()
        items.sort()

        for i, item in enumerate(items):
            self.assertEqual(item, (p64(i * 1000), (i * 1000L + 1)))
Example #27
0
 def test_load_using_delta_after1_hit(self):
     # A tid recorded in delta_after1 should satisfy a load and the
     # state should be promoted to the checkpoint0 (tid 50) key.
     from relstorage.tests.fakecache import data
     from ZODB.utils import p64
     adapter = MockAdapter()
     c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
     c.current_tid = 60
     c.checkpoints = (50, 40)
     c.delta_after1[2] = 45
     data['myprefix:state:45:2'] = p64(45) + b'abc'
     res = c.load(None, 2)
     self.assertEqual(res, (b'abc', 45))
     self.assertEqual(data.get('myprefix:state:50:2'), p64(45) + b'abc')
Example #28
0
    def test_clear_zeo_cache(self):
        # After clear() the cache must be empty and both current and
        # non-current lookups must miss.
        cache = self.cache
        for i in range(10):
            cache.store(p64(i), n2, None, str(i))
            cache.store(p64(i), n1, n2, str(i) + 'old')
        self.assertEqual(len(cache), 20)
        self.assertEqual(cache.load(n3), ('3', n2))
        self.assertEqual(cache.loadBefore(n3, n2), ('3old', n1, n2))

        cache.clear()
        self.assertEqual(len(cache), 0)
        self.assertEqual(cache.load(n3), None)
        self.assertEqual(cache.loadBefore(n3, n2), None)
Example #29
0
    def test_clear_zeo_cache(self):
        """clear() empties the cache and invalidates all lookups."""
        cache = self.cache
        # Populate ten oids, each with a current and a non-current state.
        for i in range(10):
            key = p64(i)
            current = str(i).encode()
            cache.store(key, n2, None, current)
            cache.store(key, n1, n2, current + b'old')
        self.assertEqual(len(cache), 20)
        self.assertEqual(cache.load(n3), (b'3', n2))
        self.assertEqual(cache.loadBefore(n3, n2), (b'3old', n1, n2))

        cache.clear()
        self.assertEqual(len(cache), 0)
        self.assertEqual(cache.load(n3), None)
        self.assertEqual(cache.loadBefore(n3, n2), None)
Example #30
0
 def store(self, oid, serial, data, version, transaction):
     # we have this funny signature so we can reuse the normal non-commit
     # commit logic
     #
     # Record layout: p64(len(oid)) + oid + serial + p64(len(data)),
     # followed by the data; self.index maps oid -> record start offset.
     assert version == ''
     self._file.seek(self.position)
     l = len(data)
     if serial is None:
         serial = z64
     header = p64(len(oid)) + oid + serial + p64(l)
     self._file.write(header)
     self._file.write(data)
     self.index[oid] = self.position
     self.position += l + len(header)
     return serial
Example #31
0
    def testBogusObject(self):
        """Storing invalid objects in the pickle cache must raise.

        Portable variant: tolerates a missing sys.getrefcount
        (PyPy/Jython) and refcount drift seen on CPython 3.7/3.8.
        """
        def add(key, obj):
            self.cache[key] = obj

        # getrefcount is an implementation detail of CPython,
        # not present under PyPy/Jython
        rc = getattr(sys, 'getrefcount', lambda x: 1)
        nones = rc(None)

        key = p64(2)
        # value isn't persistent
        self.assertRaises(TypeError, add, key, 12)

        o = StubObject()
        # o._p_oid == None
        self.assertRaises(TypeError, add, key, o)

        o._p_oid = p64(3)
        self.assertRaises(ValueError, add, key, o)

        o._p_oid = key
        # o._p_jar == None
        self.assertRaises(Exception, add, key, o)

        o._p_jar = self.jar
        self.cache[key] = o
        # make sure it can be added multiple times
        self.cache[key] = o

        # same object, different keys
        self.assertRaises(ValueError, add, p64(0), o)

        if sys.gettrace() is None:
            # 'coverage' keeps track of coverage information in a data
            # structure that adds a new reference to None for each executed
            # line of code, which interferes with this test.  So check it
            # only if we're running without coverage tracing.

            # On Python 3.7, we can see the value of reference counts
            # to None actually go *down* by a few. Possibly it has to
            # do with the lazy tracking of frames?
            # (https://github.com/python/cpython/commit/5a625d0aa6a6d9ec6574ee8344b41d63dcb9897e)
            #
            # Likewise, on 3.8 with PURE_PYTHON it sometimes increases
            # by 1; this is cleared up by a garbage collection (it's
            # not clear where/why)
            new_nones = rc(None)
            if new_nones > nones:
                gc.collect()
            self.assertLessEqual(rc(None), nones)
Example #32
0
    def getBlobFilename(self, oid, tid):
        """Return the full path of the committed blob file for
        the given oid and tid.
        """
        # Callers sometimes pass oids/tids as integers; the layout code
        # relies on the 64-bit packed byte representation, so normalize
        # integer inputs first.
        packed_oid = utils.p64(oid) if isinstance(oid, int) else oid
        packed_tid = utils.p64(tid) if isinstance(tid, int) else tid
        relative = self.layout.getBlobFilePath(packed_oid, packed_tid)
        return os.path.join(self.base_dir, relative)
def save_record(parser, tag, data):
    """Build one record string: packed oid + packed payload length + payload.

    `parser` and `tag` are accepted for the callback signature; the
    original body also bound `write = file.write` (never used) and did a
    no-op `seek(tell())` on parser.file -- both removed.

    NOTE(review): if data[1] has no 'id' key, `oid` is referenced before
    assignment (NameError); upstream presumably guarantees the key.
    """
    a = data[1]
    if a.has_key('id'): oid = a['id']
    oid = p64(int(oid))
    # Concatenate the payload chunks following the attribute dict.
    v = ''
    for x in data[2:]:
        v = v + x
    l = p64(len(v))
    return oid + l + v
Example #34
0
 def store(self, oid, serial, data, version, transaction):
     # we have this funny signature so we can reuse the normal non-commit
     # commit logic
     #
     # Record layout: p64(len(oid)) + oid + serial + p64(len(data)),
     # followed by the data; self.index maps oid -> record start offset.
     assert version == ''
     self._file.seek(self.position)
     l = len(data)
     if serial is None:
         serial = z64
     header = p64(len(oid)) + oid + serial + p64(l)
     self._file.write(header)
     self._file.write(data)
     self.index[oid] = self.position
     self.position += l + len(header)
     return serial
Example #35
0
def save_record(parser, tag, data):
    """Build one record string: packed oid + packed payload length + payload.

    NOTE(review): `write` is unused and the tell/seek pair is a no-op;
    if data[1] has no 'id' key, `oid` is referenced before assignment.
    """
    file = parser.file
    write = file.write
    pos = file.tell()
    file.seek(pos)
    a = data[1]
    if a.has_key('id'): oid = a['id']
    oid = p64(int(oid))
    v = ''
    for x in data[2:]:
        v = v + x
    l = p64(len(v))
    v = oid + l + v
    return v
Example #36
0
    def getBlobFilename(self, oid, tid):
        """Given an oid and a tid, return the full filename of the
        'committed' blob file related to that oid and tid.

        """
        # TIDs are numbers and sometimes passed around as integers. For our
        # computations we rely on the 64-bit packed string representation
        if isinstance(oid, int):
            oid = utils.p64(oid)
        if isinstance(tid, int):
            tid = utils.p64(tid)
        return os.path.join(self.base_dir,
                            self.layout.getBlobFilePath(oid, tid),
                            )
Example #37
0
    def testInserts(self):
        # Fixture index maps p64(i*1000) -> i*1000 + 1 for i in 0..199;
        # get() must return the default for absent keys.
        index = self.index

        for i in range(0, 200):
            self.assertEqual((i, index[p64(i * 1000)]), (i, (i * 1000L + 1)))

        self.assertEqual(len(index), 200)

        key = p64(2000)

        self.assertEqual(index.get(key), 2001)

        key = p64(2001)
        self.assertEqual(index.get(key), None)
        self.assertEqual(index.get(key, ''), '')
Example #38
0
    def testInserts(self):
        # Fixture index maps p64(i*1000) -> i*1000 + 1 for i in 0..199;
        # get() must return the default for absent keys.
        index = self.index

        for i in range(0,200):
            self.assertEqual((i,index[p64(i*1000)]), (i,(i*1000L+1)))

        self.assertEqual(len(index), 200)

        key=p64(2000)

        self.assertEqual(index.get(key), 2001)

        key=p64(2001)
        self.assertEqual(index.get(key), None)
        self.assertEqual(index.get(key, ''), '')
Example #39
0
    def copy(self, oid, serial, data, prev_txn, txnpos, datapos):
        """Write one data record (or backpointer) for oid at datapos.

        If prev_txn resolves to an earlier copy of the same data, an
        8-byte backpointer is written instead of duplicating the data;
        a zero backpointer marks an un-creation transaction.
        """
        prev_pos = self._resolve_backpointer(prev_txn, oid, data)
        old = self._index.get(oid, 0)
        # Calculate the pos the record will have in the storage.
        here = datapos
        # And update the temp file index
        self._tindex[oid] = here
        if prev_pos:
            # If there is a valid prev_pos, don't write data.
            data = None
        if data is None:
            dlen = 0
        else:
            dlen = len(data)
        # Write the recovery data record
        h = DataHeader(oid, serial, old, txnpos, 0, dlen)

        self._file.write(h.asString())
        # Write the data or a backpointer
        if data is None:
            if prev_pos:
                self._file.write(p64(prev_pos))
            else:
                # Write a zero backpointer, which indicates an
                # un-creation transaction.
                self._file.write(z64)
        else:
            self._file.write(data)
Example #40
0
 def test_send_queue_none(self):
     """Flushing an empty write queue stores nothing in the cache."""
     from relstorage.tests.fakecache import data
     cache = self._makeOne()
     cache.tpc_begin()
     cache._send_queue(p64(55))
     self.assertEqual(data, {})
Example #41
0
def _mapOid(id_mapping, oid):
    """Map *oid* through *id_mapping* and register the converted alias.

    Returns ``(prefix, mapped_id, aka)`` where *prefix* is the stringified
    original oid plus a trailing dot, *mapped_id* is the mapped id as
    returned by the mapping, and *aka* is the base64-encoded mapped oid
    (without the trailing newline that encodestring appends).
    """
    idprefix = str(u64(oid))
    # Renamed from ``id``, which shadowed the builtin of the same name.
    mapped_id = id_mapping[idprefix]
    old_aka = encodestring(oid)[:-1]
    # Rebuild oid based on the mapped id.  int() replaces the Python-2-only
    # long(): it auto-promotes to long on py2 and is the native type on py3.
    aka = encodestring(p64(int(mapped_id)))[:-1]
    id_mapping.setConvertedAka(old_aka, aka)
    return idprefix + '.', mapped_id, aka
Example #42
0
    def testEviction(self):
        """Filling the cache past maxsize must evict some older entries."""
        # Manually override the current maxsize
        cache = ZEO.cache.ClientCache(None, 3395)

        # Trivial test of eviction code.  Doesn't test non-current
        # eviction.
        data = [b"z" * i for i in range(100)]
        for i in range(50):
            n = p64(i)
            cache.store(n, n, None, data[i])
            # assertEquals is a deprecated unittest alias; use assertEqual.
            self.assertEqual(len(cache), i + 1)
        # The cache is now almost full.  The next insert
        # should delete some objects.
        n = p64(50)
        cache.store(n, n, None, data[51])
        # assert_ is a deprecated unittest alias; use assertTrue.
        self.assertTrue(len(cache) < 51)
Example #43
0
    def loadBefore(self, oid, before_tid):
        """Return the cached non-current revision of *oid* active before
        *before_tid*.

        Returns ``(data, saved_tid, end_tid)`` on a hit, or None (after
        writing a trace record) when no qualifying revision is cached.
        """
        # Non-current revisions are indexed per-oid by start tid.
        noncurrent_for_oid = self.noncurrent.get(u64(oid))
        if noncurrent_for_oid is None:
            self._trace(0x24, oid, "", before_tid)
            return None

        # Latest revision whose start tid is strictly below before_tid.
        items = noncurrent_for_oid.items(None, u64(before_tid)-1)
        if not items:
            self._trace(0x24, oid, "", before_tid)
            return None
        tid, ofs = items[-1]

        # Read the cache-file record at that offset, sanity-checking the
        # on-disk layout: 'a' marker, fixed 34-byte header, pickle, oid.
        self.f.seek(ofs)
        read = self.f.read
        assert read(1) == 'a', (ofs, self.f.tell(), oid, before_tid)
        size, saved_oid, saved_tid, end_tid, lver, ldata = unpack(
            ">I8s8s8shI", read(34))
        assert saved_oid == oid, (ofs, self.f.tell(), oid, saved_oid)
        assert saved_tid == p64(tid), (ofs, self.f.tell(), oid, saved_tid, tid)
        assert lver == 0, (ofs, self.f.tell())
        # Non-current records always carry a real end tid.
        assert end_tid != z64, (ofs, self.f.tell(), oid)
        data = read(ldata)
        assert len(data) == ldata, (ofs, self.f.tell())
        assert read(8) == oid, (ofs, self.f.tell(), oid)

        # The found revision may have ended before before_tid, in which
        # case it is a miss.
        if end_tid < before_tid:
            self._trace(0x24, oid, "", before_tid)
            return None

        self._n_accesses += 1
        self._trace(0x26, oid, "", saved_tid)
        return data, saved_tid, end_tid
Example #44
0
    def testEviction(self):
        """Filling the cache past maxsize must evict some older entries."""
        # Manually override the current maxsize
        cache = ZEO.cache.ClientCache(None, 3395)

        # Trivial test of eviction code.  Doesn't test non-current
        # eviction.
        data = ["z" * i for i in range(100)]
        for i in range(50):
            n = p64(i)
            cache.store(n, n, None, data[i])
            # assertEquals is a deprecated unittest alias; use assertEqual.
            self.assertEqual(len(cache), i + 1)
        # The cache is now almost full.  The next insert
        # should delete some objects.
        n = p64(50)
        cache.store(n, n, None, data[51])
        # assert_ is a deprecated unittest alias; use assertTrue.
        self.assertTrue(len(cache) < 51)
 def test_LongToStringToLong(self):
     """p64 followed by u64/U64 must round-trip every sample value."""
     for value in self.all:
         packed = p64(value)
         self.assertEqual(value, U64(packed), "U64() failed")
         self.assertEqual(value, u64(packed), "u64() failed")
Example #46
0
    def checkLoadBefore(self):
        """Store 10 revisions of one object, then fetch every non-current
        revision back via loadBefore with a tid between two commits."""
        oid = self._storage.new_oid()
        revs = []
        revid = None
        for value in range(10):
            # Successive timestamps must be at least two apart so some
            # timestamp lies unambiguously between two commits.  Each
            # snooze() guarantees the next timestamp is at least one
            # larger (and probably much more) than the previous one.
            snooze()
            snooze()
            revid = self._dostore(oid, revid, data=MinPO(value))
            revs.append(load_current(self._storage, oid))

        prev = u64(revs[0][1])
        for i in range(1, 10):
            tid = revs[i][1]
            cur = u64(tid)
            # Integer midpoint between the two consecutive commit tids.
            middle = (prev + cur) // 2
            assert prev < middle < cur  # else the snooze() trick failed
            prev = cur
            t = self._storage.loadBefore(oid, p64(middle))
            self.assertTrue(t is not None)
            data, start, end = t
            # loadBefore must return the *previous* revision, whose end
            # tid is the current revision's tid.
            self.assertEqual(revs[i - 1][0], data)
            self.assertEqual(tid, end)
Example #47
0
    def testMinKey(self):
        """index.minKey() tracks the smallest key, honors a lower bound,
        and raises ValueError when no qualifying key exists."""
        index = self.index
        index.clear()

        # An empty index should complain.
        self.assertRaises(ValueError, index.minKey)

        # Insert random keys, checking minKey after every insertion.
        correct_min = "\xff" * 8  # bigger than anything we'll add
        for i in range(1000):
            key = p64(random.randrange(100000000))
            index[key] = i
            if key < correct_min:
                correct_min = key
            self.assertEqual(index.minKey(), correct_min)

        # minKey(bound) returns the smallest key >= bound.
        index.clear()
        a = '\000\000\000\000\000\001\000\000'
        b = '\000\000\000\000\000\002\000\000'
        c = '\000\000\000\000\000\003\000\000'
        d = '\000\000\000\000\000\004\000\000'
        index[a] = 1
        index[c] = 2
        self.assertEqual(index.minKey(b), c)
        self.assertRaises(ValueError, index.minKey, d)
Example #48
0
    def loadBefore(self, oid, tid):
        """Return most recent revision of oid before tid committed.

        Returns ``(data, start, end)``; *end* is currently always None
        because the end-tid lookup below is not yet implemented.
        """
        #TODO check recent cache first
        # The serial just below tid's serial bounds the search for the
        # last commit of this oid strictly before tid.
        max_serial = p64(u64(self._s3.getSerialForTid(tid)) - 1)
        keydata = self._s3.findLastCommit(oid, max_serial)

        oid = oid_unrepr(keydata['oid'])
        serial = serial_unrepr(keydata['serial'])
        #get the pickle
        data = self._s3.loadPickle(oid, serial)


        # need another index for this
        #get the end date (should check recent cache first)
        #prefix = 'type:commit,'
        #marker = 'type:commit,oid:%s,' % oid_key
        #rs = self._bucket.get_all_keys(prefix=prefix, marker=marker,
        #                               maxkeys=1)
        #TODO error handling
        #assert len(keys) == 1
        #key = rs[0]
        #enddata = dict_from_key(key.key)
        #if enddata['tid'] > keydata['tid']:
        #    end = p64(serial_unrepr(enddata['tid']))
        #else:
        #    end = None
        end = None

        start = tid_unrepr(keydata['tid'])

        # NOTE(review): end=None conventionally means "still current" to
        # loadBefore callers -- confirm that is acceptable here until the
        # commented-out end-tid lookup above is implemented.
        return data, start, end
Example #49
0
    def checkLoadBeforeUndo(self):
        """Undo recent transactions and check loadBefore sees each state."""
        oid = self._storage.new_oid()
        revid = None
        for i in range(5):
            revid = self._dostore(oid, revid, data=MinPO(i))

        # Undo the most recent txn four times; the stored value
        # alternates between 3 and 4.
        revs = []
        for i in range(4):
            tid = self._storage.undoInfo()[0]["id"]
            self._undo(tid, note="undo %d" % i)
            revs.append(load_current(self._storage, oid))

        prev_tid = None
        for i, (data, tid) in enumerate(revs):
            # Ask for the state just after this revision committed.
            t = self._storage.loadBefore(oid, p64(u64(tid) + 1))
            self.assertEqual(data, t[0])
            self.assertEqual(tid, t[1])
            if prev_tid:
                self.assertTrue(prev_tid < t[1])
            prev_tid = t[1]
            if i < 3:
                # The end tid is the next recorded revision's tid.
                self.assertEqual(revs[i + 1][1], t[2])
            else:
                self.assertEqual(None, t[2])
Example #50
0
    def listHistory(self):
        """Build view data for the visible page of transaction history.

        Returns a list of dicts, newest transaction first, one per
        transaction in ``self.history[self.first_idx:self.last_idx]``.
        """
        # An explicit ?tid= request parameter marks which row is "current".
        if 'tid' in self.request:
            requested_tid = p64(int(self.request['tid'], 0))
        else:
            requested_tid = None

        results = []
        for n, d in enumerate(self.history[self.first_idx:self.last_idx]):
            utid = u64(d.tid)
            ts = TimeStamp(d.tid).timeTime()
            utc_timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(ts))
            local_timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts))
            # Transaction user is conventionally "location user_id"; fall
            # back to treating the whole string as the id.
            try:
                user_location, user_id = d.user.split()
            except ValueError:
                user_location = None
                user_id = d.user
            # Size is only computable when the storage exposes the
            # transaction's file positions (_tend/_tpos).
            try:
                size = d._tend - d._tpos
            except AttributeError:
                size = None
            ext = d.extension if isinstance(d.extension, dict) else {}
            # One entry per object record touched by this transaction.
            objects = []
            for record in d:
                obj = self.jar.get(record.oid)
                url = "@@zodbbrowser?oid=0x%x&tid=0x%x" % (u64(record.oid),
                                                           utid)
                objects.append(dict(
                    oid=u64(record.oid),
                    path=getObjectPath(obj, d.tid),
                    oid_repr=oid_repr(record.oid),
                    class_repr=getObjectType(obj),
                    url=url,
                    repr=IValueRenderer(obj).render(d.tid),
                ))
            if len(objects) == 1:
                summary = '1 object record'
            else:
                summary = '%d object records' % len(objects)
            if size is not None:
                summary += ' (%d bytes)' % size
            results.append(dict(
                index=(self.first_idx + n + 1),
                utc_timestamp=utc_timestamp,
                local_timestamp=local_timestamp,
                user_id=user_id,
                user_location=user_location,
                description=d.description,
                utid=utid,
                current=(d.tid == requested_tid),
                href=self.getUrl(tid=utid),
                size=size,
                summary=summary,
                hidden=(len(objects) > 5),
                objects=objects,
                **ext
            ))
        # With no explicit tid, the newest transaction on the first page
        # is treated as current.
        if results and not requested_tid and self.page == 0:
            results[-1]['current'] = True
        # Newest first.
        return results[::-1]
Example #51
0
    def testMinKey(self):
        """index.minKey() tracks the smallest key, honors a lower bound,
        and raises ValueError when no qualifying key exists."""
        index = self.index
        index.clear()

        # An empty index should complain.
        self.assertRaises(ValueError, index.minKey)

        # Insert random keys, checking minKey after every insertion.
        correct_min = "\xff" * 8  # bigger than anything we'll add
        for i in range(1000):
            key = p64(random.randrange(100000000))
            index[key] = i
            if key < correct_min:
                correct_min = key
            self.assertEqual(index.minKey(), correct_min)

        # minKey(bound) returns the smallest key >= bound.
        index.clear()
        a = '\000\000\000\000\000\001\000\000'
        b = '\000\000\000\000\000\002\000\000'
        c = '\000\000\000\000\000\003\000\000'
        d = '\000\000\000\000\000\004\000\000'
        index[a] = 1
        index[c] = 2
        self.assertEqual(index.minKey(b), c)
        self.assertRaises(ValueError, index.minKey, d)
Example #52
0
    def testMaxKey(self):
        """index.maxKey() tracks the largest key, honors an upper bound,
        and raises ValueError when no qualifying key exists."""
        index = self.index
        index.clear()

        # An empty index should complain.
        self.assertRaises(ValueError, index.maxKey)

        # Insert random keys, checking maxKey after every insertion.
        correct_max = ""   # smaller than anything we'll add
        for i in range(1000):
            key = p64(random.randrange(100000000))
            index[key] = i
            if key > correct_max:
                correct_max = key
            self.assertEqual(index.maxKey(), correct_max)

        # maxKey(bound) returns the largest key <= bound.
        index.clear()
        a = '\000\000\000\000\000\001\000\000'
        b = '\000\000\000\000\000\002\000\000'
        c = '\000\000\000\000\000\003\000\000'
        d = '\000\000\000\000\000\004\000\000'
        index[a] = 1
        index[c] = 2
        self.assertEqual(index.maxKey(b), a)
        self.assertEqual(index.maxKey(d), c)
        self.assertRaises(ValueError, index.maxKey, z64)
Example #53
0
    def testMaxKey(self):
        """index.maxKey() tracks the largest key, honors an upper bound,
        and raises ValueError when no qualifying key exists."""
        index = self.index
        index.clear()

        # An empty index should complain.
        self.assertRaises(ValueError, index.maxKey)

        # Insert random keys, checking maxKey after every insertion.
        correct_max = ""  # smaller than anything we'll add
        for i in range(1000):
            key = p64(random.randrange(100000000))
            index[key] = i
            if key > correct_max:
                correct_max = key
            self.assertEqual(index.maxKey(), correct_max)

        # maxKey(bound) returns the largest key <= bound.
        index.clear()
        a = '\000\000\000\000\000\001\000\000'
        b = '\000\000\000\000\000\002\000\000'
        c = '\000\000\000\000\000\003\000\000'
        d = '\000\000\000\000\000\004\000\000'
        index[a] = 1
        index[c] = 2
        self.assertEqual(index.maxKey(b), a)
        self.assertEqual(index.maxKey(d), c)
        self.assertRaises(ValueError, index.maxKey, z64)
Example #54
0
    def checkLoadBeforeUndo(self):
        """Undo recent transactions and check loadBefore sees each state."""
        # Do several transactions then undo them.
        oid = self._storage.new_oid()
        revid = None
        for i in range(5):
            revid = self._dostore(oid, revid, data=MinPO(i))
        revs = []
        for i in range(4):
            info = self._storage.undoInfo()
            tid = info[0]["id"]
            # Always undo the most recent txn, so the value will
            # alternate between 3 and 4.
            self._undo(tid, [oid], note="undo %d" % i)
            revs.append(self._storage.loadEx(oid, ""))

        prev_tid = None
        for i, (data, tid, ver) in enumerate(revs):
            t = self._storage.loadBefore(oid, p64(u64(tid) + 1))
            self.assertEqual(data, t[0])
            self.assertEqual(tid, t[1])
            if prev_tid:
                # assert_ is a deprecated unittest alias; use assertTrue.
                self.assertTrue(prev_tid < t[1])
            prev_tid = t[1]
            if i < 3:
                self.assertEqual(revs[i + 1][1], t[2])
            else:
                self.assertEqual(None, t[2])
Example #55
0
    def checkLoadBeforeConsecutiveTids(self):
        """With tids 1, 2, 3 committed, loadBefore(oid, 2) must return the
        revision that started at tid 1 and ended at tid 2."""
        eq = self.assertEqual
        oid = self._storage.new_oid()

        def commit(tid, revid, value):
            # Store one revision of oid under an explicitly chosen tid,
            # aborting the transaction on any failure.
            data = zodb_pickle(MinPO(value))
            t = transaction.Transaction()
            try:
                self._storage.tpc_begin(t, p64(tid))
                r1 = self._storage.store(oid, revid, data, '', t)
                # Finish the transaction
                r2 = self._storage.tpc_vote(t)
                newrevid = handle_serials(oid, r1, r2)
                self._storage.tpc_finish(t)
            except:
                self._storage.tpc_abort(t)
                raise
            return newrevid

        revid1 = commit(1, None, 1)
        revid2 = commit(2, revid1, 2)
        commit(3, revid2, 3)
        data, start_tid, end_tid = self._storage.loadBefore(oid, p64(2))
        eq(zodb_unpickle(data), MinPO(1))
        eq(u64(start_tid), 1)
        eq(u64(end_tid), 2)
Example #56
0
    def checkLoadBefore(self):
        """Store 10 revisions and fetch each non-current revision back."""
        # Store 10 revisions of one object and then make sure that we
        # can get all the non-current revisions back.
        oid = self._storage.new_oid()
        revs = []
        revid = None
        for i in range(10):
            # We need to ensure that successive timestamps are at least
            # two apart, so that a timestamp exists that's unambiguously
            # between successive timestamps.  Each call to snooze()
            # guarantees that the next timestamp will be at least one
            # larger (and probably much more than that) than the previous
            # one.
            snooze()
            snooze()
            revid = self._dostore(oid, revid, data=MinPO(i))
            revs.append(self._storage.loadEx(oid, ""))

        prev = u64(revs[0][1])
        for i in range(1, 10):
            tid = revs[i][1]
            cur = u64(tid)
            middle = prev + (cur - prev) // 2
            assert prev < middle < cur  # else the snooze() trick failed
            prev = cur
            t = self._storage.loadBefore(oid, p64(middle))
            # assert_ is a deprecated unittest alias; use assertTrue.
            self.assertTrue(t is not None)
            data, start, end = t
            self.assertEqual(revs[i - 1][0], data)
            self.assertEqual(tid, end)
Example #57
0
    def copy(self, oid, serial, data, prev_txn, txnpos, datapos):
        """Write one recovery data record (or a backpointer) for oid/serial.

        Records the new position in the temporary index so later records
        in this transaction can reference it.
        """
        backpointer = self._resolve_backpointer(prev_txn, oid, data)
        prev_offset = self._index.get(oid, 0)

        # Position this record will occupy in the storage, recorded in
        # the temp index before anything is written.
        self._tindex[oid] = datapos

        # A valid backpointer means the pickle itself need not be written.
        if backpointer:
            data = None
        dlen = len(data) if data is not None else 0

        # Header first...
        header = DataHeader(oid, serial, prev_offset, txnpos, 0, dlen)
        self._file.write(header.asString())

        # ...then either the pickle data or a backpointer.
        if data is not None:
            self._file.write(data)
        elif backpointer:
            self._file.write(p64(backpointer))
        else:
            # Write a zero backpointer, which indicates an
            # un-creation transaction.
            self._file.write(z64)
Example #58
0
 def checkLongToStringToLong(self):
     """p64 followed by u64/U64 must round-trip every sample value."""
     for num in self.all:
         s = p64(num)
         # assertEquals is a deprecated unittest alias; use assertEqual.
         self.assertEqual(num, U64(s), "U64() failed")
         self.assertEqual(num, u64(s), "u64() failed")