Example #1
def read_txn_header(f, pos, file_size, outp, ltid):
    # Read the transaction record
    f.seek(pos)
    h = f.read(23)
    if len(h) < 23:
        raise EOFError

    tid, stl, status, ul, dl, el = unpack(">8s8scHHH", h)
    status = as_text(status)
    tl = u64(stl)

    if pos + (tl + 8) > file_size:
        error("bad transaction length at %s", pos)

    if tl < (23 + ul + dl + el):
        error("invalid transaction length, %s, at %s", tl, pos)

    if ltid and tid < ltid:
        error("time-stamp reducation %s < %s, at %s", u64(tid), u64(ltid), pos)

    if status == "c":
        truncate(f, pos, file_size, outp)
        raise EOFError

    if status not in " up":
        error("invalid status, %r, at %s", status, pos)

    tpos = pos
    tend = tpos + tl

    if status == "u":
        # Undone transaction, skip it
        f.seek(tend)
        h = f.read(8)
        if h != stl:
            error("inconsistent transaction length at %s", pos)
        pos = tend + 8
        return pos, None, tid

    pos = tpos + (23 + ul + dl + el)
    user = f.read(ul)
    description = f.read(dl)
    if el:
        try:
            e = loads(f.read(el))
        except Exception:
            e = {}
    else:
        e = {}

    result = TransactionRecord(tid, status, user, description, e, pos, tend,
                               f, tpos)
    pos = tend

    # Read the (intentionally redundant) transaction length
    f.seek(pos)
    h = f.read(8)
    if h != stl:
        error("redundant transaction length check failed at %s", pos)
    pos += 8

    return pos, result, tid
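
The format string ">8s8scHHH" used above describes a fixed 23-byte transaction header: an 8-byte transaction id, an 8-byte record length, a 1-byte status code, and three big-endian unsigned shorts giving the lengths of the user, description, and extension fields. A minimal standalone sketch (synthetic values, standard library only) of packing and decoding such a header:

from struct import calcsize, pack, unpack

TXN_HDR_FMT = ">8s8scHHH"    # tid, length, status, user/desc/ext lengths
assert calcsize(TXN_HDR_FMT) == 23   # matches the f.read(23) above

# Build a synthetic header: the tid and the stored length are 8-byte
# big-endian strings, the status is a single byte.
hdr = pack(TXN_HDR_FMT, b"\x00" * 8, (42).to_bytes(8, "big"), b" ", 3, 5, 0)
tid, stl, status, ul, dl, el = unpack(TXN_HDR_FMT, hdr)
print(int.from_bytes(stl, "big"), status, ul, dl, el)   # 42 b' ' 3 5 0
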
Example #2
 def checkPackAllRevisions(self):
     self._initroot()
     eq = self.assertEqual
     raises = self.assertRaises
     # Create a `persistent' object
     obj = self._newobj()
     oid = obj.getoid()
     obj.value = 1
     # Commit three different revisions
     revid1 = self._dostoreNP(oid, data=pdumps(obj))
     obj.value = 2
     revid2 = self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
     obj.value = 3
     revid3 = self._dostoreNP(oid, revid=revid2, data=pdumps(obj))
     # Now make sure all three revisions can be extracted
     data = self._storage.loadSerial(oid, revid1)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 1)
     data = self._storage.loadSerial(oid, revid2)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     # Now pack all transactions; busy-wait until the clock advances so
     # that the pack time is strictly greater than the last commit time.
     now = packtime = time.time()
     while packtime <= now:
         packtime = time.time()
     self._storage.pack(packtime, referencesf)
     # All revisions of the object should be gone, since there is no
     # reference from the root object to this object.
     raises(KeyError, self._storage.loadSerial, oid, revid1)
     raises(KeyError, self._storage.loadSerial, oid, revid2)
     raises(KeyError, self._storage.loadSerial, oid, revid3)
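
The loop before pack() above is a busy-wait: it spins until time.time() returns a value strictly greater than it did at the last commit, so the pack time cannot coincide with the final transaction's timestamp. The same idiom as a standalone helper (a sketch; the name is hypothetical):

import time

def strictly_later_time():
    # Spin until the clock advances, then return the new reading.
    # The result is strictly greater than time.time() was on entry.
    now = t = time.time()
    while t <= now:
        t = time.time()
    return t

packtime = strictly_later_time()
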
Example #3
 def read_meta(self):
     """Load user, descr, and ext attributes."""
     self.user = ""
     self.descr = ""
     self.ext = {}
     if not (self.user_len or self.descr_len or self.ext_len):
         return
     self._file.seek(self._pos + TRANS_HDR_LEN)
     if self.user_len:
         self.user = self._file.read(self.user_len)
     if self.descr_len:
         self.descr = self._file.read(self.descr_len)
     if self.ext_len:
         self._ext = self._file.read(self.ext_len)
         self.ext = loads(self._ext)
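
TRANS_HDR_LEN here is presumably the same fixed 23-byte header decoded in Example #1, so the user, description, and extension fields sit back to back immediately after it. A small sketch (hypothetical helper name) of the offset arithmetic read_meta relies on:

TRANS_HDR_LEN = 23   # size of the ">8s8scHHH" header (see Example #1)

def meta_offsets(pos, user_len, descr_len, ext_len):
    # Absolute file offsets of the three variable-length fields for a
    # transaction record starting at `pos`.
    user_off = pos + TRANS_HDR_LEN
    descr_off = user_off + user_len
    ext_off = descr_off + descr_len
    return user_off, descr_off, ext_off

print(meta_offsets(100, 3, 5, 0))   # (123, 126, 131)
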
Example #4
    def undoLog(self, first, last, filter=None, block=0):
        # XXX: undoLog is broken
        if last < 0:
            # See FileStorage.py for explanation
            last = first - last

        # First get a list of transactions from all storage nodes.
        # Each storage node will return TIDs only for UP_TO_DATE state and
        # FEEDING state cells
        queue = self._thread_container.queue
        packet = Packets.AskTIDs(first, last, INVALID_PARTITION)
        tid_set = set()
        for storage_node in self.pt.getNodeSet(True):
            conn = self.getStorageConnection(storage_node)
            if conn is None:
                continue
            conn.ask(packet, queue=queue, tid_set=tid_set)

        # Wait for answers from all storages.
        # TODO: Results are incomplete when readable cells move concurrently
        #       from one storage to another. We detect when this happens and
        #       retry.
        self.waitResponses(queue)

        # Reorder tids
        ordered_tids = sorted(tid_set, reverse=True)
        logging.debug("UndoLog tids %s", map(dump, ordered_tids))
        # For each transaction, get info
        undo_info = []
        append = undo_info.append
        for tid in ordered_tids:
            (txn_info, txn_ext) = self._getTransactionInformation(tid)
            if filter is None or filter(txn_info):
                txn_info.pop('packed')
                txn_info.pop('oids')
                if txn_ext:
                    txn_info.update(loads(txn_ext))
                append(txn_info)
                if len(undo_info) >= last - first:
                    break
        # Check that we return at least one element; otherwise call
        # again with an extended range.
        if len(undo_info) == 0 and not block:
            undo_info = self.undoLog(first=first,
                                     last=last * 5,
                                     filter=filter,
                                     block=1)
        return undo_info
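
As the FileStorage reference in the comment suggests, a negative `last` is a count rather than an index: `last = first - last` turns undoLog(0, -20) into the half-open range [0, 20). A sketch of the normalization (hypothetical function name):

def normalize_undo_range(first, last):
    # A negative `last` means "up to -last entries starting at first".
    if last < 0:
        last = first - last   # e.g. first=0, last=-20  ->  last=20
    return first, last

assert normalize_undo_range(0, -20) == (0, 20)
assert normalize_undo_range(5, 30) == (5, 30)
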
Example #5
 def history(self, oid, size=1, filter=None):
     packet = Packets.AskObjectHistory(oid, 0, size)
     result = []
     # history_list is already sorted descending (by the storage)
     for serial, size in self._askStorageForRead(oid, packet):
         txn_info, txn_ext = self._getTransactionInformation(serial)
         # create history dict
         del txn_info['id']
         del txn_info['oids']
         del txn_info['packed']
         txn_info['tid'] = serial
         txn_info['version'] = ''
         txn_info['size'] = size
         if filter is None or filter(txn_info):
             result.append(txn_info)
         if txn_ext:
             txn_info.update(loads(txn_ext))
     return result
Example #6
 def transactionLog(self, start, stop, limit):
     tid_list = []
     # request a tid list for each partition
     for offset in xrange(self.pt.getPartitions()):
         r = self._askStorageForRead(offset,
             Packets.AskTIDsFrom(start, stop, limit, offset))
         if r:
             tid_list = list(heapq.merge(tid_list, r))
             if len(tid_list) >= limit:
                 del tid_list[limit:]
                 stop = tid_list[-1]
     # request transaction information
     txn_list = []
     append = txn_list.append
     tid = None
     for tid in tid_list:
         (txn_info, txn_ext) = self._getTransactionInformation(tid)
         txn_info['ext'] = loads(txn_ext) if txn_ext else {}
         append(txn_info)
     return (tid, txn_list)
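
heapq.merge folds each partition's already-sorted TID list into the running sorted result without a full re-sort; truncating to `limit` and tightening `stop` keeps later partition queries from fetching TIDs that can no longer make the cut. The merge-and-truncate step in isolation (hypothetical helper name):

import heapq

def merge_limited(tid_list, new_tids, limit):
    # Merge two ascending lists, keeping only the first `limit` items.
    merged = list(heapq.merge(tid_list, new_tids))
    del merged[limit:]
    return merged

print(merge_limited([1, 4, 9], [2, 3, 10], 4))   # [1, 2, 3, 4]
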
Example #7
def serializeext(ext):
    # ZODB iteration API gives us depickled extensions and only that.
    # So for dumping in raw form we need to pickle it back hopefully getting
    # something close to original raw data.

    if not ext:
        # ZODB usually does this: encode {} as empty "", not as "}."
        # https://github.com/zopefoundation/ZODB/blob/2490ae09/src/ZODB/BaseStorage.py#L194
        #
        # and here are decoders:
        # https://github.com/zopefoundation/ZODB/blob/2490ae09/src/ZODB/FileStorage/FileStorage.py#L1145
        # https://github.com/zopefoundation/ZODB/blob/2490ae09/src/ZODB/FileStorage/FileStorage.py#L1990
        # https://github.com/zopefoundation/ZODB/blob/2490ae09/src/ZODB/fstools.py#L66
        # ...
        return b""

    buf = BytesIO()
    p = XPickler(buf, _protocol)
    p.dump(ext)
    out = buf.getvalue()
    #out = pickletools.optimize(out) # remove unneeded PUT opcodes
    assert loads(out) == ext
    return out
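
The comment about encoding {} as an empty string rather than as "}." can be verified directly: in the pickle protocol, `}` is the EMPTY_DICT opcode and `.` is STOP, so the two-byte pickle b"}." round-trips to an empty dict:

import pickle
import pickletools

assert pickle.loads(b"}.") == {}   # '}' = EMPTY_DICT, '.' = STOP
pickletools.dis(b"}.")             # prints the two opcodes above
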
Example #8
 def __init__(self, db, tid):
     self._oid_list, user, desc, ext, _, _ = db.getTransaction(tid)
     super(TransactionRecord, self).__init__(tid, ' ', user, desc,
                                             loads(ext) if ext else {})
     self._db = db
Example #9
 def checkPackOnlyOneObject(self):
     eq = self.assertEqual
     raises = self.assertRaises
     loads = self._makeloader()
     # Create a root object.  This can't be an instance of Object,
     # otherwise the pickling machinery will serialize it as a persistent
     # id and not as an object that contains references (persistent ids) to
     # other objects.
     root = Root()
     # Create a persistent object, with some initial state
     obj1 = self._newobj()
     oid1 = obj1.getoid()
     # Create another persistent object, with some initial state.
     obj2 = self._newobj()
     oid2 = obj2.getoid()
     # Link the root object to the persistent objects, in order to keep
     # them alive.  Store the root object.
     root.obj1 = obj1
     root.obj2 = obj2
     root.value = 0
     revid0 = self._dostoreNP(ZERO, data=dumps(root))
     # Make sure the root can be retrieved
     data, revid = self._storage.load(ZERO, '')
     eq(revid, revid0)
     eq(loads(data).value, 0)
     # Commit three different revisions of the first object
     obj1.value = 1
     revid1 = self._dostoreNP(oid1, data=pdumps(obj1))
     obj1.value = 2
     revid2 = self._dostoreNP(oid1, revid=revid1, data=pdumps(obj1))
     obj1.value = 3
     revid3 = self._dostoreNP(oid1, revid=revid2, data=pdumps(obj1))
     # Now make sure all three revisions can be extracted
     data = self._storage.loadSerial(oid1, revid1)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 1)
     data = self._storage.loadSerial(oid1, revid2)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 2)
     data = self._storage.loadSerial(oid1, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 3)
     # Now commit a revision of the second object
     obj2.value = 11
     revid4 = self._dostoreNP(oid2, data=pdumps(obj2))
     # And make sure the revision can be extracted
     data = self._storage.loadSerial(oid2, revid4)
     pobj = loads(data)
     eq(pobj.getoid(), oid2)
     eq(pobj.value, 11)
     # Now pack just revisions 1 and 2 of object1.  Object1's current
     # revision should stay alive because it's pointed to by the root, as
     # should Object2's current revision.
     now = packtime = time.time()
     while packtime <= now:
         packtime = time.time()
     self._storage.pack(packtime, referencesf)
     # Make sure the revisions are gone, but that object zero, object2, and
     # revision 3 of object1 are still there and correct.
     data, revid = self._storage.load(ZERO, '')
     eq(revid, revid0)
     eq(loads(data).value, 0)
     raises(KeyError, self._storage.loadSerial, oid1, revid1)
     raises(KeyError, self._storage.loadSerial, oid1, revid2)
     data = self._storage.loadSerial(oid1, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 3)
     data, revid = self._storage.load(oid1, '')
     eq(revid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 3)
     data, revid = self._storage.load(oid2, '')
     eq(revid, revid4)
     eq(loads(data).value, 11)
     data = self._storage.loadSerial(oid2, revid4)
     pobj = loads(data)
     eq(pobj.getoid(), oid2)
     eq(pobj.value, 11)
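
These pack tests hand `referencesf` to pack() as the reference-extraction callback: given one data record's pickle, it returns the oids that record references, which is how pack distinguishes reachable revisions from garbage. A self-contained sketch of that contract using plain pickle persistent ids (the real implementation is ZODB.serialize.referencesf; all names below are illustrative):

import io
import pickle

class RefPickler(pickle.Pickler):
    # Treat strings starting with "oid:" as persistent references.
    def persistent_id(self, obj):
        if isinstance(obj, str) and obj.startswith("oid:"):
            return obj
        return None

def extract_references(data):
    # Collect every persistent id seen while unpickling one record.
    refs = []
    up = pickle.Unpickler(io.BytesIO(data))
    up.persistent_load = lambda pid: refs.append(pid) or pid
    up.load()
    return refs

buf = io.BytesIO()
RefPickler(buf).dump({"child": "oid:0001", "n": 3})
print(extract_references(buf.getvalue()))   # ['oid:0001']
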
Example #10
 def checkPackJustOldRevisions(self):
     eq = self.assertEqual
     raises = self.assertRaises
     loads = self._makeloader()
     # Create a root object.  This can't be an instance of Object,
     # otherwise the pickling machinery will serialize it as a persistent
     # id and not as an object that contains references (persistent ids) to
     # other objects.
     root = Root()
     # Create a persistent object, with some initial state
     obj = self._newobj()
     oid = obj.getoid()
     # Link the root object to the persistent object, in order to keep the
     # persistent object alive.  Store the root object.
     root.obj = obj
     root.value = 0
     revid0 = self._dostoreNP(ZERO, data=dumps(root))
     # Make sure the root can be retrieved
     data, revid = self._storage.load(ZERO, '')
     eq(revid, revid0)
     eq(loads(data).value, 0)
     # Commit three different revisions of the other object
     obj.value = 1
     revid1 = self._dostoreNP(oid, data=pdumps(obj))
     obj.value = 2
     revid2 = self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
     obj.value = 3
     revid3 = self._dostoreNP(oid, revid=revid2, data=pdumps(obj))
     # Now make sure all three revisions can be extracted
     data = self._storage.loadSerial(oid, revid1)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 1)
     data = self._storage.loadSerial(oid, revid2)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     # Now pack just revisions 1 and 2.  The object's current revision
     # should stay alive because it's pointed to by the root.
     now = packtime = time.time()
     while packtime <= now:
         packtime = time.time()
     self._storage.pack(packtime, referencesf)
     # Make sure the revisions are gone, but that object zero and revision
     # 3 are still there and correct
     data, revid = self._storage.load(ZERO, '')
     eq(revid, revid0)
     eq(loads(data).value, 0)
     raises(KeyError, self._storage.loadSerial, oid, revid1)
     raises(KeyError, self._storage.loadSerial, oid, revid2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     data, revid = self._storage.load(oid, '')
     eq(revid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
Example #11
 def checkPackOnlyOneObject(self):
     eq = self.assertEqual
     raises = self.assertRaises
     loads = self._makeloader()
     # Create a root object.  This can't be an instance of Object,
     # otherwise the pickling machinery will serialize it as a persistent
     # id and not as an object that contains references (persistent ids) to
     # other objects.
     root = Root()
     # Create a persistent object, with some initial state
     obj1 = self._newobj()
     oid1 = obj1.getoid()
     # Create another persistent object, with some initial state.
     obj2 = self._newobj()
     oid2 = obj2.getoid()
     # Link the root object to the persistent objects, in order to keep
     # them alive.  Store the root object.
     root.obj1 = obj1
     root.obj2 = obj2
     root.value = 0
     revid0 = self._dostoreNP(ZERO, data=dumps(root))
     # Make sure the root can be retrieved
     data, revid = load_current(self._storage, ZERO)
     eq(revid, revid0)
     eq(loads(data).value, 0)
     # Commit three different revisions of the first object
     obj1.value = 1
     revid1 = self._dostoreNP(oid1, data=pdumps(obj1))
     obj1.value = 2
     revid2 = self._dostoreNP(oid1, revid=revid1, data=pdumps(obj1))
     obj1.value = 3
     revid3 = self._dostoreNP(oid1, revid=revid2, data=pdumps(obj1))
     # Now make sure all three revisions can be extracted
     data = self._storage.loadSerial(oid1, revid1)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 1)
     data = self._storage.loadSerial(oid1, revid2)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 2)
     data = self._storage.loadSerial(oid1, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 3)
     # Now commit a revision of the second object
     obj2.value = 11
     revid4 = self._dostoreNP(oid2, data=pdumps(obj2))
     # And make sure the revision can be extracted
     data = self._storage.loadSerial(oid2, revid4)
     pobj = loads(data)
     eq(pobj.getoid(), oid2)
     eq(pobj.value, 11)
     # Now pack just revisions 1 and 2 of object1.  Object1's current
     # revision should stay alive because it's pointed to by the root, as
     # should Object2's current revision.
     now = packtime = time.time()
     while packtime <= now:
         packtime = time.time()
     self._storage.pack(packtime, referencesf)
     # Make sure the revisions are gone, but that object zero, object2, and
     # revision 3 of object1 are still there and correct.
     data, revid = load_current(self._storage, ZERO)
     eq(revid, revid0)
     eq(loads(data).value, 0)
     raises(KeyError, self._storage.loadSerial, oid1, revid1)
     raises(KeyError, self._storage.loadSerial, oid1, revid2)
     data = self._storage.loadSerial(oid1, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 3)
     data, revid = load_current(self._storage, oid1)
     eq(revid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid1)
     eq(pobj.value, 3)
     data, revid = load_current(self._storage, oid2)
     eq(revid, revid4)
     eq(loads(data).value, 11)
     data = self._storage.loadSerial(oid2, revid4)
     pobj = loads(data)
     eq(pobj.getoid(), oid2)
     eq(pobj.value, 11)
Example #12
 def checkPackJustOldRevisions(self):
     eq = self.assertEqual
     raises = self.assertRaises
     loads = self._makeloader()
     # Create a root object.  This can't be an instance of Object,
     # otherwise the pickling machinery will serialize it as a persistent
     # id and not as an object that contains references (persistent ids) to
     # other objects.
     root = Root()
     # Create a persistent object, with some initial state
     obj = self._newobj()
     oid = obj.getoid()
     # Link the root object to the persistent object, in order to keep the
     # persistent object alive.  Store the root object.
     root.obj = obj
     root.value = 0
     revid0 = self._dostoreNP(ZERO, data=dumps(root))
     # Make sure the root can be retrieved
     data, revid = load_current(self._storage, ZERO)
     eq(revid, revid0)
     eq(loads(data).value, 0)
     # Commit three different revisions of the other object
     obj.value = 1
     revid1 = self._dostoreNP(oid, data=pdumps(obj))
     obj.value = 2
     revid2 = self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
     obj.value = 3
     revid3 = self._dostoreNP(oid, revid=revid2, data=pdumps(obj))
     # Now make sure all three revisions can be extracted
     data = self._storage.loadSerial(oid, revid1)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 1)
     data = self._storage.loadSerial(oid, revid2)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     # Now pack just revisions 1 and 2.  The object's current revision
     # should stay alive because it's pointed to by the root.
     now = packtime = time.time()
     while packtime <= now:
         packtime = time.time()
     self._storage.pack(packtime, referencesf)
     # Make sure the revisions are gone, but that object zero and revision
     # 3 are still there and correct
     data, revid = load_current(self._storage, ZERO)
     eq(revid, revid0)
     eq(loads(data).value, 0)
     raises(KeyError, self._storage.loadSerial, oid, revid1)
     raises(KeyError, self._storage.loadSerial, oid, revid2)
     data = self._storage.loadSerial(oid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
     data, revid = load_current(self._storage, oid)
     eq(revid, revid3)
     pobj = loads(data)
     eq(pobj.getoid(), oid)
     eq(pobj.value, 3)
Example #13
 def extension(self):
     if not self.extension_bytes:
         return {}
     return loads(self.extension_bytes)
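
This final accessor keeps the raw pickle bytes around and defers deserialization until the extension is actually requested, with the empty-bytes convention from Example #7 mapping back to {}. A runnable sketch of the same pattern (hypothetical class name):

from pickle import dumps, loads

class Record:
    def __init__(self, extension_bytes=b""):
        # Raw pickle as stored; b"" is the conventional "no extension".
        self.extension_bytes = extension_bytes

    @property
    def extension(self):
        if not self.extension_bytes:
            return {}
        return loads(self.extension_bytes)

print(Record(dumps({"user_name": "alice"})).extension)   # {'user_name': 'alice'}
print(Record().extension)                                # {}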