def checkLoadBeforeConsecutiveTids(self):
    """Verify loadBefore() boundaries when revisions have consecutive tids.

    Stores three revisions of one object at explicit tids 1, 2 and 3,
    then checks that loadBefore(oid, 2) returns the first revision with
    the (start_tid, end_tid) window (1, 2).
    """
    eq = self.assertEqual
    oid = self._storage.new_oid()

    def helper(tid, revid, x):
        # Store one revision of `oid` at an explicit transaction id,
        # driving the full two-phase commit, and return the new revid.
        data = zodb_pickle(MinPO(x))
        t = transaction.Transaction()
        try:
            self._storage.tpc_begin(t, p64(tid))
            r1 = self._storage.store(oid, revid, data, '', t)
            # Finish the transaction
            r2 = self._storage.tpc_vote(t)
            newrevid = handle_serials(oid, r1, r2)
            self._storage.tpc_finish(t)
        except:
            # NOTE(review): bare except also catches KeyboardInterrupt,
            # but it re-raises after aborting, so nothing is swallowed.
            self._storage.tpc_abort(t)
            raise
        return newrevid

    revid1 = helper(1, None, 1)
    revid2 = helper(2, revid1, 2)
    revid3 = helper(3, revid2, 3)
    data, start_tid, end_tid = self._storage.loadBefore(oid, p64(2))
    eq(zodb_unpickle(data), MinPO(1))
    eq(u64(start_tid), 1)
    eq(u64(end_tid), 2)
def get_invalidations(self, storage_id, tid):
    """Return a tid and list of all objects invalidation since tid.

    The tid is the most recent transaction id seen by the client.

    Returns None if it is unable to provide a complete list
    of invalidations for tid.  In this case, client should
    do full cache verification.
    """
    invq = self.invq[storage_id]

    # We make a copy of invq because it might be modified by a
    # foreign (other than main thread) calling invalidate above.
    invq = invq[:]

    if not invq:
        log("invq empty")
        return None, []

    earliest_tid = invq[-1][0]
    if earliest_tid > tid:
        # The queue does not reach back far enough to cover `tid`.
        # (Fixed message typo: "to old" -> "too old".)
        log("tid too old for invq %s < %s" % (u64(tid), u64(earliest_tid)))
        return None, []

    # Collect the oids invalidated by every transaction newer than `tid`.
    oids = {}
    for _tid, L in invq:
        if _tid <= tid:
            break
        for key in L:
            oids[key] = 1

    latest_tid = invq[0][0]
    return latest_tid, oids.keys()
def fsdump(path, file=None, with_offset=1):
    """Print a text dump of every transaction and data record in the
    FileStorage at `path` to `file` (stdout by default)."""
    it = FileIterator(path)
    for tnum, txn in enumerate(it):
        if with_offset:
            print(("Trans #%05d tid=%016x time=%s offset=%d" %
                   (tnum, u64(txn.tid), TimeStamp(txn.tid), txn._pos)),
                  file=file)
        else:
            print(("Trans #%05d tid=%016x time=%s" %
                   (tnum, u64(txn.tid), TimeStamp(txn.tid))), file=file)
        print((" status=%r user=%r description=%r" %
               (txn.status, txn.user, txn.description)), file=file)
        for rnum, rec in enumerate(txn):
            if rec.data is None:
                fullclass = "undo or abort of object creation"
                size = ""
            else:
                modname, classname = get_pickle_metadata(rec.data)
                fullclass = "%s.%s" % (modname, classname)
                size = " size=%d" % len(rec.data)
            if rec.data_txn:
                # It would be nice to print the transaction number
                # (i) but it would be expensive to keep track of.
                bp = " bp=%016x" % u64(rec.data_txn)
            else:
                bp = ""
            print((" data #%05d oid=%016x%s class=%s%s" %
                   (rnum, u64(rec.oid), size, fullclass, bp)), file=file)
    it.close()
def listHistory(self):
    """List transactions that modified a persistent object."""
    state = self._loadHistoricalState()
    results = []
    for n, d in enumerate(self.history):
        # Render the transaction time both in UTC and local time.
        utc_timestamp = str(time.strftime('%Y-%m-%d %H:%M:%S',
                                          time.gmtime(d['time'])))
        local_timestamp = str(time.strftime('%Y-%m-%d %H:%M:%S',
                                            time.localtime(d['time'])))
        try:
            # presumably user_name is "<location> <id>" -- TODO confirm
            user_location, user_id = d['user_name'].split()
        except ValueError:
            user_location = None
            user_id = d['user_name']
        url = self.getUrl(tid=u64(d['tid']))
        # Mark the entry being viewed when an explicit tid was requested.
        current = (d['tid'] == self.state.tid and
                   self.state.requestedTid is not None)
        curState = state[n]['state']
        oldState = state[n + 1]['state']
        diff = compareDictsHTML(curState, oldState, d['tid'])
        results.append(dict(utid=u64(d['tid']), href=url, current=current,
                            error=state[n]['error'], diff=diff,
                            user_id=user_id, user_location=user_location,
                            utc_timestamp=utc_timestamp,
                            local_timestamp=local_timestamp, **d))
    # number in reverse order
    for i in range(len(results)):
        results[i]['index'] = len(results) - i
    return results
def loadBefore(self, oid, before_tid):
    """Return (data, start_tid, end_tid) for the newest cached
    non-current revision of `oid` older than `before_tid`,
    or None on a cache miss."""
    noncurrent_for_oid = self.noncurrent.get(u64(oid))
    if noncurrent_for_oid is None:
        self._trace(0x24, oid, "", before_tid)  # loadBefore miss
        return None
    # Latest record whose tid is strictly less than before_tid.
    items = noncurrent_for_oid.items(None, u64(before_tid)-1)
    if not items:
        self._trace(0x24, oid, "", before_tid)
        return None
    tid, ofs = items[-1]
    self.f.seek(ofs)
    read = self.f.read
    # 'a' marks an allocated (live) record at this file offset.
    assert read(1) == 'a', (ofs, self.f.tell(), oid, before_tid)
    size, saved_oid, saved_tid, end_tid, lver, ldata = unpack(
        ">I8s8s8sHI", read(34))
    assert saved_oid == oid, (ofs, self.f.tell(), oid, saved_oid)
    assert saved_tid == p64(tid), (ofs, self.f.tell(), oid, saved_tid, tid)
    # Non-current records always carry a real end tid.
    assert end_tid != z64, (ofs, self.f.tell(), oid)
    assert lver == 0, "Versions aren't supported"
    data = read(ldata)
    assert len(data) == ldata, (ofs, self.f.tell())
    assert read(8) == oid, (ofs, self.f.tell(), oid)
    if end_tid < before_tid:
        # The record expired before the requested time: treat as a miss.
        self._trace(0x24, oid, "", before_tid)
        return None
    self._n_accesses += 1
    self._trace(0x26, oid, "", saved_tid)  # loadBefore hit
    return data, saved_tid, end_tid
def loadBefore(self, oid, before_tid):
    """Return (data, start_tid, end_tid) for the newest cached
    non-current revision of `oid` older than `before_tid`,
    or None on a cache miss."""
    noncurrent_for_oid = self.noncurrent.get(u64(oid))
    if noncurrent_for_oid is None:
        self._trace(0x24, oid, "", before_tid)  # loadBefore miss
        return None
    # Latest record whose tid is strictly less than before_tid.
    items = noncurrent_for_oid.items(None, u64(before_tid)-1)
    if not items:
        self._trace(0x24, oid, "", before_tid)
        return None
    tid, ofs = items[-1]
    self.f.seek(ofs)
    read = self.f.read
    # 'a' marks an allocated (live) record at this file offset.
    assert read(1) == 'a', (ofs, self.f.tell(), oid, before_tid)
    # NOTE(review): this variant uses 'h' (signed short) for lver where
    # a sibling implementation uses 'H'; both decode 0 the same way.
    size, saved_oid, saved_tid, end_tid, lver, ldata = unpack(
        ">I8s8s8shI", read(34))
    assert saved_oid == oid, (ofs, self.f.tell(), oid, saved_oid)
    assert saved_tid == p64(tid), (ofs, self.f.tell(), oid, saved_tid, tid)
    assert lver == 0, (ofs, self.f.tell())
    # Non-current records always carry a real end tid.
    assert end_tid != z64, (ofs, self.f.tell(), oid)
    data = read(ldata)
    assert len(data) == ldata, (ofs, self.f.tell())
    assert read(8) == oid, (ofs, self.f.tell(), oid)
    if end_tid < before_tid:
        # The record expired before the requested time: treat as a miss.
        self._trace(0x24, oid, "", before_tid)
        return None
    self._n_accesses += 1
    self._trace(0x26, oid, "", saved_tid)  # loadBefore hit
    return data, saved_tid, end_tid
def checkLoadBefore(self):
    """Store 10 revisions of one object and verify that loadBefore()
    returns each non-current revision with the correct end tid."""
    # Store 10 revisions of one object and then make sure that we
    # can get all the non-current revisions back.
    oid = self._storage.new_oid()
    revs = []
    revid = None
    for i in range(10):
        # We need to ensure that successive timestamps are at least
        # two apart, so that a timestamp exists that's unambiguously
        # between successive timestamps.  Each call to snooze()
        # guarantees that the next timestamp will be at least one
        # larger (and probably much more than that) than the previous
        # one.
        snooze()
        snooze()
        revid = self._dostore(oid, revid, data=MinPO(i))
        revs.append(load_current(self._storage, oid))
    prev = u64(revs[0][1])
    for i in range(1, 10):
        tid = revs[i][1]
        cur = u64(tid)
        middle = prev + (cur - prev) // 2
        assert prev < middle < cur  # else the snooze() trick failed
        prev = cur
        t = self._storage.loadBefore(oid, p64(middle))
        self.assertTrue(t is not None)
        data, start, end = t
        self.assertEqual(revs[i-1][0], data)
        self.assertEqual(tid, end)
def checkKnownConstants(self):
    # Spot-check p64/u64/U64 against hand-built big-endian byte strings
    # (Python 2 era: long literals, native-str bytes).
    self.assertEquals("\000\000\000\000\000\000\000\001", p64(1))
    self.assertEquals("\000\000\000\001\000\000\000\000", p64(1L<<32))
    self.assertEquals(u64("\000\000\000\000\000\000\000\001"), 1)
    self.assertEquals(U64("\000\000\000\000\000\000\000\001"), 1)
    self.assertEquals(u64("\000\000\000\001\000\000\000\000"), 1L<<32)
    self.assertEquals(U64("\000\000\000\001\000\000\000\000"), 1L<<32)
def get_invalidations(self, tid):
    """Return a tid and list of all objects invalidation since tid.

    The tid is the most recent transaction id seen by the client.

    Returns None if it is unable to provide a complete list
    of invalidations for tid.  In this case, client should
    do full cache verification.
    """
    if not self.invq:
        log("invq empty")
        return None, []

    earliest_tid = self.invq[-1][0]
    if earliest_tid > tid:
        # The queue does not reach back far enough to cover `tid`.
        # (Fixed message typo: "to old" -> "too old".)
        log("tid too old for invq %s < %s" % (u64(tid), u64(earliest_tid)))
        return None, []

    # Collect the oids invalidated by every transaction newer than `tid`.
    oids = {}
    for _tid, L in self.invq:
        if _tid <= tid:
            break
        for key in L:
            oids[key] = 1

    latest_tid = self.invq[0][0]
    return latest_tid, oids.keys()
def checkKnownConstants(self):
    # Spot-check p64/u64/U64 against hand-built big-endian byte strings
    # (Python 2 era: long literals, native-str bytes).
    self.assertEquals("\000\000\000\000\000\000\000\001", p64(1))
    self.assertEquals("\000\000\000\001\000\000\000\000", p64(1L << 32))
    self.assertEquals(u64("\000\000\000\000\000\000\000\001"), 1)
    self.assertEquals(U64("\000\000\000\000\000\000\000\001"), 1)
    self.assertEquals(u64("\000\000\000\001\000\000\000\000"), 1L << 32)
    self.assertEquals(U64("\000\000\000\001\000\000\000\000"), 1L << 32)
def test_KnownConstants(self):
    """Round-trip hand-assembled big-endian values through p64/u64/U64."""
    cases = [
        (b"\000\000\000\000\000\000\000\001", 1),
        (b"\000\000\000\001\000\000\000\000", 1 << 32),
    ]
    for packed, value in cases:
        self.assertEqual(packed, p64(value))
        self.assertEqual(u64(packed), value)
        self.assertEqual(U64(packed), value)
def invalidate(self, oid, tid, server_invalidation=True):
    """Invalidate the current cache record for `oid` as of `tid`.

    When tid is None the record is freed outright; otherwise it is
    demoted to the non-current index with `tid` as its end tid.
    """
    if tid is not None:
        if tid > self.tid:
            self.setLastTid(tid)
        elif tid < self.tid:
            if server_invalidation:
                raise ValueError("invalidation tid (%s) must not be less"
                                 " than previous one (%s)" %
                                 (u64(tid), u64(self.tid)))
    ofs = self.current.get(oid)
    if ofs is None:
        # 0x10 == invalidate (miss)
        self._trace(0x10, oid, tid)
        return
    self.f.seek(ofs)
    read = self.f.read
    # 'a' marks an allocated (live) record at this offset.
    assert read(1) == 'a', (ofs, self.f.tell(), oid)
    size, saved_oid, saved_tid, end_tid = unpack(">I8s8s8s", read(28))
    assert saved_oid == oid, (ofs, self.f.tell(), oid, saved_oid)
    # A current record must have a zero end tid.
    assert end_tid == z64, (ofs, self.f.tell(), oid)
    del self.current[oid]
    if tid is None:
        # Free the record: rewrite the status byte as 'f'.
        self.f.seek(ofs)
        self.f.write('f'+pack(">I", size))
        # 0x1E = invalidate (hit, discarding current or non-current)
        self._trace(0x1E, oid, tid)
        self._len -= 1
    else:
        # Stamp the end_tid field (21 bytes into the record) and move
        # the record to the non-current index.
        self.f.seek(ofs+21)
        self.f.write(tid)
        self._set_noncurrent(oid, saved_tid, ofs)
        # 0x1C = invalidate (hit, saving non-current)
        self._trace(0x1C, oid, tid)
def listHistory(self):
    """List transactions that modified a persistent object."""
    state = self._loadHistoricalState()
    results = []
    for n, d in enumerate(self.history):
        # One-line human-readable summary: local time, user, description.
        short = (str(time.strftime('%Y-%m-%d %H:%M:%S',
                                   time.localtime(d['time'])))
                 + " " + d['user_name'] + " " + d['description'])
        url = self.getUrl(tid=u64(d['tid']))
        # Mark the entry being viewed when an explicit tid was requested.
        current = (d['tid'] == self.state.tid and
                   self.state.requestedTid is not None)
        curState = state[n]['state']
        oldState = state[n + 1]['state']
        diff = compareDictsHTML(curState, oldState, d['tid'])
        results.append(dict(short=short, utid=u64(d['tid']), href=url,
                            current=current, error=state[n]['error'],
                            diff=diff, **d))
    # number in reverse order
    for i in range(len(results)):
        results[i]['index'] = len(results) - i
    return results
def listHistory(self):
    """Build display data for the visible page of transaction history."""
    if 'tid' in self.request:
        # int(..., 0) accepts any base prefix (0x..., 0o..., decimal).
        requested_tid = p64(int(self.request['tid'], 0))
    else:
        requested_tid = None
    results = []
    for n, d in enumerate(self.history[self.first_idx:self.last_idx]):
        utid = u64(d.tid)
        ts = TimeStamp(d.tid).timeTime()
        utc_timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(ts))
        local_timestamp = time.strftime('%Y-%m-%d %H:%M:%S',
                                        time.localtime(ts))
        try:
            # presumably user is "<location> <id>" -- TODO confirm
            user_location, user_id = d.user.split()
        except ValueError:
            user_location = None
            user_id = d.user
        try:
            size = d._tend - d._tpos
        except AttributeError:
            # Not every storage iterator exposes record positions.
            size = None
        ext = d.extension if isinstance(d.extension, dict) else {}
        objects = []
        for record in d:
            obj = self.jar.get(record.oid)
            url = "@@zodbbrowser?oid=0x%x&tid=0x%x" % (u64(record.oid),
                                                       utid)
            objects.append(dict(
                oid=u64(record.oid),
                path=getObjectPath(obj, d.tid),
                oid_repr=oid_repr(record.oid),
                class_repr=getObjectType(obj),
                url=url,
                repr=IValueRenderer(obj).render(d.tid),
            ))
        if len(objects) == 1:
            summary = '1 object record'
        else:
            summary = '%d object records' % len(objects)
        if size is not None:
            summary += ' (%d bytes)' % size
        results.append(dict(
            index=(self.first_idx + n + 1),
            utc_timestamp=utc_timestamp,
            local_timestamp=local_timestamp,
            user_id=user_id,
            user_location=user_location,
            description=d.description,
            utid=utid,
            current=(d.tid == requested_tid),
            href=self.getUrl(tid=utid),
            size=size,
            summary=summary,
            hidden=(len(objects) > 5),
            objects=objects,
            **ext
        ))
    if results and not requested_tid and self.page == 0:
        # With no explicit tid requested, the newest txn is "current".
        results[-1]['current'] = True
    # Newest first.
    return results[::-1]
def checkLoadBefore(self):
    """Store 10 revisions of one object and verify that loadBefore()
    returns each non-current revision with the correct end tid."""
    # Store 10 revisions of one object and then make sure that we
    # can get all the non-current revisions back.
    oid = self._storage.new_oid()
    revs = []
    revid = None
    for i in range(10):
        # We need to ensure that successive timestamps are at least
        # two apart, so that a timestamp exists that's unambiguously
        # between successive timestamps.  Each call to snooze()
        # guarantees that the next timestamp will be at least one
        # larger (and probably much more than that) than the previous
        # one.
        snooze()
        snooze()
        revid = self._dostore(oid, revid, data=MinPO(i))
        revs.append(self._storage.loadEx(oid, ""))
    prev = u64(revs[0][1])
    for i in range(1, 10):
        tid = revs[i][1]
        cur = u64(tid)
        middle = prev + (cur - prev) // 2
        assert prev < middle < cur  # else the snooze() trick failed
        prev = cur
        t = self._storage.loadBefore(oid, p64(middle))
        self.assert_(t is not None)
        data, start, end = t
        self.assertEqual(revs[i - 1][0], data)
        self.assertEqual(tid, end)
def test_KnownConstants(self):
    """Check p64/u64/U64 against hand-assembled big-endian constants."""
    for packed, value in ((b"\000\000\000\000\000\000\000\001", 1),
                          (b"\000\000\000\001\000\000\000\000", 1 << 32)):
        self.assertEqual(packed, p64(value))
        self.assertEqual(u64(packed), value)
        self.assertEqual(U64(packed), value)
def read_txn_header(f, pos, file_size, outp, ltid):
    """Read and sanity-check the transaction header at `pos` in `f`.

    Returns (next_pos, TransactionRecord-or-None, tid); the record is
    None for an undone ('u') transaction.  Raises EOFError at end of
    file or at a checkpoint ('c') record, after truncating the file.
    """
    # Read the transaction record
    f.seek(pos)
    h = f.read(23)
    if len(h) < 23:
        raise EOFError

    tid, stl, status, ul, dl, el = unpack(">8s8scHHH", h)
    status = as_text(status)
    tl = u64(stl)

    if pos + (tl + 8) > file_size:
        error("bad transaction length at %s", pos)

    if tl < (23 + ul + dl + el):
        error("invalid transaction length, %s, at %s", tl, pos)

    if ltid and tid < ltid:
        # Fixed message typo: "reducation" -> "reduction".
        error("time-stamp reduction %s < %s, at %s",
              u64(tid), u64(ltid), pos)

    if status == "c":
        # Checkpoint flag: everything after this point is discarded.
        truncate(f, pos, file_size, outp)
        raise EOFError

    if status not in " up":
        error("invalid status, %r, at %s", status, pos)

    tpos = pos
    tend = tpos + tl

    if status == "u":
        # Undone transaction, skip it
        f.seek(tend)
        h = f.read(8)
        if h != stl:
            error("inconsistent transaction length at %s", pos)
        pos = tend + 8
        return pos, None, tid

    pos = tpos + (23 + ul + dl + el)
    user = f.read(ul)
    description = f.read(dl)
    if el:
        try:
            e = loads(f.read(el))
        except Exception:
            # Extension data is best-effort; a corrupt pickle is
            # silently replaced by an empty dict.  (Narrowed from a
            # bare except that also swallowed KeyboardInterrupt.)
            e = {}
    else:
        e = {}

    result = TransactionRecord(tid, status, user, description, e,
                               pos, tend, f, tpos)
    pos = tend

    # Read the (intentionally redundant) transaction length
    f.seek(pos)
    h = f.read(8)
    if h != stl:
        error("redundant transaction length check failed at %s", pos)
    pos += 8

    return pos, result, tid
def setLastTid(self, tid):
    """Record `tid` as the most recent transaction id seen.

    While the cache holds entries, a tid that does not move forward
    is rejected with ValueError.
    """
    if (self.tid is not None) and (tid <= self.tid) and self:
        raise ValueError("new last tid (%s) must be greater than "
                         "previous one (%s)" % (u64(tid), u64(self.tid)))
    assert isinstance(tid, str) and len(tid) == 8, tid
    self.tid = tid
    # Mirror the tid into the shared cache under a well-known key.
    self.cache.set('LAST_TID', tid)
def testPrimitiveMethods(self):
    # Smoke-test the view's simple accessors against the root object.
    view = self._zodbInfoView(self.root, TestRequest())
    self.assertEquals(view.getObjectId(), u64(self.root._p_oid))
    self.assertTrue('PersistentMapping' in view.getObjectType())
    self.assertEquals(view.getStateTid(),
                      u64(ZodbObjectState(self.root).tid))
    self.assertEquals(view.getStateTidNice(),
                      view._tidToTimestamp(ZodbObjectState(self.root).tid))
def download_blob(self, cursor, oid, serial, filename):
    """Download a blob into a file"""
    tmp_fn = filename + ".tmp"
    # Renamed local from `bytes` to avoid shadowing the builtin.
    byte_count = self.adapter.mover.download_blob(
        cursor, u64(oid), u64(serial), tmp_fn)
    # Only move the temp file into place if the mover produced one.
    if os.path.exists(tmp_fn):
        os.rename(tmp_fn, filename)
    self.cache_checker.loaded(byte_count)
def testPrimitiveMethods(self):
    """Exercise the view's basic accessors on the root object."""
    view = self._zodbInfoView(self.root, TestRequest())
    self.assertEqual(view.getObjectId(), u64(self.root._p_oid))
    self.assertIn('PersistentMapping', view.getObjectType())
    self.assertEqual(view.getStateTid(),
                     u64(ZodbObjectState(self.root).tid))
    self.assertEqual(
        view.getStateTidNice(),
        view._tidToTimestamp(ZodbObjectState(self.root).tid))
def download_blob(self, cursor, oid, serial, filename):
    """Download a blob into a file"""
    tmp_fn = filename + ".tmp"
    oid_int = u64(oid)
    tid_int = u64(serial)
    count = self.adapter.mover.download_blob(
        cursor, oid_int, tid_int, tmp_fn)
    # The mover may decline to produce a file; only rename when it did.
    if os.path.exists(tmp_fn):
        os.rename(tmp_fn, filename)
    self.cache_checker.loaded(count)
def _del_noncurrent(self, oid, tid):
    """Remove the (oid, tid) entry from the non-current index."""
    oid_int = u64(oid)
    try:
        per_oid = self.noncurrent[oid_int]
        del per_oid[u64(tid)]
        if not per_oid:
            # Drop the now-empty per-oid map entirely.
            del self.noncurrent[oid_int]
    except KeyError:
        logger.error("Couldn't find non-current %r", (oid, tid))
def read_txn_header(f, pos, file_size, outp, ltid):
    """Read and sanity-check the transaction header at `pos` in `f`.

    Returns (next_pos, RecordIterator-or-None, tid); the iterator is
    None for an undone ('u') transaction.  Raises EOFError at end of
    file or at a checkpoint ('c') record, after truncating the file.
    """
    # Read the transaction record
    f.seek(pos)
    h = f.read(23)
    if len(h) < 23:
        raise EOFError

    tid, stl, status, ul, dl, el = unpack(">8s8scHHH", h)
    tl = u64(stl)

    if pos + (tl + 8) > file_size:
        error("bad transaction length at %s", pos)

    if tl < (23 + ul + dl + el):
        error("invalid transaction length, %s, at %s", tl, pos)

    if ltid and tid < ltid:
        # Fixed message typo: "reducation" -> "reduction".
        error("time-stamp reduction %s < %s, at %s",
              u64(tid), u64(ltid), pos)

    if status == "c":
        # Checkpoint flag: everything after this point is discarded.
        truncate(f, pos, file_size, outp)
        raise EOFError

    if status not in " up":
        error("invalid status, %r, at %s", status, pos)

    tpos = pos
    tend = tpos + tl

    if status == "u":
        # Undone transaction, skip it
        f.seek(tend)
        h = f.read(8)
        if h != stl:
            error("inconsistent transaction length at %s", pos)
        pos = tend + 8
        return pos, None, tid

    pos = tpos + (23 + ul + dl + el)
    user = f.read(ul)
    description = f.read(dl)
    if el:
        try:
            e = loads(f.read(el))
        except Exception:
            # Extension data is best-effort; a corrupt pickle is
            # silently replaced by an empty dict.  (Narrowed from a
            # bare except that also swallowed KeyboardInterrupt.)
            e = {}
    else:
        e = {}

    result = RecordIterator(tid, status, user, description, e,
                            pos, tend, f, tpos)
    pos = tend

    # Read the (intentionally redundant) transaction length
    f.seek(pos)
    h = f.read(8)
    if h != stl:
        error("redundant transaction length check failed at %s", pos)
    pos += 8

    return pos, result, tid
def _check_permissions(self, data, oid=None):
    """Raise StorageError unless `data` belongs to the current user.

    Access is allowed when the record ends with our user id, when the
    oid *is* our user id, or for the root object (z64).
    """
    if not (
        data.endswith(self.user_id)
        or oid == self.user_id
        or oid == z64
    ):
        # u64() requires an 8-byte value, but oid may be None (the
        # default); only convert it when present so the error itself
        # cannot crash with a TypeError.
        raise StorageError(
            "Attempt to access encrypted data of others at <%s> by <%s>" % (
                u64(oid) if oid is not None else oid, u64(self.user_id)))
def test_u64_bad_object(self):
    """u64() on a 9-byte string raises ValueError naming the bad value."""
    with self.assertRaises(ValueError) as exc:
        u64(b'123456789')
    # The args will be whatever the struct.error args were,
    # which vary from version to version and across implementations,
    # followed by the bad value
    self.assertEqual(exc.exception.args[-1], b'123456789')
def settid(self, tid):
    """Persist `tid` as the last committed transaction id.

    The tid must be strictly greater than the previously recorded one.
    """
    previous = self.tid
    if previous is not None and tid <= previous:
        raise ValueError("new last tid (%s) must be greater than "
                         "previous one (%s)" % (u64(tid), u64(previous)))
    assert isinstance(tid, str) and len(tid) == 8
    self.tid = tid
    # The tid lives immediately after the magic string heading the file.
    self.f.seek(len(magic))
    self.f.write(tid)
    self.f.flush()
def upload_blob(self, cursor, oid, serial, filename):
    """Upload a blob from a file.

    If serial is None, upload to the temporary table.
    """
    tid_int = None if serial is None else u64(serial)
    self.adapter.mover.upload_blob(cursor, u64(oid), tid_int, filename)
def _read_header(self):
    """Parse the fixed-size data record header at self._pos."""
    self._file.seek(self._pos)
    # always read the longer header, just in case
    self._hdr = self._file.read(DATA_HDR_LEN)
    unpacked = struct.unpack(DATA_HDR, self._hdr[:DATA_HDR_LEN])
    (self.oid, self.serial, prev_rec_pos, txn_pos,
     vlen, data_len) = unpacked
    assert not vlen  # versions are not supported here
    self.prev_rec_pos = u64(prev_rec_pos)
    self.txn_pos = u64(txn_pos)
    self.data_len = u64(data_len)
def _read_header(self):
    """Read and decode the data record header located at self._pos."""
    self._file.seek(self._pos)
    # always read the longer header, just in case
    self._hdr = self._file.read(DATA_HDR_LEN)
    fields = struct.unpack(DATA_HDR, self._hdr[:DATA_HDR_LEN])
    self.oid, self.serial, raw_prev, raw_txn, vlen, raw_len = fields
    assert not vlen  # versions are not supported here
    self.prev_rec_pos = u64(raw_prev)
    self.txn_pos = u64(raw_txn)
    self.data_len = u64(raw_len)
def setLastTid(self, tid):
    """Record `tid` as the last seen transaction id and persist it."""
    previous = self.tid
    if (previous is not None) and (tid <= previous) and self:
        raise ValueError("new last tid (%s) must be greater than "
                         "previous one (%s)" % (u64(tid), u64(previous)))
    assert isinstance(tid, str) and len(tid) == 8, tid
    self.tid = tid
    # Persist the tid right after the magic header bytes.
    self.f.seek(len(magic))
    self.f.write(tid)
    self.f.flush()
def invalidate(self, oid, tid, server_invalidation=True):
    """Drop the cached record for `oid`, updating the last-seen tid."""
    if tid is not None:
        if tid > self.tid:
            self.setLastTid(tid)
        elif tid < self.tid and server_invalidation:
            raise ValueError("invalidation tid (%s) must not be less"
                             " than previous one (%s)" %
                             (u64(tid), u64(self.tid)))
    self.cache.delete(keyify(oid))
    self._n_items -= 1
def setLastTid(self, tid):
    """Persist the last transaction id, ignoring empty/zero tids.

    Re-setting the current tid is tolerated; an older tid raises
    ValueError while the cache holds any entries.
    """
    if (not tid) or (tid == z64):
        return
    if (tid <= self.tid) and self._len:
        if tid == self.tid:
            return  # Be a little forgiving
        raise ValueError("new last tid (%s) must be greater than "
                         "previous one (%s)" % (u64(tid), u64(self.tid)))
    assert isinstance(tid, str) and len(tid) == 8, tid
    self.tid = tid
    # Persist the tid right after the magic header bytes.
    self.f.seek(len(magic))
    self.f.write(tid)
    self.f.flush()
def loadBefore(self, oid, before_tid):
    """Return (data, start_tid, end_tid) for the revision of `oid`
    active just before `before_tid`, or None on a cache miss.

    Falls back to load() (current-record lookup) when the non-current
    index has nothing useful; in that case end_tid is returned as None.
    """
    with self._lock:
        noncurrent_for_oid = self.noncurrent.get(u64(oid))
        if noncurrent_for_oid is None:
            # No non-current data; the current record may still apply.
            result = self.load(oid, before_tid)
            if result:
                return result[0], result[1], None
            else:
                self._trace(0x24, oid, "", before_tid)  # miss
                return result
        # Latest record whose tid is strictly less than before_tid.
        items = noncurrent_for_oid.items(None, u64(before_tid) - 1)
        if not items:
            result = self.load(oid, before_tid)
            if result:
                return result[0], result[1], None
            else:
                self._trace(0x24, oid, "", before_tid)
                return result
        tid, ofs = items[-1]
        self.f.seek(ofs)
        read = self.f.read
        # b'a' marks an allocated (live) record at this offset.
        status = read(1)
        assert status == b'a', (ofs, self.f.tell(), oid, before_tid)
        size, saved_oid, saved_tid, end_tid, lver, ldata = unpack(
            ">I8s8s8sHI", read(34))
        assert saved_oid == oid, (ofs, self.f.tell(), oid, saved_oid)
        assert saved_tid == p64(tid), (ofs, self.f.tell(), oid, saved_tid, tid)
        assert end_tid != z64, (ofs, self.f.tell(), oid)
        assert lver == 0, "Versions aren't supported"
        data = read(ldata)
        assert len(data) == ldata, (ofs, self.f.tell())
        # WARNING: The following assert changes the file position.
        # We must not depend on this below or we'll fail in optimized mode.
        assert read(8) == oid, (ofs, self.f.tell(), oid)
        if end_tid < before_tid:
            # The found record expired before the requested time.
            result = self.load(oid, before_tid)
            if result:
                return result[0], result[1], None
            else:
                self._trace(0x24, oid, "", before_tid)
                return result
        self._n_accesses += 1
        self._trace(0x26, oid, "", saved_tid)  # hit
        return data, saved_tid, end_tid
def loadBefore(self, oid, before_tid):
    """Return (data, start_tid, end_tid) for the revision of `oid`
    active just before `before_tid`, or None on a cache miss.

    Falls back to load() (current-record lookup) when the non-current
    index has nothing useful; in that case end_tid is returned as None.
    """
    with self._lock:
        noncurrent_for_oid = self.noncurrent.get(u64(oid))
        if noncurrent_for_oid is None:
            # No non-current data; the current record may still apply.
            result = self.load(oid, before_tid)
            if result:
                return result[0], result[1], None
            else:
                self._trace(0x24, oid, "", before_tid)  # miss
                return result
        # Latest record whose tid is strictly less than before_tid.
        items = noncurrent_for_oid.items(None, u64(before_tid)-1)
        if not items:
            result = self.load(oid, before_tid)
            if result:
                return result[0], result[1], None
            else:
                self._trace(0x24, oid, "", before_tid)
                return result
        tid, ofs = items[-1]
        self.f.seek(ofs)
        read = self.f.read
        # b'a' marks an allocated (live) record at this offset.
        status = read(1)
        assert status == b'a', (ofs, self.f.tell(), oid, before_tid)
        size, saved_oid, saved_tid, end_tid, lver, ldata = unpack(
            ">I8s8s8sHI", read(34))
        assert saved_oid == oid, (ofs, self.f.tell(), oid, saved_oid)
        assert saved_tid == p64(tid), (
            ofs, self.f.tell(), oid, saved_tid, tid)
        assert end_tid != z64, (ofs, self.f.tell(), oid)
        assert lver == 0, "Versions aren't supported"
        data = read(ldata)
        assert len(data) == ldata, (ofs, self.f.tell())
        # WARNING: The following assert changes the file position.
        # We must not depend on this below or we'll fail in optimized mode.
        assert read(8) == oid, (ofs, self.f.tell(), oid)
        if end_tid < before_tid:
            # The found record expired before the requested time.
            result = self.load(oid, before_tid)
            if result:
                return result[0], result[1], None
            else:
                self._trace(0x24, oid, "", before_tid)
                return result
        self._n_accesses += 1
        self._trace(0x26, oid, "", saved_tid)  # hit
        return data, saved_tid, end_tid
def setLastTid(self, tid):
    """Persist the newest transaction id seen, ignoring null tids.

    Setting the same tid again is a no-op; an older tid raises
    ValueError while the cache is non-empty.
    """
    if (not tid) or (tid == z64):
        return
    if (tid <= self.tid) and self._len:
        if tid == self.tid:
            return  # Be a little forgiving
        raise ValueError("new last tid (%s) must be greater than "
                         "previous one (%s)" % (u64(tid), u64(self.tid)))
    assert isinstance(tid, bytes) and len(tid) == 8, tid
    self.tid = tid
    # Persist the tid right after the magic header bytes.
    self.f.seek(len(magic))
    self.f.write(tid)
    self.f.flush()
def load(self, oid, version=''):
    """Return (data, serial) for `oid`, preferring the local file.

    Falls through to the underlying storage when there is no local
    record for the oid.
    """
    pos = self.index.get(oid)
    if pos is None:
        return self._storage.load(oid)
    self._file.seek(pos)
    # Record layout: 8-byte oid length, oid, 8-byte serial,
    # 8-byte data size, data.
    oidlen = u64(self._file.read(8))
    stored_oid = self._file.read(oidlen)
    if stored_oid != oid:
        raise POSException.StorageSystemError('Bad temporary storage')
    header = self._file.read(16)
    serial = header[:8]
    size = u64(header[8:])
    return self._file.read(size), serial
def deleteObject(self, cursor, oid, oldserial):
    """Delete one object revision; return the number of rows removed."""
    # The only things to worry about are object_state and blob_chuck.
    # blob chunks are deleted automatically by a foreign key.
    # We shouldn't *have* to verify the oldserial in the delete statement,
    # because our only consumer is zc.zodbdgc which only calls us for
    # unreachable objects, so they shouldn't be modified and get a new
    # TID. But this is safer.
    state = """
    DELETE FROM object_state
    WHERE zoid = %(oid)s
    and tid = %(tid)s
    """
    params = {'oid': u64(oid), 'tid': u64(oldserial)}
    self.runner.run_script_stmt(cursor, state, params)
    return cursor.rowcount
def after_tpc_finish(self, tid):
    """
    Flush queued changes.

    This is called after the database commit lock is released,
    but before releasing the storage lock that will allow other
    threads to use this instance.
    """
    tid_int = u64(tid)
    if self.checkpoints:
        # Future cache lookups for these oids should now use the tid
        # just committed; we're about to flush that data to the cache.
        for oid_int in self.temp_objects.stored_oids:
            self.delta_after0[oid_int] = tid_int
    # Under what circumstances would we get here (after commiting
    # a transaction) without ever having polled to establish
    # checkpoints? Turns out that database-level APIs like
    # db.undo() use new storage instances in an unusual way, and
    # will not necessarily have polled by the time they commit.
    #
    # Of course, if we restored from persistent cache files the master
    # could have checkpoints we copied down.
    #
    # TODO: Create a special subclass for MVCC instances and separate
    # the state handling.
    self._send_queue(tid)
def get_references(state):
    """Return the set of OIDs the given state refers to."""
    if not state:
        return set()
    return set(u64(oid) for oid in referencesf(str(state)))
def getInvalidations(self, tid):
    """Forward an invalidation query to the server and log the reply."""
    invtid, invlist = self.server.get_invalidations(tid)
    if invtid is None:
        return None
    self.log("Return %d invalidations up to tid %s"
             % (len(invlist), u64(invtid)))
    return invtid, invlist
def scan(f, pos):
    """Return a potential transaction location following pos in f.

    This routine scans forward from pos looking for the last data
    record in a transaction.  A period '.' always occurs at the end of
    a pickle, and an 8-byte transaction length follows the last pickle.
    If a period is followed by a plausible 8-byte transaction length,
    assume that we have found the end of a transaction.

    The caller should try to verify that the returned location is
    actually a transaction header.
    """
    while True:
        f.seek(pos)
        chunk = f.read(8096)
        if not chunk:
            return 0
        search_from = 0
        while True:
            dot = chunk.find(b".", search_from)
            if dot < 0:
                pos += len(chunk)
                break
            # If we are less than 8 bytes from the end of the
            # string, we need to read more data.
            search_from = dot + 1
            if search_from > len(chunk) - 8:
                pos += dot
                break
            candidate = u64(chunk[search_from:search_from + 8])
            if candidate < pos:
                return pos + search_from + 8
def test_LongToStringToLong(self):
    """p64 output must round-trip through both U64 and u64."""
    for value in self.all:
        packed = p64(value)
        self.assertEqual(value, U64(packed), "U64() failed")
        self.assertEqual(value, u64(packed), "u64() failed")
def dump_txn(self):
    """Dump one transaction record at the current file position.

    Returns False at end of file, a true value otherwise.
    """
    pos = self.file.tell()
    h = self.file.read(TRANS_HDR_LEN)
    if not h:
        return False
    tid, tlen, status, ul, dl, el = struct.unpack(TRANS_HDR, h)
    end = pos + tlen
    print("=" * 60, file=self.dest)
    print("offset: %d" % pos, file=self.dest)
    print("end pos: %d" % end, file=self.dest)
    print("transaction id: %s" % fmt(tid), file=self.dest)
    print("trec len: %d" % tlen, file=self.dest)
    print("status: %r" % status, file=self.dest)
    user = descr = extra = ""
    if ul:
        user = self.file.read(ul)
    if dl:
        descr = self.file.read(dl)
    if el:
        # NOTE(review): 'extra' is read (advancing the file position)
        # but never printed.
        extra = self.file.read(el)
    print("user: %r" % user, file=self.dest)
    print("description: %r" % descr, file=self.dest)
    print("len(extra): %d" % el, file=self.dest)
    # Dump every data record up to the recorded end of the transaction.
    while self.file.tell() < end:
        self.dump_data(pos)
    # Each transaction ends with a redundant copy of its length.
    stlen = self.file.read(8)
    print("redundant trec len: %d" % u64(stlen), file=self.dest)
    return 1
def getInvalidations(self, tid):
    """Query the server for invalidations after `tid` and log the reply."""
    invtid, invlist = self.server.get_invalidations(self.storage_id, tid)
    if invtid is None:
        return None
    self.log("Return %d invalidations up to tid %s"
             % (len(invlist), u64(invtid)))
    return invtid, invlist
def get_references(state):
    """Return an iterable of the set of OIDs the given state refers to."""
    if not state:
        return ()
    assert isinstance(state, bytes), type(state)  # XXX PY3: str(state)
    oid_ints = set()
    for oid in referencesf(state):
        oid_ints.add(u64(oid))
    return oid_ints
def save_stacktrace(obj):
    """Stores an `AnnotatedTraceback` object that contains a formatted
    stack trace for the current frame and the OID of the object that
    has been modified for possible logging at a later point in time.
    """
    global _tb_for_last_db_write
    tb_limit = 20  # cap the number of frames captured
    current_frame = inspect.currentframe()
    # Outer two frames are in this module, so they're not interesting
    # (assumes callers are exactly two levels up -- TODO confirm).
    frame = current_frame.f_back.f_back
    filename = frame.f_code.co_filename
    line_no = frame.f_lineno
    extracted_tb = traceback.extract_stack(frame, limit=tb_limit)
    oid = hex(u64(obj._p_oid))
    instruction = AnnotatedTraceback(oid, filename, line_no, extracted_tb)
    # Write the traceback to the module global (in a thread-safe way)
    with tb_lock:
        _tb_for_last_db_write = instruction
    # Avoid leaking frames
    del current_frame
    del frame
def scan(f, pos):
    """Return a potential transaction location following pos in f.

    This routine scans forward from pos looking for the last data
    record in a transaction.  A period '.' always occurs at the end of
    a pickle, and an 8-byte transaction length follows the last pickle.
    If a period is followed by a plausible 8-byte transaction length,
    assume that we have found the end of a transaction.

    The caller should try to verify that the returned location is
    actually a transaction header.
    """
    while True:
        f.seek(pos)
        chunk = f.read(8096)
        if not chunk:
            return 0
        search_from = 0
        while True:
            dot = chunk.find(".", search_from)
            if dot < 0:
                pos += len(chunk)
                break
            # If we are less than 8 bytes from the end of the
            # string, we need to read more data.
            search_from = dot + 1
            if search_from > len(chunk) - 8:
                pos += dot
                break
            candidate = u64(chunk[search_from:search_from + 8])
            if candidate < pos:
                return pos + search_from + 8
def getBlobFilePath(self, oid, tid):
    """Map (oid, tid) to the relative path of the blob file."""
    # Bucket oids into self.size directories to keep them manageable.
    base, rem = divmod(utils.u64(oid), self.size)
    filename = "%s.%s%s" % (base, hexlify(tid).decode('ascii'),
                            ZODB.blob.BLOB_SUFFIX)
    return os.path.join(str(rem), filename)
def checkLoadBeforeUndo(self):
    """Verify loadBefore() against revisions created by repeated undo."""
    # Do several transactions then undo them.
    oid = self._storage.new_oid()
    revid = None
    for i in range(5):
        revid = self._dostore(oid, revid, data=MinPO(i))
    revs = []
    for i in range(4):
        info = self._storage.undoInfo()
        tid = info[0]["id"]
        # Always undo the most recent txn, so the value will
        # alternate between 3 and 4.
        self._undo(tid, [oid], note="undo %d" % i)
        revs.append(self._storage.loadEx(oid, ""))
    prev_tid = None
    for i, (data, tid, ver) in enumerate(revs):
        # loadBefore(tid + 1) must return exactly the revision at tid.
        t = self._storage.loadBefore(oid, p64(u64(tid) + 1))
        self.assertEqual(data, t[0])
        self.assertEqual(tid, t[1])
        if prev_tid:
            self.assert_(prev_tid < t[1])
        prev_tid = t[1]
        if i < 3:
            self.assertEqual(revs[i + 1][1], t[2])
        else:
            self.assertEqual(None, t[2])
def getRootOid(self):
    """Return the OID (as an int) of the database root object."""
    root = self.jar.root()
    try:
        root = root[ZopePublication.root_name]
    except KeyError:
        pass  # fall back to the bare root mapping
    return u64(root._p_oid)
def checkLongToStringToLong(self):
    # Round-trip every value in self.all through p64 and back via both
    # U64 and u64 (Python 2 era test).
    for num in self.all:
        s = p64(num)
        n = U64(s)
        self.assertEquals(num, n, "U64() failed")
        n2 = u64(s)
        self.assertEquals(num, n2, "u64() failed")