def _check_drec(self, drec):
    """Check one data record for interesting cross-references.

    Emits a message via ``self._msg`` for every reference *to* a watched
    oid (``self.oids``) and, when the record's own oid is watched, for
    every reference *from* it, resolving class names where possible and
    caching them in ``self._ref2name``.
    """
    # drec has members oid, tid, data, data_txn
    tid, oid, pick, pos = drec.tid, drec.oid, drec.data, drec.pos
    # Cache attribute lookups used repeatedly inside the loop.
    ref2name = self._ref2name
    ref2name_get = ref2name.get
    records_map_get = self._records_map.get
    if pick:
        oid_in_oids = oid in self.oids
        for ref, klass in get_refs(pick):
            if ref in self.oids:
                # Something references a watched oid: resolve (and cache)
                # the class name of the referring record.
                oidclass = ref2name_get(oid, None)
                if oidclass is None:
                    ref2name[oid] = oidclass = get_class(pick)
                self._msg(ref, tid, "referenced by", oid_repr(oid),
                          oidclass, "at", pos)
            if oid_in_oids:
                # A watched oid references something: resolve the class
                # name of the referent if we can.
                if klass is None:
                    klass = ref2name_get(ref, None)
                if klass is None:
                    r = records_map_get(ref, None)
                    # To save memory we only save references
                    # seen in one transaction with interesting
                    # objects changes. So in some circumstances
                    # we may still get "<unknown>" class name.
                    if r is None:
                        klass = "<unknown>"
                    else:
                        ref2name[ref] = klass = get_class(r.data)
                elif isinstance(klass, tuple):
                    ref2name[ref] = klass = "%s.%s" % klass
                self._msg(oid, tid, "references", oid_repr(ref), klass,
                          "at", pos)
def copy(source, dest, verbose=0):
    """Copy transactions from a source to a destination storage

    This is typically used for converting data from one storage to
    another. `source` must have an .iterator() method.
    """
    # NOTE: Python 2 code (print statements, backtick repr).
    _ts = None
    ok = 1
    preindex = {}; preget = preindex.get
    # restore() is a new storage API method which has an identical
    # signature to store() except that it does not return anything.
    # Semantically, restore() is also identical to store() except that it
    # doesn't do the ConflictError or VersionLockError consistency
    # checks. The reason to use restore() over store() in this method is
    # that store() cannot be used to copy transactions spanning a version
    # commit or abort, or over transactional undos.
    #
    # We'll use restore() if it's available, otherwise we'll fall back to
    # using store(). However, if we use store, then
    # copyTransactionsFrom() may fail with VersionLockError or
    # ConflictError.
    restoring = hasattr(dest, 'restore')
    fiter = source.iterator()
    for transaction in fiter:
        tid = transaction.tid
        if _ts is None:
            _ts = TimeStamp(tid)
        else:
            t = TimeStamp(tid)
            if t <= _ts:
                # Timestamps went backwards; synthesize a strictly later
                # one so the destination stays ordered (backtick repr
                # yields the raw 8-byte tid of the TimeStamp).
                if ok:
                    print ('Time stamps out of order %s, %s' % (_ts, t))
                ok = 0
                _ts = t.laterThan(_ts)
                tid = `_ts`
            else:
                _ts = t
                if not ok:
                    print ('Time stamps back in order %s' % (t))
                    ok = 1
        if verbose:
            print _ts
        dest.tpc_begin(transaction, tid, transaction.status)
        for r in transaction:
            oid = r.oid
            if verbose:
                print oid_repr(oid), r.version, len(r.data)
            if restoring:
                dest.restore(oid, r.tid, r.data, r.version, r.data_txn,
                             transaction)
            else:
                pre = preget(oid, None)
                s = dest.store(oid, pre, r.data, r.version, transaction)
                preindex[oid] = s
        dest.tpc_vote(transaction)
        dest.tpc_finish(transaction)
def checkTxn(self, th, pos): if th.tid <= self.ltid: self.fail(pos, "time-stamp reduction: %s <= %s", oid_repr(th.tid), oid_repr(self.ltid)) self.ltid = th.tid if th.status == "c": self.fail(pos, "transaction with checkpoint flag set") if not th.status in " pu": # recognize " ", "p", and "u" as valid self.fail(pos, "invalid transaction status: %r", th.status) if th.tlen < th.headerlen(): self.fail(pos, "invalid transaction header: " "txnlen (%d) < headerlen(%d)", th.tlen, th.headerlen())
def dump(self):
    """Print a human-readable dump of the cache for debugging.

    Lists every (oid, tid) pair from the cache index, then walks the
    doubly-linked list and prints each entry's key pair and end_tid.
    """
    from ZODB.utils import oid_repr
    print("cache size", len(self))
    L = list(self.contents())
    L.sort()
    for oid, tid in L:
        print(oid_repr(oid), oid_repr(tid))
    print("dll contents")
    L = list(self)
    # Python 2 style sort with a cmp function, ordering entries by key.
    L.sort(lambda x, y: cmp(x.key, y.key))
    for x in L:
        # end_tid of None marks the current revision; show it as z64.
        end_tid = x.end_tid or z64
        print(oid_repr(x.key[0]), oid_repr(x.key[1]), oid_repr(end_tid))
    print()
def _store(self, oid, serial, data, version):
    """Store one object revision, capturing storage errors.

    On failure, ``store_failed`` is flagged and the exception object
    itself is recorded as ``newserial`` for this oid so it can be
    reported back to the client.  (Python 2 except syntax.)
    """
    err = None
    try:
        newserial = self.storage.store(oid, serial, data, version,
                                       self.transaction)
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception, err:
        self.store_failed = 1
        if isinstance(err, ConflictError):
            self.stats.conflicts += 1
            self.log("conflict error oid=%s msg=%s" %
                     (oid_repr(oid), str(err)), BLATHER)
        if not isinstance(err, TransactionError):
            # Unexpected errors are logged and passed to the client
            self.log("store error: %s, %s" % sys.exc_info()[:2],
                     logging.ERROR, exc_info=True)
        # Try to pickle the exception. If it can't be pickled,
        # the RPC response would fail, so use something else.
        pickler = cPickle.Pickler()
        pickler.fast = 1
        try:
            pickler.dump(err, 1)
        except:
            msg = "Couldn't pickle storage exception: %s" % repr(err)
            self.log(msg, logging.ERROR)
            err = StorageServerError(msg)
        # The exception is reported back as newserial for this oid
        newserial = err
def verify_record(oid, data, debug=False):
    """Try to unpickle one object record; return ``(success, msg)``.

    Loads the class-info pickle and then the object-state pickle from
    ``data``.  On failure the raw pickle and traceback are logged; with
    ``debug`` set, the failing state pickle is disassembled and a pdb
    session is started.  Returns ``(True, None)`` on success, otherwise
    ``(False, msg)`` where ``msg`` names the exception class and text so
    identical problems can be grouped by callers.
    """
    input_file = io.BytesIO(data)
    unpickler = PersistentUnpickler(None, persistent_load, input_file)
    class_info = "unknown"
    pos = None
    try:
        class_info = unpickler.load()
        # Remember where the object-state pickle starts, for disassembly.
        pos = input_file.tell()
        unpickler.load()
    except Exception as e:
        input_file.seek(0)
        pickle = input_file.read()
        logger.info("\nCould not process {} record {} ({!r}):".format(
            class_info, oid_repr(oid), oid))
        logger.info(repr(pickle))
        logger.info(traceback.format_exc())
        if debug and pos is not None:
            try:
                pickletools.dis(pickle[pos:])
            except Exception:
                # ignore exceptions while disassembling the pickle since the
                # real issue is that it references an unavailable module
                pass
            finally:
                pdb.set_trace()
        elif debug and pos is None:
            pdb.set_trace()
        # The same issues should have the same msg
        msg = "{}: {}".format(e.__class__.__name__, str(e))
        return False, msg
    return True, None
def report(self):
    """Show all msgs, grouped by oid and sub-grouped by tid."""
    # NOTE: Python 2 code (print statements).
    msgs = self.msgs
    oids = self.oids
    oid2name = self.oid2name
    # First determine which oids weren't seen at all, and synthesize msgs
    # for them.
    NOT_SEEN = "this oid was not defined (no data record for it found)"
    for oid in oids:
        if oid not in oid2name:
            msgs.append((oid, None, NOT_SEEN))
    msgs.sort()  # oids are primary key, tids secondary
    current_oid = current_tid = None
    for oid, tid, msg in msgs:
        if oid != current_oid:
            # New oid group: print its name and revision count header.
            nrev = oids[oid]
            revision = "revision" + (nrev != 1 and 's' or '')
            name = oid2name.get(oid, "<unknown>")
            print "oid", oid_repr(oid), name, nrev, revision
            current_oid = oid
            current_tid = None
        if msg is NOT_SEEN:
            assert tid is None
            print " ", msg
            continue
        if tid != current_tid:
            # New tid sub-group: print the transaction metadata.
            current_tid = tid
            status, user, description, pos = self.tid2info[tid]
            print " tid %s offset=%d %s" % (tid_repr(tid), pos,
                                            TimeStamp(tid))
            print " tid user=%r" % shorten(user)
            print " tid description=%r" % shorten(description)
        print " ", msg
def _getCleanFilename(self, oid, tid):
    """Return the savepoint filename for the blob revision (oid, tid)."""
    filename = "%s-%s%s" % (
        utils.oid_repr(oid),
        utils.tid_repr(tid),
        SAVEPOINT_SUFFIX,
    )
    return os.path.join(self._getBlobPath(), filename)
def patched_field_get_size(self):
    """ Patch for get_size """
    try:
        blob = openBlob(self.blob)
        size = fstat(blob.fileno()).st_size
        blob.close()
    except POSKeyError:
        # The blob file is missing from the blob storage: log enough
        # information to locate it on disk, and report size 0 instead of
        # propagating the error.
        oid = self.blob._p_oid
        directories = []
        # Create the bushy directory structure with the least significant byte
        # first
        # NOTE(review): hexlify'ing each character of str(oid) assumes
        # Python 2 byte-strings -- confirm before porting to Python 3.
        for byte in str(oid):
            directories.append('0x%s' % binascii.hexlify(byte))
        path = os.path.sep.join(directories)
        cached = self.blob._p_blob_committed
        logger.error("BLOBWARNING: Could not get "
                     "field size for blob %r. Info about blob: "
                     "OID (oid, repr, path on zeo storage): %r > %r > %r "
                     "CACHED (path to cached blob): %r ",
                     self, oid_repr(oid), oid.__repr__(), path, cached)
        size = 0
    return size
def patched_getSize(self):
    """ Return image dimensions of the blob """
    try:
        blob = openBlob(self.blob)
    except POSKeyError:
        # Blob file missing from the blob storage: log where it should
        # live on disk and report 0 instead of raising.
        oid = self.blob._p_oid
        directories = []
        # Create the bushy directory structure with the least significant byte
        # first
        for byte in str(oid):
            directories.append('0x%s' % binascii.hexlify(byte))
        path = os.path.sep.join(directories)
        cached = self.blob._p_blob_committed
        logger.error("BLOBWARNING: Could not get "
                     "image size for blob %r. Info about blob: "
                     "OID (oid, repr, path on zeo storage): %r > %r > %r "
                     "CACHED (path to cached blob): %r ",
                     self.blob._p_oid, oid_repr(oid), oid.__repr__(),
                     path, cached)
        return 0
    size = getImageSize(blob)
    blob.close()
    return size
def patched_field_get_size(self):
    """ Patch for get_size """
    try:
        blob = openBlob(self.blob)
        size = fstat(blob.fileno()).st_size
        blob.close()
    except POSKeyError:
        # The blob file is missing from the blob storage: log enough
        # information to locate it on disk, and report size 0 instead of
        # propagating the error.
        oid = self.blob._p_oid
        directories = []
        # Create the bushy directory structure with the least significant byte
        # first
        for byte in str(oid):
            directories.append('0x%s' % binascii.hexlify(byte))
        path = os.path.sep.join(directories)
        cached = self.blob._p_blob_committed
        logger.error(
            "BLOBWARNING: Could not get "
            "field size for blob %r. Info about blob: "
            "OID (oid, repr, path on zeo storage): %r > %r > %r "
            "CACHED (path to cached blob): %r ",
            self, oid_repr(oid), oid.__repr__(), path, cached)
        size = 0
    return size
def _store(self, oid, serial, data, version):
    """Store one object revision, capturing storage errors.

    On failure, ``store_failed`` is flagged and the ``(oid, exception)``
    pair is recorded as ``newserial`` so the error can be reported back
    to the client.  (Python 2 except syntax.)
    """
    err = None
    try:
        newserial = self.storage.store(oid, serial, data, version,
                                       self.transaction)
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception, err:
        self.store_failed = 1
        if isinstance(err, ConflictError):
            self.stats.conflicts += 1
            self.log(
                "conflict error oid=%s msg=%s" % (oid_repr(oid), str(err)),
                BLATHER)
        if not isinstance(err, TransactionError):
            # Unexpected errors are logged and passed to the client
            self.log("store error: %s, %s" % sys.exc_info()[:2],
                     logging.ERROR, exc_info=True)
        # Try to pickle the exception. If it can't be pickled,
        # the RPC response would fail, so use something else.
        pickler = cPickle.Pickler()
        pickler.fast = 1
        try:
            pickler.dump(err, 1)
        except:
            msg = "Couldn't pickle storage exception: %s" % repr(err)
            self.log(msg, logging.ERROR)
            err = StorageServerError(msg)
        # The exception is reported back as newserial for this oid
        newserial = [(oid, err)]
def listHistory(self):
    """Build the list of transaction summaries for the history page.

    Each entry describes one transaction: timestamps, user, size, the
    object records it touched, and rendering metadata for the template.
    Results are returned newest-first; the row matching the requested
    tid (or the newest row on page 0 when no tid was requested) is
    marked as current.
    """
    if 'tid' in self.request:
        requested_tid = p64(int(self.request['tid'], 0))
    else:
        requested_tid = None
    results = []
    for n, d in enumerate(self.history[self.first_idx:self.last_idx]):
        utid = u64(d.tid)
        ts = TimeStamp(d.tid).timeTime()
        utc_timestamp = time.strftime('%Y-%m-%d %H:%M:%S',
                                      time.gmtime(ts))
        local_timestamp = time.strftime('%Y-%m-%d %H:%M:%S',
                                        time.localtime(ts))
        try:
            # Users are usually recorded as "location user_id"; a bare
            # value has no location part.
            user_location, user_id = d.user.split()
        except ValueError:
            user_location = None
            user_id = d.user
        try:
            size = d._tend - d._tpos
        except AttributeError:
            size = None
        ext = d.extension if isinstance(d.extension, dict) else {}
        objects = []
        for record in d:
            obj = self.jar.get(record.oid)
            url = "@@zodbbrowser?oid=0x%x&tid=0x%x" % (u64(record.oid),
                                                       utid)
            objects.append(dict(
                oid=u64(record.oid),
                path=getObjectPath(obj, d.tid),
                oid_repr=oid_repr(record.oid),
                class_repr=getObjectType(obj),
                url=url,
                repr=IValueRenderer(obj).render(d.tid),
            ))
        if len(objects) == 1:
            summary = '1 object record'
        else:
            summary = '%d object records' % len(objects)
        if size is not None:
            summary += ' (%d bytes)' % size
        results.append(dict(
            index=(self.first_idx + n + 1),
            utc_timestamp=utc_timestamp,
            local_timestamp=local_timestamp,
            user_id=user_id,
            user_location=user_location,
            description=d.description,
            utid=utid,
            current=(d.tid == requested_tid),
            href=self.getUrl(tid=utid),
            size=size,
            summary=summary,
            hidden=(len(objects) > 5),
            objects=objects,
            **ext
        ))
    if results and not requested_tid and self.page == 0:
        results[-1]['current'] = True
    return results[::-1]
def patched_getSize(self):
    """ Return image dimensions of the blob """
    try:
        blob = openBlob(self.blob)
    except POSKeyError:
        # Blob file missing from the blob storage: log where it should
        # live on disk and report 0 instead of raising.
        oid = self.blob._p_oid
        directories = []
        # Create the bushy directory structure with the least significant byte
        # first
        for byte in str(oid):
            directories.append('0x%s' % binascii.hexlify(byte))
        path = os.path.sep.join(directories)
        cached = self.blob._p_blob_committed
        logger.error(
            "BLOBWARNING: Could not get "
            "image size for blob %r. Info about blob: "
            "OID (oid, repr, path on zeo storage): %r > %r > %r "
            "CACHED (path to cached blob): %r ",
            self.blob._p_oid, oid_repr(oid), oid.__repr__(), path, cached)
        return 0
    size = getImageSize(blob)
    blob.close()
    return size
def test_get_etag_file_newer(self):
    """The file's own serial wins when it is newer than the blob's.

    E.g. the title was updated after the upload.
    """
    from ZODB.utils import oid_repr
    inst = self._makeOne(None, None)
    blob = DummyBlob()
    inst.blob = blob
    inst._p_serial = b'EDABEDAC'
    blob._p_serial = b'DEADBEEF'
    self.assertEqual(inst.get_etag(), oid_repr(b'EDABEDAC'))
def __str__(self): if self.oid: msg = "Error reading oid %s. Found %r" % (oid_repr(self.oid), self.buf) else: msg = "Error reading unknown oid. Found %r" % self.buf if self.pos: msg += " at %d" % self.pos return msg
def __str__(self): if self.oid: msg = "Error reading oid %s. Found %r" % (oid_repr( self.oid), self.buf) else: msg = "Error reading unknown oid. Found %r" % self.buf if self.pos: msg += " at %d" % self.pos return msg
def setstate(self, obj):
    """Turns the ghost 'obj' into a real object by loading its state
    from the database."""
    oid = obj._p_oid
    if self._opened is None:
        # Loading while the connection is closed would touch a dead
        # storage; fail loudly instead.
        msg = ("Shouldn't load state for %s "
               "when the connection is closed" % oid_repr(oid))
        self._log.error(msg)
        raise ConnectionStateError(msg)
    try:
        self._setstate(obj)
    except ConflictError:
        # Conflicts are part of normal operation; let them propagate
        # without the error log below.
        raise
    except:
        self._log.error("Couldn't load state for %s", oid_repr(oid),
                        exc_info=sys.exc_info())
        raise
def add_reference(self, blob, reference):
    """Record that ``reference`` points at ``blob``.

    A blob without an oid is first added to the reference's connection
    so it gets one.  References are kept in a set keyed by the blob's
    oid repr.
    """
    oid = blob._p_oid
    if not oid:
        # Register the blob with the referencing object's connection to
        # obtain an oid for it.
        reference._p_jar.add(blob)
        oid = blob._p_oid
    key = oid_repr(oid)
    tracked = self.refs.get(key) or set()
    tracked.add(reference)
    self.refs[key] = tracked
def test_get_etag_blob_newer(self):
    """The blob's serial wins when it is newer than the file's.

    E.g. the upload happened after the title was set.
    """
    from ZODB.utils import oid_repr
    inst = self._makeOne(None, None)
    blob = DummyBlob()
    inst.blob = blob
    inst._p_serial = b"DEADBEEF"
    blob._p_serial = b"EDABEDAC"
    self.assertEqual(inst.get_etag(), oid_repr(b"EDABEDAC"))
def get_etag(self):
    """ Return a token identifying the "version" of the file. """
    # Make sure our own serial is loaded.
    self._p_activate()
    own_serial = self._p_serial
    blob_serial = self.blob._p_serial
    if blob_serial == z64:
        # The blob is still a ghost; activate it to read its real serial.
        self.blob._p_activate()
        blob_serial = self.blob._p_serial
    newest = max(own_serial, blob_serial)
    return oid_repr(newest)
def test_get_etag_file_newer_w_ghost_blob(self):
    """A ghost blob (z64 serial) never beats the file's own serial.

    E.g. the title was updated after the upload.
    """
    from ZODB.utils import oid_repr
    from ZODB.utils import z64
    inst = self._makeOne(None, None)
    blob = DummyBlob()
    inst.blob = blob
    inst._p_serial = b"EDABEDAC"
    blob._p_serial = z64
    self.assertEqual(inst.get_etag(), oid_repr(b"EDABEDAC"))
def setstate(self, obj):
    """Turns the ghost 'obj' into a real object by loading its state
    from the database."""
    oid = obj._p_oid
    if self._opened is None:
        # Loading while the connection is closed would touch a dead
        # storage; fail loudly instead.
        msg = ("Shouldn't load state for %s "
               "when the connection is closed" % oid_repr(oid))
        self._log.error(msg)
        raise ConnectionStateError(msg)
    try:
        self._setstate(obj)
    except ConflictError:
        # Conflicts are part of normal operation; let them propagate
        # without the error log below.
        raise
    except:
        self._log.error("Couldn't load state for %s", oid_repr(oid),
                        exc_info=sys.exc_info())
        raise
def object_hint(o):
    """Return a short description of ``o``: "<class> oid=<oid>".

    This function does not raise an exception.
    """
    # __class__ is available on any object.
    class_name = o.__class__.__name__
    # Maybe this isn't a persistent object; keep the sentinel if not.
    oid = getattr(o, "_p_oid", _marker)
    if oid is not _marker:
        oid = oid_repr(oid)
    return "%s oid=%s" % (class_name, oid)
def _splat(self):
    """Spit out a string showing state.
    """
    # NOTE: Python 2 code (backtick repr).  Dumps the transaction table,
    # the oid index, and the per-version index for debugging.
    o=[]
    o.append('Transactions:')
    for tid, (p, u, d, e, t) in self._data.items():
        o.append(" %s %s" % (TimeStamp(tid), p))
        for r in t:
            # NOTE(review): this rebinds the loop variables p and tid
            # for display purposes only.
            oid, pre, vdata, p, tid = r
            oid = oid_repr(oid)
            tid = oid_repr(tid)
            ## if serial is not None: serial=str(TimeStamp(serial))
            pre=id(pre)
            if vdata and vdata[1]:
                vdata=vdata[0], id(vdata[1])
            if p:
                p=''
            o.append(' %s: %s' % (id(r), `(oid, pre, vdata, p, tid)`))
    o.append('\nIndex:')
    items=self._index.items()
    items.sort()
    for oid, r in items:
        if r:
            r=id(r)
        o.append(' %s: %s' % (oid_repr(oid), r))
    o.append('\nVersion Index:')
    items=self._vindex.items()
    items.sort()
    for version, v in items:
        o.append(' '+version)
        vitems=v.items()
        vitems.sort()
        for oid, r in vitems:
            if r:
                r=id(r)
            o.append(' %s: %s' % (oid_repr(oid), r))
    return '\n'.join(o)
def __str__(self): extras = [] if self.oid: extras.append("oid %s" % oid_repr(self.oid)) if self.class_name: extras.append("class %s" % self.class_name) if self.serials: current, old = self.serials extras.append("serial this txn started with %s" % readable_tid_repr(old)) extras.append("serial currently committed %s" % readable_tid_repr(current)) if extras: return "%s (%s)" % (self.message, ", ".join(extras)) else: return self.message
def drop_reference(self, blob, reference):
    """Forget that ``reference`` points at ``blob``.

    The whole entry is removed once no references remain; blobs that
    were never registered are ignored.
    """
    key = oid_repr(blob._p_oid)
    tracked = self.refs.get(key)
    if tracked is None:
        # Nothing recorded for this blob.
        del blob
        return
    if reference in tracked:
        tracked.remove(reference)
    if tracked:
        self.refs[key] = tracked
    else:
        del self.refs[key]
    del blob
def setstate(self, obj):
    """Load the state for an (ghost) object
    """
    oid = obj._p_oid
    if self.opened is None:
        msg = ("Shouldn't load state for %s %s "
               "when the connection is closed" % (className(obj),
                                                  oid_repr(oid)))
        try:
            # Raise-and-catch so the log record carries a traceback.
            raise ConnectionStateError(msg)
        except:
            self._log.exception(msg)
            raise
    try:
        p, serial = self._storage.load(oid)
        self._load_count += 1
        # Unghostify: install the pickled state and bookkeeping data.
        self._reader.setGhostState(obj, p)
        obj._p_serial = serial
        self._cache.update_object_size_estimation(oid, len(p))
        obj._p_estimated_size = len(p)
        # Blob support
        if isinstance(obj, Blob):
            obj._p_blob_uncommitted = None
            obj._p_blob_committed = self._storage.loadBlob(oid, serial)
    except ConflictError:
        # Conflicts are part of normal operation; no error log.
        raise
    except:
        self._log.exception("Couldn't load state for %s %s",
                            className(obj), oid_repr(oid))
        raise
def object_hint(o):
    """Return a short description of ``o``: "<class> oid=<oid>".

    This function does not raise an exception.
    """
    from ZODB.utils import oid_repr
    # __class__ is available on any object.
    class_name = o.__class__.__name__
    # Maybe this isn't a persistent object; keep the sentinel if not.
    oid = getattr(o, "_p_oid", _marker)
    if oid is not _marker:
        oid = oid_repr(oid)
    return "%s oid=%s" % (class_name, oid)
def __iter__(self):
    """Iterates over all objects in database."""
    storage = self.connection._storage
    oid_loader = IOIDLoader(storage)
    for oid in oid_loader.getOIDs():
        try:
            object = self.connection.get(oid)
        except POSKeyError:
            # XXX warg. For some reason the oids we get back might refer to
            # non-existing objects, although the database seems consistent.
            log = logging.getLogger("gocept.zodb")
            log.warn("Found POSKeyError while iterating over database. "
                     "OID: %s" % oid_repr(oid))
            continue
        yield object
def migrate(source, dest, layout):
    """Copy blob data from ``source`` to ``dest``, converting to ``layout``.

    Both directory trees are created if needed; each blob file is
    hard-linked or copied via link_or_copy.  (Python 2 print statements.)
    """
    source_fsh = FilesystemHelper(source)
    source_fsh.create()
    dest_fsh = FilesystemHelper(dest, layout)
    dest_fsh.create()
    print "Migrating blob data from `%s` (%s) to `%s` (%s)" % (
        source, source_fsh.layout_name, dest, dest_fsh.layout_name)
    for oid, path in source_fsh.listOIDs():
        dest_path = dest_fsh.getPathForOID(oid, create=True)
        files = os.listdir(path)
        for file in files:
            source_file = os.path.join(path, file)
            dest_file = os.path.join(dest_path, file)
            link_or_copy(source_file, dest_file)
        print "\tOID: %s - %s files " % (oid_repr(oid), len(files))
def testCall(self):
    """Smoke-test the zodbinfo view: latest state, then by tid, then by oid."""
    request = TestRequest()
    view = self._zodbInfoView(self.root, request)
    # assertEqual replaces the deprecated assertEquals alias (and matches
    # the modern variant of this test elsewhere in the code base).
    self.assertEqual(view(), '')
    self.assertEqual(view.latest, True)
    tid = ZodbObjectState(self.root).tid
    request = TestRequest(form={'tid': tid_repr(tid)})
    view = self._zodbInfoView(self.root, request)
    self.assertEqual(view.latest, False)
    oid = self.root._p_oid
    request = TestRequest(form={'oid': oid_repr(oid)})
    request.annotations['ZODB.interfaces.IConnection'] = self.root._p_jar
    view = self._zodbInfoView(None, request)
def testCall(self):
    """Exercise the view with no form, with a tid, and with an oid."""
    req = TestRequest()
    view = self._zodbInfoView(self.root, req)
    self.assertEqual(view(), '')
    self.assertEqual(view.latest, True)
    tid = ZodbObjectState(self.root).tid
    req = TestRequest(form={'tid': tid_repr(tid)})
    view = self._zodbInfoView(self.root, req)
    self.assertEqual(view.latest, False)
    oid = self.root._p_oid
    req = TestRequest(form={'oid': oid_repr(oid)})
    req.annotations['ZODB.interfaces.IConnection'] = self.root._p_jar
    view = self._zodbInfoView(None, req)
def __call__(self):
    """Render the view, aborting any stray transaction changes afterwards.

    Unless the view is writable and actually made changes, everything
    joined to the current transaction is aborted; the joined resources
    are logged first to help track down unexpected writes.
    """
    try:
        return self.render()
    finally:
        if self.readonly or not self.made_changes:
            resources = transaction.get()._resources
            if resources:
                msg = ["Aborting changes made to:"]
                for r in resources:
                    if isinstance(r, Connection):
                        # Name each persistent object registered as dirty.
                        for o in r._registered_objects:
                            msg.append(" oid=%s %s" % (oid_repr(o._p_oid),
                                                       repr(o)))
                    else:
                        msg.append(" %s" % repr(r))
                log.debug("\n".join(msg))
            transaction.abort()
def _repr(self): # hook for subclasses if self._has_no_repr(self.context): # Special-case objects with the default __repr__ (LP#1087138) if isinstance(self.context, Persistent): return '<%s.%s with oid %s>' % ( self.context.__class__.__module__, self.context.__class__.__name__, oid_repr(self.context._p_oid)) try: return repr(self.context) except Exception: try: return '<unrepresentable %s>' % self.context.__class__.__name__ except Exception: return '<unrepresentable>'
def _repr(self): # hook for subclasses if self.context.__class__.__repr__ is object.__repr__: # Special-case objects with the default __repr__ (LP#1087138) if isinstance(self.context, Persistent): return '<%s.%s with oid %s>' % ( self.context.__class__.__module__, self.context.__class__.__name__, oid_repr(self.context._p_oid)) try: return repr(self.context) except Exception: try: return '<unrepresentable %s>' % self.context.__class__.__name__ except Exception: return '<unrepresentable>'
def verify_oid(storage, oid, debug=False, app=None):
    """Load and verify a single object record identified by ``oid``.

    ``oid`` may be a hex string (e.g. '0x22d17d') or the packed 8-byte
    form.  With ``app`` the existing Zope connection is reused,
    otherwise a fresh connection is opened.  The object is loaded (and
    optionally inspected under pdb), then the raw record is run through
    verify_record and any failure is logged.
    """
    if not IStorageCurrentRecordIteration.providedBy(storage):
        raise TypeError(
            "ZODB storage {} does not implement record_iternext".format(
                storage)
        )
    try:
        # by default expect a 8-byte string (e.g. '0x22d17d')
        # transform to a 64-bit long integer (e.g. b'\x00\x00\x00\x00\x00"\xd1}')
        as_int = int(oid, 0)
        oid = p64(as_int)
    except ValueError:
        # probably already a 64-bit long integer
        pass
    if app:
        # use existing zope instance.
        # only available when used as ./bin/instance zodbverify -o XXX
        connection = app._p_jar
    else:
        # connect to database to be able to load the object
        db = ZODB.DB(storage)
        connection = db.open()
    try:
        obj = connection.get(oid)
        try:
            logger.info("\nObject as dict:\n{}".format(vars(obj)))
        except TypeError:
            pass
        if debug:
            hint = "\nThe object is 'obj'"
            if app:
                hint += "\nThe Zope instance is 'app'"
            logger.info(hint)
            pdb.set_trace()
    except Exception as e:
        logger.info("Could not load object")
        logger.info(traceback.format_exc())
        if debug:
            pdb.set_trace()
    pickle, state = storage.load(oid)
    success, msg = verify_record(oid, pickle, debug)
    if not success:
        logger.info('{}: {}'.format(msg, oid_repr(oid)))
def report(oid, data, serial, missing):
    """Print a report about ``oid`` and the invalid objects it references.

    ``missing`` is a sequence of (oid, info, reason) triples where
    ``info`` is either a (module, class) pair or a plain description.
    """
    from_mod, from_class = get_pickle_metadata(data)
    if len(missing) > 1:
        plural = "s"
    else:
        plural = ""
    ts = TimeStamp(serial)
    print("oid %s %s.%s" % (hex(u64(oid)), from_mod, from_class))
    print("last updated: %s, tid=%s" % (ts, hex(u64(serial))))
    print("refers to invalid object%s:" % plural)
    for oid, info, reason in missing:
        if isinstance(info, tuple):
            description = "%s.%s" % info
        else:
            description = str(info)
        print("\toid %s %s: %r" % (oid_repr(oid), reason, description))
    print()
def report(oid, data, serial, missing):
    """Print a report about ``oid`` and the invalid objects it references.

    ``missing`` is a sequence of (oid, info, reason) triples where
    ``info`` is either a (module, class) pair or a plain description.
    (Python 2 print statements.)
    """
    from_mod, from_class = get_pickle_metadata(data)
    if len(missing) > 1:
        plural = "s"
    else:
        plural = ""
    ts = TimeStamp(serial)
    print "oid %s %s.%s" % (hex(u64(oid)), from_mod, from_class)
    print "last updated: %s, tid=%s" % (ts, hex(u64(serial)))
    print "refers to invalid object%s:" % plural
    for oid, info, reason in missing:
        if isinstance(info, types.TupleType):
            description = "%s.%s" % info
        else:
            description = str(info)
        print "\toid %s %s: %r" % (oid_repr(oid), reason, description)
    print
def listHistory(self):
    """Build transaction summaries (newest first) for the history view."""
    if 'tid' in self.request:
        requested_tid = p64(int(self.request['tid'], 0))
    else:
        requested_tid = None
    results = []
    for n, d in enumerate(self.history[self.first_idx:self.last_idx]):
        utid = u64(d.tid)
        # One-line header: timestamp, user, description.
        short = "%s %s %s" % (TimeStamp(d.tid), d.user, d.description)
        objects = []
        for record in d:
            obj = self.jar.get(record.oid)
            url = "@@zodbbrowser?oid=0x%x&tid=0x%x" % (u64(record.oid),
                                                       utid)
            objects.append(dict(
                oid=u64(record.oid),
                path=getObjectPath(obj, d.tid),
                oid_repr=oid_repr(record.oid),
                class_repr=getObjectType(obj),
                url=url,
                repr=IValueRenderer(obj).render(d.tid),
            ))
        if len(objects) == 1:
            summary = '1 object record'
        else:
            summary = '%d object records' % len(objects)
        results.append(dict(
            index=(self.first_idx + n + 1),
            short=short,
            utid=utid,
            current=(d.tid == requested_tid),
            href=self.getUrl(tid=utid),
            summary=summary,
            hidden=(len(objects) > 5),
            objects=objects,
        ))
    # Default to marking the newest transaction current on the first page.
    if results and not requested_tid and self.page == 0:
        results[-1]['current'] = True
    return results[::-1]
def get_oid(self):
    """ get oid """
    # Render the image blob's oid, its bushy-layout path on the ZEO
    # server, and the cached-blob path as a small HTML debugging page.
    field = self.getField('image')
    blob = field.getRaw(self).getBlob()
    oid = blob._p_oid
    directories = []
    # Create the bushy directory structure with the least significant byte
    # first
    for byte in str(oid):
        directories.append('0x%s' % binascii.hexlify(byte))
    path = os.path.sep.join(directories)
    cached = blob._p_blob_committed
    return """<html><body> oid (oid, repr, path on zeo storage): %s > %s > %s <br/> cached (path to cached blob): %s <br/> </body></html> """ % (oid_repr(oid), oid.__repr__(), path, cached)
def verify_zodb(storage, debug=False):
    """Iterate over every record in ``storage`` and verify that it loads.

    Requires the record_iternext API (IStorageCurrentRecordIteration).
    Broken records are grouped by exception message and reported, most
    frequent group first, together with the affected oids.

    :param storage: an opened ZODB storage.
    :param debug: passed through to verify_record (drops into pdb on
        failing records).
    :raises TypeError: if the storage lacks record_iternext.
    """
    if not IStorageCurrentRecordIteration.providedBy(storage):
        raise TypeError(
            "ZODB storage {} does not implement record_iternext".format(
                storage))
    logger.info("Scanning ZODB...")
    next_ = None
    count = 0
    errors = 0
    issues = defaultdict(list)
    # NOTE: the previous version also kept a module-level list of failing
    # oids that was immediately shadowed after the loop; it carried no
    # information beyond ``issues`` and has been removed.
    while True:
        count += 1
        oid, tid, data, next_ = storage.record_iternext(next_)
        logger.debug("Verifying {}".format(oid))
        success, msg = verify_record(oid, data, debug)
        if not success:
            errors += 1
            issues[msg].append(oid_repr(oid))
        if next_ is None:
            break
    # Report the most frequent problems first.
    msg = ""
    order = sorted(issues, key=lambda k: len(issues[k]), reverse=True)
    for key in order:
        oids = issues[key]
        msg += "{}: {}\n{}\n\n".format(key, len(oids), ' '.join(oids))
    logger.info(
        "Done! Scanned {} records. \n"
        "Found {} records that could not be loaded. \n"
        "Exceptions, how often they happened and which oids are affected: \n\n"
        "{}".format(count, errors, msg))
def report(self):
    """Show all msgs, grouped by oid and sub-grouped by tid."""
    # NOTE: Python 2 code (print statements).
    msgs = self.msgs
    oids = self.oids
    oid2name = self.oid2name
    # First determine which oids weren't seen at all, and synthesize msgs
    # for them.
    NOT_SEEN = "this oid was not defined (no data record for it found)"
    for oid in oids:
        if oid not in oid2name:
            msgs.append( (oid, None, NOT_SEEN) )
    msgs.sort()  # oids are primary key, tids secondary
    current_oid = current_tid = None
    for oid, tid, msg in msgs:
        if oid != current_oid:
            # New oid group: print its name and revision count header.
            nrev = oids[oid]
            revision = "revision" + (nrev != 1 and 's' or '')
            name = oid2name.get(oid, "<unknown>")
            print "oid", oid_repr(oid), name, nrev, revision
            current_oid = oid
            current_tid = None
        if msg is NOT_SEEN:
            assert tid is None
            print " ", msg
            continue
        if tid != current_tid:
            # New tid sub-group: print the transaction metadata.
            current_tid = tid
            status, user, description, pos = self.tid2info[tid]
            print " tid %s offset=%d %s" % (tid_repr(tid), pos,
                                            TimeStamp(tid))
            print " tid user=%r" % shorten(user)
            print " tid description=%r" % shorten(description)
        print " ", msg
def oid_to_path(self, oid):
    """Map an oid to its blob-directory path component (its flat repr)."""
    path_component = utils.oid_repr(oid)
    return path_component
pickler = cPickle.Pickler() pickler.fast = 1 try: pickler.dump(err, 1) except: msg = "Couldn't pickle storage exception: %s" % repr(err) self.log(msg, logging.ERROR) err = StorageServerError(msg) # The exception is reported back as newserial for this oid newserial = err else: if serial != "\0\0\0\0\0\0\0\0": self.invalidated.append((oid, version)) if newserial == ResolvedSerial: self.stats.conflicts_resolved += 1 self.log("conflict resolved oid=%s" % oid_repr(oid), BLATHER) self.serials.append((oid, newserial)) return err is None def _vote(self): self.client.serialnos(self.serials) # If a store call failed, then return to the client immediately. # The serialnos() call will deliver an exception that will be # handled by the client in its tpc_vote() method. if self.store_failed: return return self.storage.tpc_vote(self.transaction) def _abortVersion(self, src): tid, oids = self.storage.abortVersion(src, self.transaction) inv = [(oid, src) for oid in oids]
def __str__(self):
    """Describe the dangling reference as "from <referer> to <missing>"."""
    referer = oid_repr(self.referer)
    missing = oid_repr(self.missing)
    return "from %s to %s" % (referer, missing)
def _fmt_undo(oid, reason):
    """Format an undo-error message for ``oid``; ``reason`` is optional."""
    if reason:
        suffix = ": %s" % reason
    else:
        suffix = ""
    return "Undo error %s%s" % (oid_repr(oid), suffix)
def __str__(self):
    """The oid of the missing object, in readable form."""
    missing_oid = self.args[0]
    return oid_repr(missing_oid)