def loadBefore(self, oid, tid):
    """
    Return the most recent revision of *oid* committed before *tid*.

    Returns ``(state, tid_of_state, tid_after_state)`` where
    ``tid_after_state`` is ``None`` when the returned revision is the
    most recent one. Returns ``None`` when no revision exists before
    *tid*. Raises POSKeyError when the object does not exist at all,
    or when its creation has been undone.
    """
    if tid is maxtid or tid == maxtid:
        # This is probably from ZODB.utils.load_current(), which
        # is really trying to just get the current state of the
        # object. This is almost entirely just from test cases; ZODB 5's mvccadapter
        # doesn't even expose it, so ZODB.Connection doesn't use it.
        #
        # Shortcut the logic below by using load() (current),
        # formatted in the way this method returns it:
        #
        # ``(state, tid # of state, tid_after_state)``
        #
        # where tid_after_state will naturally be None
        return self.load(oid) + (None,)

    oid_int = bytes8_to_int64(oid)

    # TODO: This makes three separate queries, and also bypasses the cache.
    # We should be able to fix at least the multiple queries.

    if self.store_connection:
        # Allow loading data from later transactions
        # for conflict resolution.

        # XXX: This doesn't seem to be used in conflict
        # resolution. ZODB.ConflictResolution.tryToResolveConflict
        # calls loadSerial(); About the only call in ZODB to
        # loadBefore() is from BlobStorage.undo() (which
        # RelStorage does not extend). Mixing and matching calls
        # between connections using different isolation levels
        # isn't great. Can we stop doing this?
        cursor = self.store_connection.cursor
    else:
        cursor = self.load_connection.cursor

    # Raise early if the object has never existed at all.
    if not self.adapter.mover.exists(cursor, oid_int):
        raise POSKeyError(oid)

    state, start_tid = self.adapter.mover.load_before(
        cursor, oid_int, bytes8_to_int64(tid))

    if start_tid is None:
        # No revision committed before *tid*.
        return None

    if state is None:
        # This can happen if something attempts to load
        # an object whose creation has been undone, see load()
        # This change fixes the test in
        # TransactionalUndoStorage.checkUndoCreationBranch1
        # self._log_keyerror doesn't work here, only in certain states.
        raise POSKeyError(oid)

    # Find the transaction (if any) that superseded the revision we found,
    # so callers know the validity interval of the returned state.
    end_int = self.adapter.mover.get_object_tid_after(
        cursor, oid_int, start_tid)
    if end_int is not None:
        end = int64_to_8bytes(end_int)
    else:
        end = None

    return state, int64_to_8bytes(start_tid), end
def loadBlob(self, oid, serial):
    """Return the path of the blob file for the given oid/serial pair.

    Raises POSKeyError when no blob file exists on disk for that
    revision.
    """
    path = self.fshelper.getBlobFilename(oid, serial)
    if os.path.exists(path):
        return path
    raise POSKeyError("No blob file", oid, serial)
def patched_field_index_html(self, instance, REQUEST=None, RESPONSE=None,
                             disposition='inline'):
    """ Patch for index_html for field.

    Wraps the original ``index_html`` so that a missing or empty BLOB
    does not raise: instead the user is warned and redirected to the
    object's view. An empty result from the original method is treated
    the same as a POSKeyError.
    """
    try:
        blob = self._old_index_html(instance, REQUEST=REQUEST,
                                    RESPONSE=RESPONSE,
                                    disposition=disposition)
        if blob:
            return blob
        # Funnel the empty-blob case into the same recovery path as a
        # genuinely missing blob record.
        raise POSKeyError()
    except POSKeyError:
        logger.warning(
            "BLOBWARNING: When doing index_html "
            "for field %r for %r with request %r return an empty file",
            self, instance, REQUEST)
        if not RESPONSE:
            # Fall back to the response of the instance's own request
            # when the caller did not supply one.
            RESPONSE = instance.REQUEST.RESPONSE
        putils = getToolByName(instance, 'plone_utils')
        putils.addPortalMessage('Discovered an empty BLOB file for %r' %
                                instance.absolute_url_path(), type='warning')
        RESPONSE.redirect(instance.absolute_url() + '/view')
def _loadBlobInternal(self, cursor, oid, serial, blob_lock=None): blob_filename = self._cachedLoadBlobInternal(oid, serial) if not blob_filename: # All the blobs are in a shared directory. If the file # isn't here, it's not anywhere. raise POSKeyError("No blob file", oid, serial) return blob_filename
def history(self, oid, version=None, size=1, filter=None):
    # pylint:disable=unused-argument,too-many-locals
    """Return up to *size* history entries for *oid* as dictionaries.

    Entries that *filter* rejects are skipped and do not count toward
    *size*. Raises POSKeyError when the object's history cannot be
    iterated.
    """
    cursor = self.load_connection.cursor
    oid_int = bytes8_to_int64(oid)
    try:
        rows = self.adapter.dbiter.iter_object_history(cursor, oid_int)
    except KeyError:
        raise POSKeyError(oid)

    result = []
    for tid_int, username, description, extension, length in rows:
        tid = int64_to_8bytes(tid_int)
        # The pickled extension dict (if any) seeds the entry; our
        # standard keys then overwrite any clashes.
        entry = loads(extension) if extension else {}
        entry.update({
            "time": TimeStamp(tid).timeTime(),
            "user_name": username or b'',
            "description": description or b'',
            "tid": tid,
            "version": '',
            "size": length,
            "rs_tid_int": tid_int,
            "rs_oid_int": oid_int,
        })
        if filter is not None and not filter(entry):
            continue
        result.append(entry)
        if size is not None and len(result) >= size:
            break
    return result
def load(self, oid, version=''):
    # pylint:disable=unused-argument
    """Return ``(state, serial)`` for the current revision of *oid*.

    Raises POSKeyError when the object is unknown, or when its state
    is empty (creation undone, or deleted via
    IExternalGC.deleteObject()).
    """
    oid_int = bytes8_to_int64(oid)
    state, tid_int = self.__load_using_method(self.cache.load, oid_int)

    if tid_int is None:
        _log_keyerror(self.load_connection.cursor,
                      self.adapter,
                      oid_int,
                      "no tid found")
        raise POSKeyError(oid)

    if state:
        return state, int64_to_8bytes(tid_int)

    # An empty state can happen if something attempts to load
    # an object whose creation has been undone or which was deleted
    # by IExternalGC.deleteObject().
    _log_keyerror(self.load_connection.cursor,
                  self.adapter,
                  oid_int,
                  "creation has been undone")
    raise POSKeyError(oid)
def _loadBack_impl(self, oid, back, fail=True): # shared implementation used by various _loadBack methods # # If the backpointer ultimately resolves to 0: # If fail is True, raise KeyError for zero backpointer. # If fail is False, return the empty data from the record # with no backpointer. while 1: if not back: # If backpointer is 0, object does not currently exist. raise POSKeyError(oid) h = self._read_data_header(back) if h.plen: return self._file.read(h.plen), h.tid, back, h.tloc if h.back == 0 and not fail: return None, h.tid, back, h.tloc back = h.back
def _loadBlobLocked(self, cursor, oid, serial, blob_filename): """ Returns a filename that exists on disk, or raises a POSKeyError. """ # OK, it's not here and we (or someone) needs to get it. We # want to avoid getting it multiple times. We want to avoid # getting it multiple times even accross separate client # processes on the same machine. We'll use file locking. # (accomplished by our caller.) # We got the lock, so it's our job to download it. First, # we'll double check that someone didn't download it while # we were getting the lock: if os.path.exists(blob_filename): return self._accessed(blob_filename) self.download_blob(cursor, oid, serial, blob_filename) if os.path.exists(blob_filename): return self._accessed(blob_filename) raise POSKeyError("No blob file", oid, serial)
def loadSerial(self, oid, serial):
    """Load a specific revision of an object.

    Checks the cache first, then the store connection, then the load
    connection. Raises POSKeyError when the revision cannot be found
    (or has an empty state).
    """
    oid_int = bytes8_to_int64(oid)
    tid_int = bytes8_to_int64(serial)

    # If we've got this state cached exactly,
    # use it. No need to poll or anything like that first;
    # polling is unlikely to get us the state we want.
    # If the data happens to have been removed from the database,
    # due to a pack, this won't detect it if it was already cached
    # and the pack happened somewhere else. This method is
    # only used for conflict resolution, though, and we
    # shouldn't be able to get to that point if the root revision
    # went missing, right? Packing periodically takes the same locks we
    # want to take for committing.
    state = self.cache.loadSerial(oid_int, tid_int)
    if state:
        return state

    for conn in self.store_connection, self.load_connection:
        if not conn:
            continue

        # Allow loading data from later transactions for conflict
        # resolution. In fact try that first because it's more
        # likely that our old load connection can't see this new
        # state (because this method is used only for conflict resolution).
        state = self.adapter.mover.load_revision(
            conn.cursor, oid_int, tid_int)
        if state is not None:
            break

    # The original condition ``state is None or not state`` was
    # redundant: ``not state`` already covers None as well as an
    # empty state, and both mean the revision is unusable.
    if not state:
        raise POSKeyError(oid)
    return state
def __getattr__(self, name):
    # Any attribute lookup that is not satisfied normally is reported
    # as a missing persistent key rather than an AttributeError.
    raise POSKeyError(name)