def copyTransactionsFromTo(source, destination):
    """Copy all transactions, including blob data, from source to destination."""
    for trans in source.iterator():
        destination.tpc_begin(trans, trans.tid, trans.status)
        for record in trans:
            blobfilename = None
            if is_blob_record(record.data):
                try:
                    blobfilename = source.loadBlob(record.oid, record.tid)
                except POSKeyError:
                    # The source has no blob file for this record.
                    pass
            if blobfilename is not None:
                # Copy the blob into a temp file in the destination's blob
                # temp dir, then hand it over via restoreBlob.
                fd, name = tempfile.mkstemp(
                    prefix='CTFT', suffix='.tmp',
                    dir=destination.fshelper.temp_dir)
                os.close(fd)
                with open(blobfilename, 'rb') as sf:
                    with open(name, 'wb') as df:
                        utils.cp(sf, df)
                destination.restoreBlob(record.oid, record.tid, record.data,
                                        name, record.data_txn, trans)
            else:
                destination.restore(record.oid, record.tid, record.data,
                                    '', record.data_txn, trans)
        destination.tpc_vote(trans)
        destination.tpc_finish(trans)
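# A hedged usage sketch for copyTransactionsFromTo: migrating one
# blob-capable FileStorage into a fresh one. The paths are made up, and
# blob_dir assumes a ZODB version whose FileStorage supports blobs
# directly; this is an illustration, not the project's own tooling.
from ZODB.FileStorage import FileStorage

source = FileStorage('old/Data.fs', blob_dir='old/blobs', read_only=True)
destination = FileStorage('new/Data.fs', blob_dir='new/blobs', create=True)
try:
    copyTransactionsFromTo(source, destination)
finally:
    source.close()
    destination.close()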
def open(self, mode="r"):
    if mode not in valid_modes:
        raise ValueError("invalid mode", mode)

    if self.writers:
        raise BlobError("Already opened for writing.")

    if self.readers is None:
        self.readers = []

    if mode == 'r':
        if self._current_filename() is None:
            self._create_uncommitted_file()

        result = BlobFile(self._current_filename(), mode, self)

        def destroyed(ref, readers=self.readers):
            try:
                readers.remove(ref)
            except ValueError:
                pass

        self.readers.append(weakref.ref(result, destroyed))
    else:
        if self.readers:
            raise BlobError("Already opened for reading.")

        if mode == 'w':
            if self._p_blob_uncommitted is None:
                self._create_uncommitted_file()
            result = BlobFile(self._p_blob_uncommitted, mode, self)
        else:
            if self._p_blob_uncommitted is None:
                # Create a new working copy, seeded from the committed data.
                self._create_uncommitted_file()
                result = BlobFile(self._p_blob_uncommitted, mode, self)
                with open(self._p_blob_committed, 'rb') as fp:
                    utils.cp(fp, result)
                if mode == 'r+':
                    result.seek(0)
            else:
                # Re-use existing working copy
                result = BlobFile(self._p_blob_uncommitted, mode, self)

        def destroyed(ref, writers=self.writers):
            try:
                writers.remove(ref)
            except ValueError:
                pass

        self.writers.append(weakref.ref(result, destroyed))
        self._p_changed = True

    return result
def undo(self, serial_id, transaction):
    undo_serial, keys = getProxiedObject(self).undo(serial_id, transaction)
    # serial_id is the transaction id of the txn that we wish to undo.
    # "undo_serial" is the transaction id of the txn in which the undo is
    # performed.  "keys" is the list of oids that are involved in the
    # undo transaction.

    # The serial_id is assumed to be given to us base-64 encoded
    # (belying the web UI legacy of the ZODB code :-()
    serial_id = base64.decodestring(serial_id + '\n')

    self._lock_acquire()
    try:
        # We get all the blob oids on the filesystem related to the
        # transaction we want to undo.
        for oid in self.fshelper.getOIDsForSerial(serial_id):
            # We want to find the serial id of the previous revision
            # of this blob object.
            load_result = self.loadBefore(oid, serial_id)

            if load_result is None:
                # There was no previous revision of this blob object.
                # The blob was created in the transaction represented
                # by serial_id.  We copy the blob data to a new file
                # that references the undo transaction in case a user
                # wishes to undo this undo.  It would be nice if we had
                # some way to link to old blobs.
                orig_fn = self.fshelper.getBlobFilename(oid, serial_id)
                new_fn = self.fshelper.getBlobFilename(oid, undo_serial)
            else:
                # A previous revision of this blob existed before the
                # transaction implied by "serial_id".  We copy the blob
                # data to a new file that references the undo transaction
                # in case a user wishes to undo this undo.
                data, serial_before, serial_after = load_result
                orig_fn = self.fshelper.getBlobFilename(oid, serial_before)
                new_fn = self.fshelper.getBlobFilename(oid, undo_serial)
            # Copy the blob bytes in binary mode ("rb", not "r").
            with open(orig_fn, "rb") as orig:
                with open(new_fn, "wb") as new:
                    utils.cp(orig, new)
            self.dirty_oids.append((oid, undo_serial))
    finally:
        self._lock_release()

    return undo_serial, keys
def undo(self, serial_id, transaction):
    undo_serial, keys = self.__storage.undo(serial_id, transaction)
    # serial_id is the transaction id of the txn that we wish to undo.
    # "undo_serial" is the transaction id of the txn in which the undo is
    # performed.  "keys" is the list of oids that are involved in the
    # undo transaction.

    # The serial_id is assumed to be given to us base-64 encoded
    # (belying the web UI legacy of the ZODB code :-()
    serial_id = base64.decodestring(serial_id + '\n')

    self._lock_acquire()
    try:
        # We get all the blob oids on the filesystem related to the
        # transaction we want to undo.
        for oid in self.fshelper.getOIDsForSerial(serial_id):
            # We want to find the serial id of the previous revision
            # of this blob object.
            load_result = self.loadBefore(oid, serial_id)

            if load_result is None:
                # There was no previous revision of this blob object.
                # The blob was created in the transaction represented
                # by serial_id.  We copy the blob data to a new file
                # that references the undo transaction in case a user
                # wishes to undo this undo.  It would be nice if we had
                # some way to link to old blobs.
                orig_fn = self.fshelper.getBlobFilename(oid, serial_id)
                new_fn = self.fshelper.getBlobFilename(oid, undo_serial)
            else:
                # A previous revision of this blob existed before the
                # transaction implied by "serial_id".  We copy the blob
                # data to a new file that references the undo transaction
                # in case a user wishes to undo this undo.
                data, serial_before, serial_after = load_result
                orig_fn = self.fshelper.getBlobFilename(oid, serial_before)
                new_fn = self.fshelper.getBlobFilename(oid, undo_serial)
            # Copy the blob bytes in binary mode ("rb", not "r").
            with open(orig_fn, "rb") as orig:
                with open(new_fn, "wb") as new:
                    utils.cp(orig, new)
            self.dirty_oids.append((oid, undo_serial))
    finally:
        self._lock_release()

    return undo_serial, keys
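# A hedged sketch of driving the blob-aware undo above through the public
# ZODB API. The storage variable is a placeholder for any undoable,
# blob-capable storage; db.undoLog() entries carry the base64-encoded
# transaction id under 'id', matching the decoding done in undo() above.
import transaction
from ZODB import DB

db = DB(storage)
log = db.undoLog(0, 1)        # most recent undoable transaction
db.undo(log[0]['id'])         # schedules the undo ...
transaction.commit()          # ... which is performed at commit time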
def rename_or_copy_blob(f1, f2, chmod=True):
    """Try to rename f1 to f2, fallback to copy.

    Under certain conditions a rename might not work, e.g. because the
    target directory is on a different partition. In this case we try to
    copy the data and remove the old file afterwards.
    """
    try:
        os.rename(f1, f2)
    except OSError:
        copied("Copied blob file %r to %r.", f1, f2)
        with open(f1, 'rb') as file1:
            with open(f2, 'wb') as file2:
                utils.cp(file1, file2)
        remove_committed(f1)
    if chmod:
        os.chmod(f2, stat.S_IREAD)
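# The fallback above guards mainly against EXDEV (rename across
# filesystems). A minimal standalone sketch of the same pattern using only
# the standard library; move_file is a hypothetical name, not part of ZODB.
import os
import shutil
import stat

def move_file(src, dst, make_read_only=True):
    try:
        os.rename(src, dst)      # atomic when src and dst share a filesystem
    except OSError:              # e.g. errno.EXDEV: cross-device link
        shutil.copyfile(src, dst)
        os.remove(src)
    if make_read_only:
        os.chmod(dst, stat.S_IREAD)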
def exportFile(self, oid, f=None):
    if f is None:
        f = TemporaryFile(prefix="EXP")
    elif isinstance(f, six.string_types):
        f = open(f, 'w+b')
    f.write(b'ZEXP')
    oids = [oid]
    done_oids = {}
    done = done_oids.__contains__
    load = self._storage.load
    supports_blobs = IBlobStorage.providedBy(self._storage)
    while oids:
        oid = oids.pop(0)
        if oid in done_oids:
            continue
        done_oids[oid] = True
        try:
            p, serial = load(oid)
        except:
            logger.debug("broken reference for oid %s", repr(oid),
                         exc_info=True)
        else:
            referencesf(p, oids)
            f.writelines([oid, p64(len(p)), p])
            if supports_blobs:
                if not isinstance(self._reader.getGhost(p), Blob):
                    continue  # not a blob
                blobfilename = self._storage.loadBlob(oid, serial)
                f.write(blob_begin_marker)
                f.write(p64(os.stat(blobfilename).st_size))
                with open(blobfilename, "rb") as blobdata:
                    cp(blobdata, f)
    f.write(export_end_marker)
    return f
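# A hedged, standalone sketch of the record layout exportFile produces,
# for a seekable stream: a 4-byte b'ZEXP' magic, then per object an 8-byte
# oid, an 8-byte big-endian length, the pickle bytes, and optionally a
# blob marker plus an 8-byte length plus the raw blob bytes; the stream
# ends with a 16-byte end marker. The marker values below assume ZODB's
# ExportImport module (blob_begin_marker = b'\000BLOBSTART',
# export_end_marker = b'\377' * 16); walk_zexp itself is hypothetical.
import struct

BLOB_BEGIN = b'\000BLOBSTART'
EXPORT_END = b'\377' * 16

def walk_zexp(f):
    """Yield (oid, pickle_bytes, blob_length_or_None) per exported object."""
    if f.read(4) != b'ZEXP':
        raise ValueError('not a ZEXP stream')
    while True:
        header = f.read(16)
        if header == EXPORT_END:
            break
        oid = header[:8]
        length = struct.unpack('>Q', header[8:16])[0]
        pickle_bytes = f.read(length)
        marker = f.read(len(BLOB_BEGIN))
        blob_len = None
        if marker == BLOB_BEGIN:
            blob_len = struct.unpack('>Q', f.read(8))[0]
            f.seek(blob_len, 1)          # skip over the raw blob payload
        else:
            f.seek(-len(marker), 1)      # no blob for this record
        yield oid, pickle_bytes, blob_len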
def copyTransactionsFrom(self, other):
    for trans in other.iterator():
        self.tpc_begin(trans, trans.tid, trans.status)
        for record in trans:
            blobfilename = None
            if is_blob_record(record.data):
                try:
                    blobfilename = other.loadBlob(record.oid, record.tid)
                except POSKeyError:
                    pass
            if blobfilename is not None:
                fd, name = tempfile.mkstemp(
                    suffix='.tmp', dir=self.fshelper.temp_dir)
                os.close(fd)
                # Use context managers so both files are flushed and
                # closed before restoreBlob consumes the temp file.
                with open(blobfilename, 'rb') as sf:
                    with open(name, 'wb') as df:
                        utils.cp(sf, df)
                self.restoreBlob(record.oid, record.tid, record.data,
                                 name, record.data_txn, trans)
            else:
                self.restore(record.oid, record.tid, record.data,
                             '', record.data_txn, trans)
        self.tpc_vote(trans)
        self.tpc_finish(trans)
def _importDuringCommit(self, transaction, f, return_oid_list):
    """Import data during two-phase commit.

    Invoked by the transaction manager mid commit.
    Appends one item, the OID of the first object created,
    to return_oid_list.
    """
    oids = {}

    # IMPORTANT: This code should be consistent with the code in
    # serialize.py. It is currently out of date and doesn't handle
    # weak references.

    def persistent_load(ooid):
        """Remap a persistent id to a new ID and create a ghost for it."""

        klass = None
        if isinstance(ooid, tuple):
            ooid, klass = ooid

        if not isinstance(ooid, bytes):
            assert isinstance(ooid, str)
            # this happens on Python 3 when all bytes in the oid are < 0x80
            ooid = ooid.encode('ascii')

        if ooid in oids:
            oid = oids[ooid]
        else:
            if klass is None:
                oid = self._storage.new_oid()
            else:
                oid = self._storage.new_oid(), klass
            oids[ooid] = oid

        return Ghost(oid)

    while 1:
        header = f.read(16)
        if header == export_end_marker:
            break
        if len(header) != 16:
            raise ExportError("Truncated export file")

        # Extract header information
        ooid = header[:8]
        length = u64(header[8:16])
        data = f.read(length)

        if len(data) != length:
            raise ExportError("Truncated export file")

        if oids:
            oid = oids[ooid]
            if isinstance(oid, tuple):
                oid = oid[0]
        else:
            oids[ooid] = oid = self._storage.new_oid()
            return_oid_list.append(oid)

        # Blob support
        blob_begin = f.read(len(blob_begin_marker))
        if blob_begin == blob_begin_marker:
            # Copy the blob data to a temporary file
            # and remember the name
            blob_len = u64(f.read(8))
            blob_filename = mktemp()
            with open(blob_filename, "wb") as blob_file:
                cp(f, blob_file, blob_len)
        else:
            f.seek(-len(blob_begin_marker), 1)
            blob_filename = None

        pfile = BytesIO(data)
        unpickler = Unpickler(pfile)
        unpickler.persistent_load = persistent_load

        newp = BytesIO()
        pickler = PersistentPickler(persistent_id, newp, _protocol)

        pickler.dump(unpickler.load())
        pickler.dump(unpickler.load())
        data = newp.getvalue()

        if blob_filename is not None:
            self._storage.storeBlob(oid, None, data, blob_filename,
                                    '', transaction)
        else:
            self._storage.store(oid, None, data, '', transaction)
def open(self, mode="r"):
    if mode not in valid_modes:
        raise ValueError("invalid mode", mode)

    if mode == 'c':
        if (self._p_blob_uncommitted
                or not self._p_blob_committed
                or self._p_blob_committed.endswith(SAVEPOINT_SUFFIX)):
            raise BlobError('Uncommitted changes')
        return self._p_jar._storage.openCommittedBlobFile(
            self._p_oid, self._p_serial)

    if self.writers:
        raise BlobError("Already opened for writing.")

    if self.readers is None:
        self.readers = []

    if mode == 'r':
        result = None
        to_open = self._p_blob_uncommitted
        if not to_open:
            to_open = self._p_blob_committed
            if to_open:
                result = self._p_jar._storage.openCommittedBlobFile(
                    self._p_oid, self._p_serial, self)
            else:
                self._create_uncommitted_file()
                to_open = self._p_blob_uncommitted
                assert to_open

        if result is None:
            result = BlobFile(to_open, mode, self)

        def destroyed(ref, readers=self.readers):
            try:
                readers.remove(ref)
            except ValueError:
                pass

        self.readers.append(weakref.ref(result, destroyed))
    else:
        if self.readers:
            raise BlobError("Already opened for reading.")

        if mode == 'w':
            if self._p_blob_uncommitted is None:
                self._create_uncommitted_file()
            result = BlobFile(self._p_blob_uncommitted, mode, self)
        else:  # 'r+' and 'a'
            if self._p_blob_uncommitted is None:
                # Create a new working copy
                self._create_uncommitted_file()
                result = BlobFile(self._p_blob_uncommitted, mode, self)
                if self._p_blob_committed:
                    with open(self._p_blob_committed, 'rb') as fp:
                        utils.cp(fp, result)
                if mode == 'r+':
                    result.seek(0)
            else:
                # Re-use existing working copy
                result = BlobFile(self._p_blob_uncommitted, mode, self)

        def destroyed(ref, writers=self.writers):
            try:
                writers.remove(ref)
            except ValueError:
                pass

        self.writers.append(weakref.ref(result, destroyed))
        self._p_changed = True

    return result
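# A brief usage sketch for the open() modes above, mirroring the behavior
# exercised by ZODB's blob doctests; the data written here is made up.
from ZODB.blob import Blob

b = Blob()
w = b.open('w')               # creates a fresh uncommitted working copy
w.write(b'hello')
w.close()
del w                         # drop the handle so the writer is deregistered
r = b.open('r')               # reads the uncommitted data back
assert r.read() == b'hello'
r.close()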
def _importDuringCommit(self, transaction, f, return_oid_list):
    """Import data during two-phase commit.

    Invoked by the transaction manager mid commit.
    Appends one item, the OID of the first object created,
    to return_oid_list.
    """
    oids = {}

    # IMPORTANT: This code should be consistent with the code in
    # serialize.py. It is currently out of date and doesn't handle
    # weak references.

    def persistent_load(ooid):
        """Remap a persistent id to a new ID and create a ghost for it."""

        klass = None
        if isinstance(ooid, tuple):
            ooid, klass = ooid

        if ooid in oids:
            oid = oids[ooid]
        else:
            if klass is None:
                oid = self._storage.new_oid()
            else:
                oid = self._storage.new_oid(), klass
            oids[ooid] = oid

        return Ghost(oid)

    while 1:
        header = f.read(16)
        if header == export_end_marker:
            break
        if len(header) != 16:
            raise ExportError("Truncated export file")

        # Extract header information
        ooid = header[:8]
        length = u64(header[8:16])
        data = f.read(length)

        if len(data) != length:
            raise ExportError("Truncated export file")

        if oids:
            oid = oids[ooid]
            if isinstance(oid, tuple):
                oid = oid[0]
        else:
            oids[ooid] = oid = self._storage.new_oid()
            return_oid_list.append(oid)

        # Blob support
        blob_begin = f.read(len(blob_begin_marker))
        if blob_begin == blob_begin_marker:
            # Copy the blob data to a temporary file
            # and remember the name
            blob_len = u64(f.read(8))
            blob_filename = mktemp()
            with open(blob_filename, "wb") as blob_file:
                cp(f, blob_file, blob_len)
        else:
            f.seek(-len(blob_begin_marker), 1)
            blob_filename = None

        pfile = StringIO(data)
        unpickler = Unpickler(pfile)
        unpickler.persistent_load = persistent_load

        newp = StringIO()
        pickler = Pickler(newp, 1)
        pickler.inst_persistent_id = persistent_id

        pickler.dump(unpickler.load())
        pickler.dump(unpickler.load())
        data = newp.getvalue()

        if blob_filename is not None:
            self._storage.storeBlob(oid, None, data, blob_filename,
                                    "", transaction)
        else:
            self._storage.store(oid, None, data, "", transaction)
def open(self, mode="r"):
    if mode not in valid_modes:
        raise ValueError("invalid mode", mode)

    if mode == 'c':
        if (self._p_blob_uncommitted
                or not self._p_blob_committed
                or self._p_blob_committed.endswith(SAVEPOINT_SUFFIX)):
            raise BlobError('Uncommitted changes')
        return self._p_jar._storage.openCommittedBlobFile(
            self._p_oid, self._p_serial)

    if self.writers:
        raise BlobError("Already opened for writing.")

    if self.readers is None:
        self.readers = []

    if mode == 'r':
        result = None
        to_open = self._p_blob_uncommitted
        if not to_open:
            to_open = self._p_blob_committed
            if to_open:
                result = self._p_jar._storage.openCommittedBlobFile(
                    self._p_oid, self._p_serial, self)
            else:
                self._create_uncommitted_file()
                to_open = self._p_blob_uncommitted
                assert to_open

        if result is None:
            result = BlobFile(to_open, mode, self)

        def destroyed(ref, readers=self.readers):
            try:
                readers.remove(ref)
            except ValueError:
                pass

        self.readers.append(weakref.ref(result, destroyed))
    else:
        if self.readers:
            raise BlobError("Already opened for reading.")

        if mode == 'w':
            if self._p_blob_uncommitted is None:
                self._create_uncommitted_file()
            result = BlobFile(self._p_blob_uncommitted, mode, self)
        else:  # 'r+' and 'a'
            if self._p_blob_uncommitted is None:
                # Create a new working copy, seeded from the committed
                # data; open in binary mode and close the handle promptly.
                self._create_uncommitted_file()
                result = BlobFile(self._p_blob_uncommitted, mode, self)
                if self._p_blob_committed:
                    with open(self._p_blob_committed, 'rb') as fp:
                        utils.cp(fp, result)
                if mode == 'r+':
                    result.seek(0)
            else:
                # Re-use existing working copy
                result = BlobFile(self._p_blob_uncommitted, mode, self)

        def destroyed(ref, writers=self.writers):
            try:
                writers.remove(ref)
            except ValueError:
                pass

        self.writers.append(weakref.ref(result, destroyed))
        self._p_changed = True

    return result
def _importDuringCommit(self, transaction, f, return_oid_list):
    """Import data during two-phase commit.

    Invoked by the transaction manager mid commit.
    Appends one item, the OID of the first object created,
    to return_oid_list.
    """
    oids = {}

    # IMPORTANT: This code should be consistent with the code in
    # serialize.py. It is currently out of date and doesn't handle
    # weak references.

    def persistent_load(ooid):
        """Remap a persistent id to a new ID and create a ghost for it."""

        klass = None
        if isinstance(ooid, tuple):
            ooid, klass = ooid

        if not isinstance(ooid, bytes):
            assert isinstance(ooid, str)
            # this happens on Python 3 when all bytes in the oid are < 0x80
            ooid = ooid.encode('ascii')

        if ooid in oids:
            oid = oids[ooid]
        else:
            if klass is None:
                oid = self._storage.new_oid()
            else:
                oid = self._storage.new_oid(), klass
            oids[ooid] = oid

        return Ghost(oid)

    while 1:
        header = f.read(16)
        if header == export_end_marker:
            break
        if len(header) != 16:
            raise ExportError("Truncated export file")

        # Extract header information
        ooid = header[:8]
        length = u64(header[8:16])
        data = f.read(length)

        if len(data) != length:
            raise ExportError("Truncated export file")

        if oids:
            oid = oids[ooid]
            if isinstance(oid, tuple):
                oid = oid[0]
        else:
            oids[ooid] = oid = self._storage.new_oid()
            return_oid_list.append(oid)

        if (b'blob' in data and
                isinstance(self._reader.getGhost(data), Blob)):
            # Blob support

            # Make sure we have the (admittedly redundant) blob marker.
            if f.read(len(blob_begin_marker)) != blob_begin_marker:
                raise ValueError("No data for blob object")

            # Copy the blob data to a temporary file
            # and remember the name.  Note that mktemp takes the target
            # directory via its dir keyword.
            blob_len = u64(f.read(8))
            blob_filename = mktemp(dir=self._storage.temporaryDirectory())
            with open(blob_filename, "wb") as blob_file:
                cp(f, blob_file, blob_len)
        else:
            blob_filename = None

        pfile = BytesIO(data)
        unpickler = Unpickler(pfile)
        unpickler.persistent_load = persistent_load

        newp = BytesIO()
        pickler = PersistentPickler(persistent_id, newp, _protocol)

        pickler.dump(unpickler.load())
        pickler.dump(unpickler.load())
        data = newp.getvalue()

        if blob_filename is not None:
            self._storage.storeBlob(oid, None, data, blob_filename,
                                    '', transaction)
        else:
            self._storage.store(oid, None, data, '', transaction)
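# A hedged end-to-end sketch: exportFile and importFile are the public
# Connection entry points that feed _importDuringCommit during commit.
# The in-memory DB(None) and the dump file name are made up for
# illustration; API details here are from memory, not from this listing.
import transaction
from persistent.mapping import PersistentMapping
from ZODB import DB

db = DB(None)                                 # in-memory MappingStorage
conn = db.open()
conn.root()['obj'] = PersistentMapping({'answer': 42})
transaction.commit()

conn.exportFile(conn.root()['obj']._p_oid, 'dump.zexp')
with open('dump.zexp', 'rb') as fp:
    copy = conn.importFile(fp)                # imported under fresh oids
transaction.commit()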