def end_process(self):
    """Finish processing directory"""
    if self.dir_update:
        assert self.base_rp.isdir()
        rpath.copy_attribs(self.dir_update, self.base_rp)

        if (Globals.process_uid != 0 and
                self.dir_update.getperms() % 0o1000 < 0o700):
            # Directory was unreadable at start -- keep it readable
            # until the end of the backup process.
            self.base_rp.chmod(0o700 | self.dir_update.getperms())
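# A standalone sketch, not part of this module: what the permission test in
# end_process above computes, assuming stat-style mode bits.  "perms % 0o1000"
# drops the setuid/setgid/sticky bits, and comparing against 0o700 asks
# whether the owner lacks any of read, write, or execute on the directory.
# The helper name below is illustrative only.
def _dir_needs_temp_perms(perms):
    """True if a non-root process might be unable to fully use the dir"""
    return perms % 0o1000 < 0o700

assert _dir_needs_temp_perms(0o500)       # r-x------ : owner cannot write
assert not _dir_needs_temp_perms(0o755)   # rwxr-xr-x : fully usable
assert not _dir_needs_temp_perms(0o4700)  # setuid bit stripped by the modulo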
def patch_to_temp(self, basis_rp, diff_rorp, new):
    """Patch basis_rp, writing output in new, which doesn't exist yet

    Returns true if able to write new as desired, false if
    UpdateError or similar gets in the way.

    """
    if diff_rorp.isflaglinked():
        self.patch_hardlink_to_temp(diff_rorp, new)
    elif diff_rorp.get_attached_filetype() == 'snapshot':
        result = self.patch_snapshot_to_temp(diff_rorp, new)
        if not result:
            return 0
        elif result == 2:
            return 1  # SpecialFile
    elif not self.patch_diff_to_temp(basis_rp, diff_rorp, new):
        return 0
    if new.lstat() and not diff_rorp.isflaglinked():
        rpath.copy_attribs(diff_rorp, new)
    return self.matches_cached_rorp(diff_rorp, new)
def patch_snapshot_to_temp(self, diff_rorp, new):
    """Write diff_rorp to new, return true if successful

    Returns 1 if normal success, 2 if special file is written,
    whether or not it is successful.  This is because special
    files either fail with a SpecialFileError, or don't need to
    be compared.

    """
    if diff_rorp.isspecial():
        self.write_special(diff_rorp, new)
        rpath.copy_attribs(diff_rorp, new)
        return 2

    report = robust.check_common_error(self.error_handler, rpath.copy,
                                       (diff_rorp, new))
    if isinstance(report, hash.Report):
        self.CCPP.update_hash(diff_rorp.index, report.sha1_digest)
        return 1
    return report != 0  # if == 0, error_handler caught something
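# A rough sketch of the robust.check_common_error contract as inferred from
# the call site above, not taken from the robust module itself: run the
# function, and if a recoverable error occurs, hand it to the handler and
# return the handler's result (falsey here) instead of raising.  The
# exception type and handler signature are stand-ins.
def check_common_error_sketch(error_handler, function, args):
    try:
        return function(*args)
    except EnvironmentError as exc:  # stand-in for rdiff-backup's error set
        return error_handler(exc)    # real handler signature may differ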
def restore_orig_regfile(self, rf):
    """Restore original regular file

    This is the trickiest case for avoiding information loss,
    because we don't want to delete the increment before the
    mirror is fully written.

    """
    assert rf.metadata_rorp.isreg()
    if rf.mirror_rp.isreg():
        tf = TempFile.new(rf.mirror_rp)
        tf.write_from_fileobj(rf.get_restore_fp())
        tf.fsync_with_dir()  # make sure tf fully written before move
        rpath.copy_attribs(rf.metadata_rorp, tf)
        rpath.rename(tf, rf.mirror_rp)  # move is atomic
    else:
        if rf.mirror_rp.lstat():
            rf.mirror_rp.delete()
        rf.mirror_rp.write_from_fileobj(rf.get_restore_fp())
        rpath.copy_attribs(rf.metadata_rorp, rf.mirror_rp)
    if Globals.fsync_directories:
        rf.mirror_rp.get_parent_rp().fsync()  # force move before inc delete
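# A minimal standalone sketch of the write-then-rename pattern that
# restore_orig_regfile relies on, using plain os/tempfile instead of
# rdiff-backup's rpath wrappers; the helper name is hypothetical.  The data
# is fsynced before the rename, so a crash leaves either the old file or
# the complete new one on disk, never a partial mirror.
import os
import tempfile

def replace_file_atomically(path, data):
    dirname = os.path.dirname(path) or '.'
    fd, tmppath = tempfile.mkstemp(dir=dirname)  # temp in same filesystem
    try:
        os.write(fd, data)
        os.fsync(fd)  # temp file fully on disk before the move
    finally:
        os.close(fd)
    os.rename(tmppath, path)  # atomic within a single filesystem
    dirfd = os.open(dirname, os.O_RDONLY)  # POSIX: fsync the directory so
    try:                                   # the rename itself is durable,
        os.fsync(dirfd)                    # like Globals.fsync_directories
    finally:
        os.close(dirfd)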
def end_process(self):
    """Finish processing a directory"""
    rf = self.rf
    if rf.metadata_rorp.isdir():
        if rf.mirror_rp.isdir():
            rf.mirror_rp.setdata()
            if not rf.metadata_rorp.equal_loose(rf.mirror_rp):
                log.Log("Regressing attributes of " + rf.mirror_rp.path, 5)
                rpath.copy_attribs(rf.metadata_rorp, rf.mirror_rp)
        else:
            rf.mirror_rp.delete()
            log.Log("Regressing file " + rf.mirror_rp.path, 5)
            rpath.copy_with_attribs(rf.metadata_rorp, rf.mirror_rp)
    else:  # replacing a dir with some other kind of file
        assert rf.mirror_rp.isdir()
        log.Log("Replacing directory " + rf.mirror_rp.path, 5)
        if rf.metadata_rorp.isreg():
            self.restore_orig_regfile(rf)
        else:
            rf.mirror_rp.delete()
            rpath.copy_with_attribs(rf.metadata_rorp, rf.mirror_rp)
    if rf.regress_inc:
        log.Log("Deleting increment " + rf.regress_inc.path, 5)
        rf.regress_inc.delete()