def get_backup_extract(): print fmt_title("Executing Duplicity to download %s to %s " % (address, raw_download_path)) downloader(raw_download_path, target, log=_print if not silent else None, debug=opt_debug, force=opt_force) return raw_download_path
def packages(self): newpkgs_file = self.extras.newpkgs if not exists(newpkgs_file): return packages = file(newpkgs_file).read().strip() packages = [] if not packages else packages.split('\n') if not packages: return print fmt_title( "PACKAGES - %d new packages listed in %s" % (len(packages), newpkgs_file), '-') already_installed = set(pkgman.installed()) & set(packages) if len(already_installed) == len(packages): print "ALL NEW PACKAGES ALREADY INSTALLED\n" return if already_installed: print "// New packages not already installed: %d" % ( len(packages) - len(already_installed)) # apt-get update, otherwise installer may skip everything print "// Update list of available packages" print print "# apt-get update" system("apt-get update") installer = pkgman.Installer(packages, self.PACKAGES_BLACKLIST) print print "// Installing new packages" if installer.skipping: print "// Skipping uninstallable packages: " + " ".join( installer.skipping) print if not installer.command: print "NO NEW PACKAGES TO INSTALL\n" return print "# " + installer.command if not self.simulate: exitcode = installer() if exitcode != 0: print "# WARNING: non-zero exitcode (%d)" % exitcode if self.rollback: self.rollback.save_new_packages(installer.installed) print
def packages(self): newpkgs_file = self.extras.newpkgs if not exists(newpkgs_file): return packages = file(newpkgs_file).read().strip() packages = [] if not packages else packages.split('\n') if not packages: return print fmt_title("PACKAGES - %d new packages listed in %s" % (len(packages), newpkgs_file), '-') already_installed = set(pkgman.installed()) & set(packages) if len(already_installed) == len(packages): print "ALL NEW PACKAGES ALREADY INSTALLED\n" return if already_installed: print "// New packages not already installed: %d" % (len(packages) - len(already_installed)) # apt-get update, otherwise installer may skip everything print "// Update list of available packages" print print "# apt-get update" system("apt-get update") installer = pkgman.Installer(packages, self.PACKAGES_BLACKLIST) print print "// Installing new packages" if installer.skipping: print "// Skipping uninstallable packages: " + " ".join(installer.skipping) print if not installer.command: print "NO NEW PACKAGES TO INSTALL\n" return print "# " + installer.command if not self.simulate: exitcode = installer() if exitcode != 0: print "# WARNING: non-zero exitcode (%d)" % exitcode if self.rollback: self.rollback.save_new_packages(installer.installed) print
def _create_extras(self, extras, profile, conf): os.mkdir(extras.path) os.chmod(extras.path, 0700) etc = str(extras.etc) os.mkdir(etc) self._log(" mkdir " + etc) self._log("\n// needed to automatically detect and fix file ownership issues\n") shutil.copy("/etc/passwd", etc) self._log(" cp /etc/passwd " + etc) shutil.copy("/etc/group", etc) self._log(" cp /etc/group " + etc) if not conf.skip_packages or not conf.skip_files: self._log("\n" + fmt_title("Comparing current system state to the base state in the backup profile", '-')) if not conf.skip_packages and exists(profile.packages): self._write_new_packages(extras.newpkgs, profile.packages) if not conf.skip_files: # support empty profiles dirindex = profile.dirindex if exists(profile.dirindex) else "/dev/null" dirindex_conf = profile.dirindex_conf if exists(profile.dirindex_conf) else "/dev/null" self._write_whatchanged(extras.fsdelta, extras.fsdelta_olist, dirindex, dirindex_conf, conf.overrides.fs) if not conf.skip_database: try: if mysql.MysqlService.is_running(): self._log("\n" + fmt_title("Serializing MySQL database to " + extras.myfs, '-')) mysql.backup(extras.myfs, extras.etc.mysql, limits=conf.overrides.mydb, callback=mysql.cb_print()) if self.verbose else None except mysql.Error: pass try: if pgsql.PgsqlService.is_running(): self._log("\n" + fmt_title("Serializing PgSQL databases to " + extras.pgfs, '-')) pgsql.backup(extras.pgfs, conf.overrides.pgdb, callback=pgsql.cb_print() if self.verbose else None) except pgsql.Error: pass
def database(self): if not exists(self.extras.myfs) and not exists(self.extras.pgfs): return if self.rollback: self.rollback.save_database() if exists(self.extras.myfs): print fmt_title("DATABASE - unserializing MySQL databases from " + self.extras.myfs) try: mysql.restore(self.extras.myfs, self.extras.etc.mysql, limits=self.limits.mydb, callback=mysql.cb_print(), simulate=self.simulate) except mysql.Error, e: print "SKIPPING MYSQL DATABASE RESTORE: " + str(e)
# NOTE(review): fragment of the restore command flow, not a complete
# definition — it runs the pre-restore hooks, downloads the extract if no
# path was given, validates the extras directory, exports
# TKLBAM_BACKUP_EXTRACT_PATH / TKLBAM_RESTORE_PROFILE_ID, constructs the
# Restore object, and begins printing a --debug banner.  The triple-quoted
# banner string that starts at the end of this chunk is NOT terminated
# here; the remainder lives outside this span.  Code left byte-identical.
hooks.restore.pre() if not backup_extract_path: backup_extract_path = get_backup_extract() extras_paths = backup.ExtrasPaths(backup_extract_path) if not isdir(extras_paths.path): fatal( "missing %s directory - this doesn't look like a system backup" % extras_paths.path) os.environ['TKLBAM_BACKUP_EXTRACT_PATH'] = backup_extract_path if not silent: print fmt_title("Restoring system from backup extract at " + backup_extract_path) restore = Restore(backup_extract_path, limits=opt_limits, rollback=not no_rollback, simulate=opt_simulate) if restore.conf: os.environ['TKLBAM_RESTORE_PROFILE_ID'] = restore.conf.profile_id hooks.restore.inspect(restore.extras.path) if opt_debug: print """\ The --debug option has (again) dropped you into an interactive shell so that you can explore the state of the system just before restore. The current working directory contains the backup extract.
def __init__(self, profile, overrides, skip_files=False, skip_packages=False,
             skip_database=False, resume=False, verbose=True, extras_root="/"):
    """Prepare the extras directory for a backup run.

    Raises self.Error when no profile is given.  A previously aborted
    session may only be resumed when the extras directory exists and was
    produced by an identical backup configuration; otherwise the extras
    directory is wiped and rebuilt.  When verbose, logs the uncompressed
    footprint (size and file count) of the pending backup.
    """
    self.verbose = verbose

    if not profile:
        raise self.Error("can't backup without a profile")

    profile_paths = ProfilePaths(profile.path)
    extras_paths = ExtrasPaths(extras_root)

    # decide whether we can allow resume=True
    # /TKLBAM has to exist and the backup configuration has to match
    backup_conf = BackupConf(profile.profile_id, overrides,
                             skip_files, skip_packages, skip_database)
    saved_backup_conf = BackupConf.fromfile(extras_paths.backup_conf)
    if backup_conf != saved_backup_conf:
        resume = False

    if not resume:
        _rmdir(extras_paths.path)
    else:
        self._log("ATTEMPTING TO RESUME ABORTED BACKUP SESSION")

    self.resume = resume

    # create or re-use the extras directory
    if not exists(extras_paths.path):
        self._log(fmt_title("Creating %s (contains backup metadata and database dumps)" % extras_paths.path))
        self._log(" mkdir -p " + extras_paths.path)
        try:
            self._create_extras(extras_paths, profile_paths, backup_conf)
            backup_conf.tofile(extras_paths.backup_conf)
        except:
            # destroy potentially incomplete extras, then re-raise
            _rmdir(extras_paths.path)
            raise

    # print uncompressed footprint
    if verbose:
        # files in the extras dir + files listed in fsdelta-olist
        fpaths = _fpaths(extras_paths.path)

        if not skip_files:
            # fix: close the olist file instead of leaking the handle
            # via file(...).read()
            with open(extras_paths.fsdelta_olist) as fh:
                fsdelta_olist = fh.read().splitlines()
            fpaths += _filter_deleted(fsdelta_olist)

        size = sum([os.lstat(fpath).st_size for fpath in fpaths])
        if size > 1024 * 1024 * 1024:
            size_fmt = "%.2f GB" % (float(size) / (1024 * 1024 * 1024))
        elif size > 1024 * 1024:
            size_fmt = "%.2f MB" % (float(size) / (1024 * 1024))
        else:
            size_fmt = "%.2f KB" % (float(size) / 1024)

        self._log("\nUNCOMPRESSED BACKUP SIZE: %s in %d files" % (size_fmt, len(fpaths)))

    self.extras_paths = extras_paths
def _create_extras(self, extras, profile, conf): os.mkdir(extras.path) os.chmod(extras.path, 0700) etc = str(extras.etc) os.mkdir(etc) self._log(" mkdir " + etc) self._log( "\n// needed to automatically detect and fix file ownership issues\n" ) shutil.copy("/etc/passwd", etc) self._log(" cp /etc/passwd " + etc) shutil.copy("/etc/group", etc) self._log(" cp /etc/group " + etc) if not conf.skip_packages or not conf.skip_files: self._log("\n" + fmt_title( "Comparing current system state to the base state in the backup profile", '-')) if not conf.skip_packages and exists(profile.packages): self._write_new_packages(extras.newpkgs, profile.packages) if not conf.skip_files: # support empty profiles dirindex = profile.dirindex if exists( profile.dirindex) else "/dev/null" dirindex_conf = profile.dirindex_conf if exists( profile.dirindex_conf) else "/dev/null" self._write_whatchanged(extras.fsdelta, extras.fsdelta_olist, dirindex, dirindex_conf, conf.overrides.fs) if not conf.skip_database: try: if mysql.MysqlService.is_running(): self._log("\n" + fmt_title( "Serializing MySQL database to " + extras.myfs, '-')) mysql.backup( extras.myfs, extras.etc.mysql, limits=conf.overrides.mydb, callback=mysql.cb_print()) if self.verbose else None except mysql.Error: pass try: if pgsql.PgsqlService.is_running(): self._log("\n" + fmt_title( "Serializing PgSQL databases to " + extras.pgfs, '-')) pgsql.backup( extras.pgfs, conf.overrides.pgdb, callback=pgsql.cb_print() if self.verbose else None) except pgsql.Error: pass
# NOTE(review): incomplete fragment — a `try:` whose except/finally clauses
# are outside this chunk, and a --debug banner whose triple-quoted string is
# opened but never closed here.  It runs pre-restore hooks, resolves the
# backup extract path, validates the extras directory, exports the
# TKLBAM_* environment variables and builds the Restore object.
# Code left byte-identical; do not edit without the surrounding context.
try: hooks.restore.pre() if not backup_extract_path: backup_extract_path = get_backup_extract() extras_paths = backup.ExtrasPaths(backup_extract_path) if not isdir(extras_paths.path): fatal("missing %s directory - this doesn't look like a system backup" % extras_paths.path) os.environ['TKLBAM_BACKUP_EXTRACT_PATH'] = backup_extract_path if not silent: print fmt_title("Restoring system from backup extract at " + backup_extract_path) restore = Restore(backup_extract_path, limits=opt_limits, rollback=not no_rollback, simulate=opt_simulate) if restore.conf: os.environ['TKLBAM_RESTORE_PROFILE_ID'] = restore.conf.profile_id hooks.restore.inspect(restore.extras.path) if opt_debug: print """\ The --debug option has (again) dropped you into an interactive shell so that you can explore the state of the system just before restore. The current working directory contains the backup extract. To exit from the shell and continue the restore run "exit 0". To exit from the shell and abort the restore run "exit 1".
# NOTE(review): incomplete fragment of the backup command flow — the first
# `try/except` notifies the Hub of in-progress state (best-effort: failures
# only warn), the second `try:` has no visible handler in this chunk, and
# the chunk is truncated after constructing backup.Backup.  When a raw
# upload path is given it shells out to Duplicity via duplicity.Uploader;
# otherwise it runs the pre-backup hooks and builds a Backup.  The local
# `bool` parameter shadows the builtin.  Code left byte-identical; do not
# edit without the surrounding context.
try: hb.set_backup_inprogress(registry.hbr.backup_id, bool) except hb.Error, e: warn("can't update Hub of backup %s: %s" % ("in progress" if bool else "completed", str(e))) try: backup_inprogress(True) def _print(s): if s == "\n": print else: print "# " + str(s) if raw_upload_path: print fmt_title("Executing Duplicity to backup %s to %s" % (raw_upload_path, target.address)) _print("export PASSPHRASE=$(cat %s)" % conf.secretfile) uploader = duplicity.Uploader(True, conf.volsize, conf.full_backup, conf.s3_parallel_uploads) uploader(raw_upload_path, target, force_cleanup=not opt_resume, dry_run=opt_simulate, debug=opt_debug, log=_print) else: hooks.backup.pre() b = backup.Backup(registry.profile, conf.overrides, conf.backup_skip_files, conf.backup_skip_packages, conf.backup_skip_database, opt_resume, True, dump_path if dump_path else "/")
def files(self): extras = self.extras if not exists(extras.fsdelta): return overlay = self.backup_extract_path simulate = self.simulate rollback = self.rollback limits = self.limits.fs print fmt_title("FILES - restoring files, ownership and permissions", '-') passwd, group, uidmap, gidmap = self._userdb_merge(extras.etc, "/etc") if uidmap or gidmap: print "MERGING USERS AND GROUPS:\n" for olduid in uidmap: print " UID %d => %d" % (olduid, uidmap[olduid]) for oldgid in gidmap: print " GID %d => %d" % (oldgid, gidmap[oldgid]) print changes = Changes.fromfile(extras.fsdelta, limits) deleted = list(changes.deleted()) if rollback: rollback.save_files(changes, overlay) fsdelta_olist = self._get_fsdelta_olist(extras.fsdelta_olist, limits) if fsdelta_olist: print "OVERLAY:\n" for fpath in fsdelta_olist: print " " + fpath if not simulate: self._apply_overlay(overlay, '/', fsdelta_olist) print statfixes = list(changes.statfixes(uidmap, gidmap)) if statfixes or deleted: print "POST-OVERLAY FIXES:\n" for action in statfixes: print " " + str(action) if not simulate: action() for action in deleted: print " " + str(action) # rollback moves deleted to 'originals' if not simulate and not rollback: action() if statfixes or deleted: print def w(path, s): file(path, "w").write(str(s)) if not simulate: w("/etc/passwd", passwd) w("/etc/group", group)
class Restore: Error = Error PACKAGES_BLACKLIST = ['linux-*', 'vmware-tools*'] def __init__(self, backup_extract_path, limits=[], rollback=True, simulate=False): self.extras = backup.ExtrasPaths(backup_extract_path) if not isdir(self.extras.path): raise self.Error("illegal backup_extract_path: can't find '%s'" % self.extras.path) if simulate: rollback = False self.conf = AttrDict(simplejson.loads(file(self.extras.backup_conf).read())) \ if exists(self.extras.backup_conf) else None self.simulate = simulate self.rollback = Rollback.create() if rollback else None self.limits = conf.Limits(limits) self.backup_extract_path = backup_extract_path def database(self): if not exists(self.extras.myfs) and not exists(self.extras.pgfs): return if self.rollback: self.rollback.save_database() if exists(self.extras.myfs): print fmt_title("DATABASE - unserializing MySQL databases from " + self.extras.myfs) try: mysql.restore(self.extras.myfs, self.extras.etc.mysql, limits=self.limits.mydb, callback=mysql.cb_print(), simulate=self.simulate) except mysql.Error, e: print "SKIPPING MYSQL DATABASE RESTORE: " + str(e) if exists(self.extras.pgfs): print "\n" + fmt_title( "DATABASE - Unserializing PgSQL databases from " + self.extras.pgfs) if self.simulate: print "CAN'T SIMULATE PGSQL RESTORE, SKIPPING" return try: pgsql.restore(self.extras.pgfs, self.limits.pgdb, callback=pgsql.cb_print()) except pgsql.Error, e: print "SKIPPING PGSQL DATABASE RESTORE: " + str(e)
def __init__(self, profile, overrides, skip_files=False, skip_packages=False,
             skip_database=False, resume=False, verbose=True, extras_root="/"):
    """Set up the extras directory for a backup run.

    A previously aborted session is resumed only when the extras directory
    exists and was written with an identical backup configuration;
    otherwise the directory is wiped and rebuilt from the profile.  When
    verbose, the uncompressed backup footprint is logged.
    """
    self.verbose = verbose
    if not profile:
        raise self.Error("can't backup without a profile")

    paths_profile = ProfilePaths(profile.path)
    paths_extras = ExtrasPaths(extras_root)

    # resume is only permitted when the saved backup configuration
    # matches the current one exactly
    backup_conf = BackupConf(profile.profile_id,
                             overrides,
                             skip_files,
                             skip_packages,
                             skip_database)
    saved_backup_conf = BackupConf.fromfile(paths_extras.backup_conf)
    if backup_conf != saved_backup_conf:
        resume = False

    if resume:
        self._log("ATTEMPTING TO RESUME ABORTED BACKUP SESSION")
    else:
        _rmdir(paths_extras.path)
    self.resume = resume

    # create or re-use the extras directory
    if not exists(paths_extras.path):
        self._log(fmt_title("Creating %s (contains backup metadata and database dumps)" % paths_extras.path))
        self._log(" mkdir -p " + paths_extras.path)
        try:
            self._create_extras(paths_extras, paths_profile, backup_conf)
            backup_conf.tofile(paths_extras.backup_conf)
        except:
            # destroy potentially incomplete extras, then re-raise
            _rmdir(paths_extras.path)
            raise

    # print uncompressed footprint
    if verbose:
        # files in the extras dir + files listed in fsdelta-olist
        fpaths = _fpaths(paths_extras.path)
        if not skip_files:
            fsdelta_olist = file(paths_extras.fsdelta_olist).read().splitlines()
            fpaths += _filter_deleted(fsdelta_olist)

        size = sum(os.lstat(fpath).st_size for fpath in fpaths)
        GB = 1024 * 1024 * 1024
        MB = 1024 * 1024
        if size > GB:
            size_fmt = "%.2f GB" % (float(size) / GB)
        elif size > MB:
            size_fmt = "%.2f MB" % (float(size) / MB)
        else:
            size_fmt = "%.2f KB" % (float(size) / 1024)

        self._log("\nUNCOMPRESSED BACKUP SIZE: %s in %d files" % (size_fmt, len(fpaths)))

    self.extras_paths = paths_extras