def get_inc_select():
    """Yield the rpaths of all increments for the current index.

    First yields each increment file found directly at the increments
    base path; then, if that base is itself a directory, yields every
    path produced by a recursive selection walk of it.
    """
    base = Globals.rbdir.append_path('increments', index)
    for increment in restore.get_inclist(base):
        yield increment
    if not base.isdir():
        return
    for selected in selection.Select(base).set_iter():
        yield selected
def checkdest_need_check(dest_rp): """Return None if no dest dir found, 1 if dest dir needs check, 0 o/w""" if not dest_rp.isdir() or not Globals.rbdir.isdir(): return None for filename in Globals.rbdir.listdir(): if filename not in ['chars_to_quote', 'backup.log']: break else: # This may happen the first backup just after we test for quoting return None curmirroot = Globals.rbdir.append("current_mirror") curmir_incs = restore.get_inclist(curmirroot) if not curmir_incs: Log.FatalError( """Bad rdiff-backup-data dir on destination side The rdiff-backup data directory %s exists, but we cannot find a valid current_mirror marker. You can avoid this message by removing the rdiff-backup-data directory; however any data in it will be lost. Probably this error was caused because the first rdiff-backup session into a new directory failed. If this is the case it is safe to delete the rdiff-backup-data directory because there is no important information in it. """ % (Globals.rbdir.path,)) elif len(curmir_incs) == 1: return 0 else: if not force: try: curmir_incs[0].conn.regress.check_pids(curmir_incs) except (OSError, IOError), exc: Log.FatalError("Could not check if rdiff-backup is currently" "running due to\n%s" % exc) assert len(curmir_incs) == 2, "Found too many current_mirror incs!" return 1
def backup_get_mirrortime():
    """Return the time (in seconds) of the previous mirror, or None.

    None means no current_mirror marker exists yet (first backup).
    """
    markers = restore.get_inclist(Globals.rbdir.append_path("current_mirror"))
    assert len(markers) <= 1, \
        "Found %s current_mirror rps, expected <=1" % (len(markers),)
    if not markers:
        return None
    return markers[0].getinctime()
def get_inc(self):
    """Return inc_rpath, if any, corresponding to self.index

    Searches the increments tree for an increment whose time matches
    unsuccessful_backup_time; returns it, or None when there is none.
    """
    inc_base = Globals.rbdir.append_path("increments").new_index(self.index)
    matches = [inc for inc in restore.get_inclist(inc_base)
               if inc.getinctime() == unsuccessful_backup_time]
    assert len(matches) <= 1
    if matches:
        return matches[0]
    return None
def get_inc(self):
    """Return inc_rpath, if any, corresponding to self.index"""
    # Base rpath of the increments belonging to this index.
    increments_root = Globals.rbdir.append_path("increments")
    candidates = filter(
        lambda inc: inc.getinctime() == unsuccessful_backup_time,
        restore.get_inclist(increments_root.new_index(self.index)))
    assert len(candidates) <= 1
    # At most one increment can match the failed session's timestamp.
    return candidates[0] if candidates else None
def backup_remove_curmirror_local():
    """Remove the older of the current_mirror files.  Use at end of session"""
    assert Globals.rbdir.conn is Globals.local_connection
    curmir_incs = restore.get_inclist(Globals.rbdir.append("current_mirror"))
    assert len(curmir_incs) == 2
    first, second = curmir_incs
    # Keep the newer marker; on a timestamp tie the second one is removed,
    # matching the original comparison's behavior.
    older_inc = first if first.getinctime() < second.getinctime() else second
    # Make sure everything is written before curmirror is removed
    C.sync()
    older_inc.delete()
def ListIncrements(rp): """Print out a summary of the increments and their times""" rp = require_root_set(rp, 1) restore_check_backup_dir(restore_root) mirror_rp = restore_root.new_index(restore_index) inc_rpath = Globals.rbdir.append_path('increments', restore_index) incs = restore.get_inclist(inc_rpath) mirror_time = restore.MirrorStruct.get_mirror_time() if Globals.parsable_output: print manage.describe_incs_parsable(incs, mirror_time, mirror_rp) else: print manage.describe_incs_human(incs, mirror_time, mirror_rp)
def get_summary_triples(mirror_total, time_dict):
    """Return list of triples (time, size, cumulative size)

    The first triple describes the current mirror itself; the rest
    describe each increment from newest to oldest, with sizes
    accumulating as older increments are included.
    """
    cur_mir_base = Globals.rbdir.append('current_mirror')
    mirror_time = restore.get_inclist(cur_mir_base)[0].getinctime()
    triples = [(mirror_time, mirror_total, mirror_total)]
    cumulative_size = mirror_total
    # Newest increments first, so cumulative size grows going backward.
    for inc_time in sorted(time_dict.keys(), reverse=True):
        size = time_dict[inc_time]
        cumulative_size += size
        triples.append((inc_time, size, cumulative_size))
    return triples
def iterate_raw_rfs(mirror_rp, inc_rp):
    """Iterate all RegressFile objects in mirror/inc directory

    Also changes permissions of unreadable files.  We don't have to
    change them back later because regress will do that for us.
    """
    root_rf = RegressFile(mirror_rp, inc_rp, restore.get_inclist(inc_rp))
    def helper(rf):
        # Pre-order traversal: fix up permissions, yield rf itself,
        # then recurse into children when either side is a directory.
        mirror_rp = rf.mirror_rp
        if Globals.process_uid != 0:
            # Running unprivileged: grant owner read on regular files
            # and full owner perms on directories so they can be read.
            if mirror_rp.isreg() and not mirror_rp.readable():
                mirror_rp.chmod(0400 | mirror_rp.getperms())
            elif mirror_rp.isdir() and not mirror_rp.hasfullperms():
                mirror_rp.chmod(0700 | mirror_rp.getperms())
        yield rf
        if rf.mirror_rp.isdir() or rf.inc_rp.isdir():
            for sub_rf in rf.yield_sub_rfs():
                for sub_sub_rf in helper(sub_rf):
                    yield sub_sub_rf
    return helper(root_rf)
def rot_check_time(time_string): """Check remove older than time_string, return time in seconds""" try: time = Time.genstrtotime(time_string) except Time.TimeException, exc: Log.FatalError(str(exc)) times_in_secs = [inc.getinctime() for inc in restore.get_inclist(Globals.rbdir.append_path("increments"))] times_in_secs = filter(lambda t: t < time, times_in_secs) if not times_in_secs: Log("No increments older than %s found, exiting." % (Time.timetopretty(time),), 3) return None times_in_secs.sort() inc_pretty_time = "\n".join(map(Time.timetopretty, times_in_secs)) if len(times_in_secs) > 1 and not force: Log.FatalError("Found %d relevant increments, dated:\n%s" "\nIf you want to delete multiple increments in this way, " "use the --force." % (len(times_in_secs), inc_pretty_time)) if len(times_in_secs) == 1: Log("Deleting increment at time:\n" + inc_pretty_time, 3) else: Log("Deleting increments at times:\n" + inc_pretty_time, 3) return times_in_secs[-1]+1 # make sure we don't delete current increment
def checkdest_need_check(dest_rp): """Return None if no dest dir found, 1 if dest dir needs check, 0 o/w""" if not dest_rp.isdir() or not Globals.rbdir.isdir(): return None for filename in Globals.rbdir.listdir(): if filename not in ['chars_to_quote', 'special_escapes', 'backup.log']: break else: # This may happen the first backup just after we test for quoting return None curmirroot = Globals.rbdir.append("current_mirror") curmir_incs = restore.get_inclist(curmirroot) if not curmir_incs: Log.FatalError( """Bad rdiff-backup-data dir on destination side The rdiff-backup data directory %s exists, but we cannot find a valid current_mirror marker. You can avoid this message by removing the rdiff-backup-data directory; however any data in it will be lost. Probably this error was caused because the first rdiff-backup session into a new directory failed. If this is the case it is safe to delete the rdiff-backup-data directory because there is no important information in it. """ % (Globals.rbdir.path,)) elif len(curmir_incs) == 1: return 0 else: if not force: try: curmir_incs[0].conn.regress.check_pids(curmir_incs) except (OSError, IOError), exc: Log.FatalError("Could not check if rdiff-backup is currently" "running due to\n%s" % exc) assert len(curmir_incs) == 2, "Found too many current_mirror incs!" return 1
time = rot_check_time(remove_older_than_string) if time is None: return Log("Actual remove older than time: %s" % (time, ), 6) manage.delete_earlier_than(Globals.rbdir, time) def rot_check_time(time_string): """Check remove older than time_string, return time in seconds""" try: time = Time.genstrtotime(time_string) except Time.TimeException, exc: Log.FatalError(str(exc)) times_in_secs = [ inc.getinctime() for inc in restore.get_inclist(Globals.rbdir.append_path("increments")) ] times_in_secs = filter(lambda t: t < time, times_in_secs) if not times_in_secs: Log( "No increments older than %s found, exiting." % (Time.timetopretty(time), ), 3) return None times_in_secs.sort() inc_pretty_time = "\n".join(map(Time.timetopretty, times_in_secs)) if len(times_in_secs) > 1 and not force: Log.FatalError( "Found %d relevant increments, dated:\n%s" "\nIf you want to delete multiple increments in this way, " "use the --force." % (len(times_in_secs), inc_pretty_time))