def setup_quoting(self):
    """
    Set QuotedRPath versions of important RPaths if chars_to_quote is set.

    Return True if quoting needed to be done, False otherwise.
    """
    # FIXME the problem is that the chars_to_quote can come from the command
    # line but can also be a value coming from the repository itself,
    # set globally by the fs_abilities.xxx_set_globals functions.
    if not Globals.chars_to_quote:
        return False

    if Globals.get_api_version() < 201:  # compat200
        FilenameMapping.set_init_quote_vals()
        self.base_dir = FilenameMapping.get_quotedrpath(self.base_dir)
        self.data_dir = FilenameMapping.get_quotedrpath(self.data_dir)
        self.incs_dir = FilenameMapping.get_quotedrpath(self.incs_dir)
    else:
        self.base_dir = map_filenames.get_quotedrpath(self.base_dir)
        self.data_dir = map_filenames.get_quotedrpath(self.data_dir)
        self.incs_dir = map_filenames.get_quotedrpath(self.incs_dir)

    Globals.set_all('rbdir', self.data_dir)  # compat200

    return True
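For orientation, a simplified standalone sketch of what this quoting amounts to. The chars_to_quote value below is a made-up example; the real FilenameMapping/map_filenames modules handle more cases (e.g. the quoting character itself is always escaped, as done here, and case-insensitive mappings get special treatment):

# Hedged sketch: each character matching chars_to_quote is replaced by
# b";" followed by its three-digit ordinal, which is reversible.
import re

def quote_name(name, chars_to_quote=b"A-Z:"):
    """Quote every matching character as b';' plus its 3-digit ordinal."""
    regexp = re.compile(b"[%s]|;" % chars_to_quote)
    return regexp.sub(lambda m: b";%03d" % ord(m.group()), name)

print(quote_name(b"Mixed:Case"))  # b';077ixed;058;067ase'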
def set_special_escapes(self, rbdir):
    """Set escape_dos_devices and escape_trailing_spaces from
    rdiff-backup-data dir, just like chars_to_quote"""
    se_rp = rbdir.append("special_escapes")
    if se_rp.lstat():
        se = se_rp.get_string().split("\n")
        actual_edd = ("escape_dos_devices" in se)
        actual_ets = ("escape_trailing_spaces" in se)
    else:
        log.Log(
            "The special escapes file '{ef}' was not found, "
            "will assume need to escape DOS devices and trailing "
            "spaces based on file systems".format(ef=se_rp), log.WARNING)
        if getattr(self, "src_fsa", None) is not None:
            actual_edd = (self.src_fsa.escape_dos_devices
                          and not self.dest_fsa.escape_dos_devices)
            actual_ets = (self.src_fsa.escape_trailing_spaces
                          and not self.dest_fsa.escape_trailing_spaces)
        else:
            # Single filesystem operation
            actual_edd = self.dest_fsa.escape_dos_devices
            actual_ets = self.dest_fsa.escape_trailing_spaces
    Globals.set_all('escape_dos_devices', actual_edd)
    log.Log("Backup: escape_dos_devices = {dd}".format(dd=actual_edd),
            log.INFO)
    Globals.set_all('escape_trailing_spaces', actual_ets)
    log.Log("Backup: escape_trailing_spaces = {ts}".format(ts=actual_ets),
            log.INFO)
def InternalBackup(source_local, dest_local, src_dir, dest_dir,
                   current_time=None, eas=None, acls=None):
    """Backup src to dest internally

    This is like rdiff_backup but instead of running a separate
    rdiff-backup script, use the separate *.py files. This way the
    script doesn't have to be rebuilt constantly, and stacktraces have
    correct line/file references.
    """
    Globals.current_time = current_time
    Globals.security_level = "override"
    Globals.set("no_compression_regexp_string",
                os.fsencode(actions.DEFAULT_NOT_COMPRESSED_REGEXP))

    cmdpairs = _internal_get_cmd_pairs(source_local, dest_local,
                                       src_dir, dest_dir)

    Security.initialize("backup", cmdpairs)
    rpin, rpout = list(map(SetConnections.cmdpair2rp, cmdpairs))
    for attr in ('eas_active', 'eas_write', 'eas_conn'):
        SetConnections.UpdateGlobal(attr, eas)
    for attr in ('acls_active', 'acls_write', 'acls_conn'):
        SetConnections.UpdateGlobal(attr, acls)
    Main._misc_setup([rpin, rpout])
    Main._action_backup(rpin, rpout)
    Main._cleanup()
def set_special_escapes(self, repo):
    """
    Set escape_dos_devices and escape_trailing_spaces from
    rdiff-backup-data dir, just like chars_to_quote
    """
    se = repo.get_special_escapes()
    if se is not None:
        actual_edd = ("escape_dos_devices" in se)
        actual_ets = ("escape_trailing_spaces" in se)
    else:
        if getattr(self, "src_fsa", None) is not None:
            actual_edd = (self.src_fsa.escape_dos_devices
                          and not self.dest_fsa.escape_dos_devices)
            actual_ets = (self.src_fsa.escape_trailing_spaces
                          and not self.dest_fsa.escape_trailing_spaces)
        else:
            # Single filesystem operation
            actual_edd = self.dest_fsa.escape_dos_devices
            actual_ets = self.dest_fsa.escape_trailing_spaces
    Globals.set_all('escape_dos_devices', actual_edd)
    log.Log("Backup: escape_dos_devices = {dd}".format(dd=actual_edd),
            log.INFO)
    Globals.set_all('escape_trailing_spaces', actual_ets)
    log.Log("Backup: escape_trailing_spaces = {ts}".format(ts=actual_ets),
            log.INFO)
def setup(self, src_dir=None, owners_map=None):
    if self.must_be_writable and not self._create():
        return 1

    if (self.can_be_sub_path
            and self.base_dir.conn is Globals.local_connection):
        Security.reset_restrict_path(self.base_dir)

    Globals.set_all('rbdir', self.data_dir)  # compat200

    if Globals.get_api_version() >= 201:  # compat200
        if self.base_dir.conn is Globals.local_connection:
            # should be more efficient than going through the connection
            from rdiffbackup.locations import _repo_shadow
            self._shadow = _repo_shadow.RepoShadow
        else:
            self._shadow = self.base_dir.conn._repo_shadow.RepoShadow
        if self.must_be_writable:
            self.fs_abilities = self._shadow.get_fs_abilities_readwrite(
                self.base_dir)
        else:
            self.fs_abilities = self._shadow.get_fs_abilities_readonly(
                self.base_dir)
        if not self.fs_abilities:
            return 1  # something was wrong
        else:
            log.Log(
                "--- Repository file system capabilities ---\n"
                + str(self.fs_abilities), log.INFO)

        if src_dir is None:
            self.remote_transfer = None  # just in case
            ret_code = fs_abilities.SingleRepoSetGlobals(self)()
            if ret_code != 0:
                return ret_code
        else:
            # FIXME this shouldn't be necessary, and the setting of variable
            # across the connection should happen through the shadow
            Globals.set_all("backup_writer", self.base_dir.conn)
            self.base_dir.conn.Globals.set_local("isbackup_writer", True)
            # this is the new way, more dedicated but not sufficient yet
            self.remote_transfer = (src_dir.base_dir.conn
                                    is not self.base_dir.conn)
            ret_code = fs_abilities.Dir2RepoSetGlobals(src_dir, self)()
            if ret_code != 0:
                return ret_code
        self.setup_quoting()
        self.setup_paths()

    if owners_map is not None:
        ret_code = self.init_owners_mapping(**owners_map)
        if ret_code != 0:
            return ret_code

    return 0  # all is good
def BackupRestoreSeries(source_local, dest_local, list_of_dirnames,
                        compare_hardlinks=1,
                        dest_dirname="testfiles/output",
                        restore_dirname="testfiles/rest_out",
                        compare_backups=1, compare_eas=0,
                        compare_acls=0, compare_ownership=0):
    """Test backing up/restoring of a series of directories

    The dirnames correspond to a single directory at different times.
    After each backup, the dest dir will be compared. After the whole
    set, each of the earlier directories will be recovered to the
    restore_dirname and compared.
    """
    Globals.set('preserve_hardlinks', compare_hardlinks)
    time = 10000
    dest_rp = rpath.RPath(Globals.local_connection, dest_dirname)
    restore_rp = rpath.RPath(Globals.local_connection, restore_dirname)

    Myrm(dest_dirname)
    for dirname in list_of_dirnames:
        src_rp = rpath.RPath(Globals.local_connection, dirname)
        reset_hardlink_dicts()
        _reset_connections(src_rp, dest_rp)

        InternalBackup(source_local, dest_local, dirname, dest_dirname,
                       time, eas=compare_eas, acls=compare_acls)
        time += 10000
        _reset_connections(src_rp, dest_rp)
        if compare_backups:
            assert CompareRecursive(src_rp, dest_rp, compare_hardlinks,
                                    compare_eas=compare_eas,
                                    compare_acls=compare_acls,
                                    compare_ownership=compare_ownership)

    time = 10000
    for dirname in list_of_dirnames[:-1]:
        reset_hardlink_dicts()
        Myrm(restore_dirname)
        InternalRestore(dest_local, source_local, dest_dirname,
                        restore_dirname, time,
                        eas=compare_eas, acls=compare_acls)
        src_rp = rpath.RPath(Globals.local_connection, dirname)
        assert CompareRecursive(src_rp, restore_rp,
                                compare_eas=compare_eas,
                                compare_acls=compare_acls,
                                compare_ownership=compare_ownership)

        # Restore defaults to the newest backup strictly older than the
        # requested time, so bump the time to just past each backup time.
        if time == 20000:
            time = 21000
        time += 10000
def get_readonly_fsa(desc_string, rp):
    """Return an fsa with given description_string

    Will be initialized read_only with given RPath rp. We separate
    this out into a separate function so the request can be vetted by
    the security module.
    """
    if os.name == "nt":
        log.Log("Hardlinks disabled by default on Windows", log.INFO)
        Globals.set_all('preserve_hardlinks', 0)
    return FSAbilities(desc_string, rp, read_only=True)
def set_current_time(reftime=None):
    """
    Sets the current time in curtime and curtimestr on all systems
    """
    if reftime is None:
        reftime = time.time()
    if Globals.get_api_version() < 201:  # compat200
        for conn in Globals.connections:
            conn.Time.setcurtime_local(int(reftime))
    else:
        Globals.set_all("current_time", reftime)
        Globals.set_all("current_time_string", timetostring(reftime))
def set_chars_to_quote(self, rbdir):
    """Set chars_to_quote from rdiff-backup-data dir"""
    if Globals.chars_to_quote is not None:
        return  # already overridden
    ctq_rp = rbdir.append(b"chars_to_quote")
    if ctq_rp.lstat():
        Globals.set_all("chars_to_quote", ctq_rp.get_bytes())
    else:
        log.Log(
            "chars_to_quote file '{qf}' not found, assuming no quoting "
            "required in backup repository".format(qf=ctq_rp),
            log.WARNING)
        Globals.set_all("chars_to_quote", b"")
def set_chars_to_quote(self, rbdir):
    """Set chars_to_quote setting for backup session

    Unlike most other options, the chars_to_quote setting also depends
    on the current settings in the rdiff-backup-data directory, not
    just the current fs features.
    """
    ctq = self._compare_ctq_file(rbdir, self._get_ctq_from_fsas())
    Globals.set_all('chars_to_quote', ctq)
    if Globals.chars_to_quote:
        FilenameMapping.set_init_quote_vals()
def set_special_escapes(self, repo):
    """
    Escaping DOS devices and trailing periods/spaces works like
    regular filename escaping. If only the destination requires it,
    then we do it. Otherwise, it is not necessary, since the files
    couldn't have been created in the first place. We also record
    whether we have done it in order to handle the case where a
    volume which was escaped is later restored by an OS that does
    not require it.
    """
    suggested_edd = (self.dest_fsa.escape_dos_devices
                     and not self.src_fsa.escape_dos_devices)
    suggested_ets = (self.dest_fsa.escape_trailing_spaces
                     and not self.src_fsa.escape_trailing_spaces)
    se = repo.get_special_escapes()
    if se is None:
        actual_edd, actual_ets = suggested_edd, suggested_ets
        se = set()
        if actual_edd:
            se.add("escape_dos_devices")
        if actual_ets:
            se.add("escape_trailing_spaces")
        repo.set_special_escapes(se)
    else:
        actual_edd = ("escape_dos_devices" in se)
        actual_ets = ("escape_trailing_spaces" in se)
        if actual_edd != suggested_edd and not suggested_edd:
            log.Log(
                "System no longer needs DOS devices to be escaped, "
                "but we will retain for backwards compatibility",
                log.WARNING)
        if actual_ets != suggested_ets and not suggested_ets:
            log.Log(
                "System no longer needs trailing spaces or periods to be "
                "escaped, but we will retain for backwards compatibility",
                log.WARNING)
    Globals.set_all('escape_dos_devices', actual_edd)
    log.Log("Backup: escape_dos_devices = {dd}".format(dd=actual_edd),
            log.INFO)
    Globals.set_all('escape_trailing_spaces', actual_ets)
    log.Log("Backup: escape_trailing_spaces = {ts}".format(ts=actual_ets),
            log.INFO)
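To make the suggested-versus-actual logic above concrete, here is a minimal standalone sketch of the decision rule; the boolean arguments are hypothetical stand-ins for the FSAbilities escape flags:

# Minimal sketch of the "escape only if the destination needs it but
# the source doesn't" rule; src_needs/dest_needs stand in for the
# escape_dos_devices (or escape_trailing_spaces) flags.
def suggest_escaping(src_needs, dest_needs):
    # if the source also escapes, such names can't occur unescaped
    return dest_needs and not src_needs

assert suggest_escaping(False, True) is True    # e.g. Linux -> FAT
assert suggest_escaping(True, True) is False    # both sides escape already
assert suggest_escaping(False, False) is False  # nobody needs escaping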
def setup(self):
    """
    Prepare the execution of the action.
    """
    # Set default change ownership flag, umask, relay regexps
    os.umask(0o77)
    Time.setcurtime(Globals.current_time)
    SetConnections.UpdateGlobal("client_conn", Globals.local_connection)
    Globals.postset_regexp('no_compression_regexp',
                           Globals.no_compression_regexp_string)
    for conn in Globals.connections:
        conn.robust.install_signal_handlers()
        conn.Hardlink.initialize_dictionaries()
    return 0
def run(self):
    # do regress the target directory if necessary
    if self._operate_regress():
        # regress was necessary and failed
        return 1
    previous_time = self.repo.get_mirror_time(refresh=True)
    if previous_time < 0 or previous_time >= Time.getcurtime():
        log.Log(
            "Either there is more than one current_mirror or "
            "the last backup is not in the past. Aborting.", log.ERROR)
        return 1
    if Globals.get_api_version() < 201:  # compat200
        if previous_time:
            Time.setprevtime_compat200(previous_time)
            self.repo.base_dir.conn.Main.backup_touch_curmirror_local(
                self.dir.base_dir, self.repo.base_dir)
            backup.mirror_and_increment_compat200(
                self.dir.base_dir, self.repo.base_dir, self.repo.incs_dir)
            self.repo.base_dir.conn.Main.backup_remove_curmirror_local()
        else:
            backup.mirror_compat200(self.dir.base_dir, self.repo.base_dir)
            self.repo.base_dir.conn.Main.backup_touch_curmirror_local(
                self.dir.base_dir, self.repo.base_dir)
        self.repo.base_dir.conn.Main.backup_close_statistics(time.time())
    else:  # API 201 and higher
        self._operate_backup(previous_time)
    return 0
def init_owners_mapping(self, users_map=None, groups_map=None,
                        preserve_num_ids=False):
    """
    initialize mapping of users and groups (aka owners)

    Shadow function for _repo_shadow.RepoShadow/_dir_shadow.DirShadow

    users_map and groups_map are file descriptors opened in text mode
    """
    if users_map is not None:
        users_map = users_map.read()
    if groups_map is not None:
        groups_map = groups_map.read()
    if Globals.get_api_version() < 201:  # compat200
        self.base_dir.conn.user_group.init_user_mapping(
            users_map, preserve_num_ids)
        self.base_dir.conn.user_group.init_group_mapping(
            groups_map, preserve_num_ids)
    else:
        self._shadow.init_owners_mapping(users_map, groups_map,
                                         preserve_num_ids)

    return 0  # all is good
def setup(self):
    # in setup we return as soon as we detect an issue to avoid changing
    # too much
    return_code = super().setup()
    if return_code != 0:
        return return_code

    return_code = self.repo.setup()
    if return_code != 0:
        return return_code

    # set the filesystem properties of the repository
    self.repo.base_dir.conn.fs_abilities.single_set_globals(
        self.repo.base_dir, 1)  # read_only=True
    self.repo.init_quoting(self.values.chars_to_quote)

    if Globals.get_api_version() < 201:  # compat200
        self.mirror_rpath = self.repo.base_dir.new_index(
            self.repo.restore_index)
    self.inc_rpath = self.repo.data_dir.append_path(
        b'increments', self.repo.restore_index)

    # FIXME move method _get_parsed_time to Repo and remove inc_rpath?
    self.action_time = self._get_parsed_time(self.values.at,
                                             ref_rp=self.inc_rpath)
    if self.action_time is None:
        return 1

    return 0  # all is good
def setup(self):
    # in setup we return as soon as we detect an issue to avoid changing
    # too much
    return_code = super().setup()
    if return_code != 0:
        return return_code

    return_code = self.repo.setup()
    if return_code != 0:
        return return_code

    # set the filesystem properties of the repository
    if Globals.get_api_version() < 201:  # compat200
        self.repo.base_dir.conn.fs_abilities.single_set_globals(
            self.repo.base_dir, 0)  # read_only=False
        self.repo.setup_quoting()

    # TODO validate how much of the following lines and methods
    # should go into the directory/repository modules
    if log.Log.verbosity > 0:
        try:  # the source repository must be writable
            log.Log.open_logfile(
                self.repo.data_dir.append(self.name + ".log"))
        except (log.LoggerError, Security.Violation) as exc:
            log.Log("Unable to open logfile due to exception "
                    "'{ex}'".format(ex=exc), log.ERROR)
            return 1

    return 0
def run(self):
    runtime_info = Globals.get_runtime_info()
    print(yaml.safe_dump(runtime_info,
                         explicit_start=True, explicit_end=True))
    return 0
def setup(self):
    # in setup we return as soon as we detect an issue to avoid changing
    # too much
    return_code = super().setup()
    if return_code & Globals.RET_CODE_ERR:
        return return_code

    return_code = self.repo.setup()
    if return_code & Globals.RET_CODE_ERR:
        return return_code

    # set the filesystem properties of the repository
    if Globals.get_api_version() < 201:  # compat200
        self.repo.base_dir.conn.fs_abilities.single_set_globals(
            self.repo.base_dir, 1)  # read_only=True
        self.repo.setup_quoting()

    if self.values.entity == "files":
        if self.values.changed_since:
            self.action_time = self._get_parsed_time(
                self.values.changed_since, ref_rp=self.repo.ref_inc)
        elif self.values.at:
            self.action_time = self._get_parsed_time(
                self.values.at, ref_rp=self.repo.ref_inc)
        if self.action_time is None:
            return Globals.RET_CODE_ERR

    return Globals.RET_CODE_OK
def set_select(self, select_opts, select_data):
    """
    Set the selection and selection data on the directory

    Accepts a tuple of two lists:
    * one of selection tuple made of (selection method, parameter)
    * and one of the content of the selection files

    Saves the selections list and makes it ready for usage on the
    source side over its connection.
    """
    # FIXME not sure we couldn't support symbolic links nowadays on Windows
    # knowing that it would require specific handling when reading the link:
    #   File "rdiff_backup\rpath.py", line 771, in symlink
    #   TypeError: symlink: src should be string, bytes or os.PathLike,
    #   not NoneType
    # I suspect that not all users can read symlinks with os.readlink
    if (self.base_dir.conn.os.name == 'nt'
            and ("--exclude-symbolic-links", None) not in select_opts):
        log.Log("Symbolic links excluded by default on Windows", log.NOTE)
        select_opts.insert(0, ("--exclude-symbolic-links", None))
    if Globals.get_api_version() < 201:  # compat200
        self.base_dir.conn.backup.SourceStruct.set_source_select(
            self.base_dir, select_opts,
            *list(map(io.BytesIO, select_data)))
    else:
        # FIXME we're retransforming bytes into a file pointer
        self._shadow.set_select(self.base_dir, select_opts,
                                *list(map(io.BytesIO, select_data)))
def _operate_regress(self, try_regress=True):
    """
    Check the given repository and regress it if necessary
    """
    if Globals.get_api_version() < 201:  # compat200
        if self.repo.needs_regress_compat200():
            if not try_regress:
                return 1
            log.Log("Previous backup seems to have failed, regressing "
                    "destination now", log.WARNING)
            try:
                self.repo.base_dir.conn.regress.Regress(self.repo.base_dir)
                return 0
            except Security.Violation:
                log.Log(
                    "Security violation while attempting to regress "
                    "destination, perhaps due to --restrict-read-only or "
                    "--restrict-update-only", log.ERROR)
                return 1
        else:
            log.Log("Given repository doesn't need to be regressed",
                    log.NOTE)
            return 0  # all is good
    else:
        if self.repo.needs_regress():
            if not try_regress:
                return 1
            log.Log("Previous backup seems to have failed, regressing "
                    "destination now", log.WARNING)
            return self.repo.regress()
        else:
            log.Log("Given repository doesn't need to be regressed",
                    log.NOTE)
            return 0  # all is good
def run(self):
    # This is more a check than a part of run, but because backup does
    # the regress in the run section, we also do the check here...
    if self.source.needs_regress():
        # source could be read-only, so we don't try to regress it
        log.Log(
            "Previous backup to {rp} seems to have failed. "
            "Use rdiff-backup to 'regress' first the failed backup, "
            "then try again to restore".format(rp=self.source.base_dir),
            log.ERROR)
        return 1
    try:
        if Globals.get_api_version() < 201:  # compat200
            restore.Restore(
                self.source.base_dir.new_index(self.source.restore_index),
                self.inc_rpath, self.target.base_dir, self.action_time)
        else:
            self._operate_restore()
    except OSError as exc:
        log.Log(
            "Could not complete restore due to exception '{ex}'".format(
                ex=exc), log.ERROR)
        return 1
    else:
        log.Log("Restore successfully finished", log.INFO)
        return 0
def setup(self, src_repo, owners_map=None):
    ret_code = super().setup()
    if ret_code != 0:
        return ret_code

    if Globals.get_api_version() >= 201:  # compat200
        if self.base_dir.conn is Globals.local_connection:
            # should be more efficient than going through the connection
            from rdiffbackup.locations import _dir_shadow
            self._shadow = _dir_shadow.WriteDirShadow
        else:
            self._shadow = self.base_dir.conn._dir_shadow.WriteDirShadow
        self.fs_abilities = self._shadow.get_fs_abilities(self.base_dir)
        if not self.fs_abilities:
            return 1  # something was wrong
        else:
            log.Log(
                "--- Write directory file system capabilities ---\n"
                + str(self.fs_abilities), log.INFO)
        return fs_abilities.Repo2DirSetGlobals(src_repo, self)()

    if owners_map is not None:
        ret_code = self.init_owners_mapping(**owners_map)
        if ret_code != 0:
            return ret_code

    return 0  # all is good
def recreate_attr(self, regress_time):
    """
    Make regress_time mirror_metadata snapshot by patching

    We write to a tempfile first. Otherwise, in case of a crash, it
    would seem we would have an intact snapshot and partial diff, not
    the reverse.
    """
    temprp = [self.data_dir.get_temp_rpath()]

    def callback(rp):
        temprp[0] = rp

    # Before API 201, metafiles couldn't be compressed
    writer = self._meta_main_class(
        temprp[0], 'wb',
        compress=(Globals.compression or Globals.get_api_version() < 201),
        check_path=0, callback=callback)
    for rorp in self._get_meta_main_at_time(regress_time, None):
        writer.write_object(rorp)
    writer.close()

    finalrp = self.data_dir.append(
        b"mirror_metadata.%b.snapshot.gz" % Time.timetobytes(regress_time))
    assert not finalrp.lstat(), (
        "Metadata path '{mrp}' shouldn't exist.".format(mrp=finalrp))
    rpath.rename(temprp[0], finalrp)
    if Globals.fsync_directories:
        self.data_dir.fsync()
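The temp-write-then-rename done above is the classic atomic-publish pattern; a generic, self-contained sketch of it under the assumption of a POSIX filesystem (the helper name and paths are hypothetical, not part of rdiff-backup):

# Generic sketch: write everything to a temp file in the same directory,
# then rename it into place so a crash never leaves a half-written
# snapshot visible under the final name.
import os
import tempfile

def atomic_write(path, data):
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or ".")
    try:
        with os.fdopen(fd, "wb") as f:
            f.write(data)
            f.flush()
            os.fsync(f.fileno())
        os.rename(tmp, path)  # atomic on POSIX within one filesystem
    except BaseException:
        os.unlink(tmp)
        raise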
def get_cmd_pairs(locations, remote_schema=None, ssh_compression=True,
                  remote_tempdir=None, term_verbosity=None):
    """Map the given file descriptions into command pairs

    Command pairs are tuples cmdpair with length 2. cmdpair[0] is
    None iff it describes a local path, and cmdpair[1] is the path.
    """
    # This is the schema that determines how rdiff-backup will open a
    # pipe to the remote system. If the file is given as A::B, {h} will
    # be substituted with A in the schema.
    if remote_schema:
        cmd_schema = remote_schema
    else:
        if ssh_compression:
            cmd_schema = b"ssh -C {h} rdiff-backup"
        else:
            cmd_schema = b"ssh {h} rdiff-backup"
        if remote_tempdir:
            cmd_schema += (b" --tempdir=" + remote_tempdir)
        # we could wait until the verbosity is "transferred" to the remote
        # side, but we might miss important messages at the beginning of
        # the process
        if term_verbosity is not None:
            cmd_schema += b" --terminal-verbosity %d" % term_verbosity
        if Globals.get_api_version() > 200:  # compat200
            cmd_schema += b" server"
        else:
            cmd_schema += b" --server"

    if not locations:
        return []
    desc_triples = list(map(parse_location, locations))

    # did parse_location return an error string as third element of a triple?
    for err in [triple[2] for triple in desc_triples if triple[2]]:
        raise SetConnectionsException(err)

    if remote_schema and not [x for x in desc_triples if x[0]]:
        # remote schema defined but no remote location found
        log.Log("Remote schema option ignored - no remote file descriptions",
                log.WARNING)

    # strip the error field from the triples to get pairs
    desc_pairs = [triple[:2] for triple in desc_triples]

    def desc2cmd_pairs(desc_pair):
        """Return pair (remote_cmd, filename) from desc_pair"""
        host_info, filename = desc_pair
        if not host_info:
            return (None, filename)
        else:
            return (_fill_schema(host_info, cmd_schema), filename)

    cmd_pairs = list(map(desc2cmd_pairs, desc_pairs))

    return cmd_pairs
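As a sanity check of the schema mechanics, a hypothetical standalone stand-in for _fill_schema (the real helper lives elsewhere in SetConnections and may do more, such as validating the placeholder):

# Hypothetical stand-in: substitute the {h} placeholder with the host
# description parsed out of an A::B location.
def fill_schema(host_info, cmd_schema):
    return cmd_schema.replace(b"{h}", host_info)

# e.g. for b"user@host::/backup" with default options and API > 200:
assert (fill_schema(b"user@host", b"ssh -C {h} rdiff-backup server")
        == b"ssh -C user@host rdiff-backup server")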
def _writer_helper(self, typestr, time, meta_class, force=False):
    """
    Returns a writer class or None if the meta class isn't active.

    For testing purposes, the force option allows skipping the
    activity validation.
    """
    if time is None:
        timestr = Time.getcurtimestr()
    else:
        timestr = Time.timetobytes(time)
    triple = map(os.fsencode, (meta_class.get_prefix(), timestr, typestr))
    filename = b'.'.join(triple)
    rp = self.data_dir.append(filename)
    assert not rp.lstat(), "File '{rp}' shouldn't exist.".format(rp=rp)
    assert rp.isincfile(), (
        "Path '{irp}' must be an increment file.".format(irp=rp))
    if meta_class.is_active() or force:
        # Before API 201, metafiles couldn't be compressed
        return meta_class(rp, 'w',
                          compress=(Globals.compression
                                    or Globals.get_api_version() < 201),
                          callback=self._add_incrp)
    else:
        return None
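For illustration, the filename assembled above is simply prefix.timestamp.type joined with dots; the timestamp below is made up, and on some filesystems the colons would themselves be subject to chars_to_quote quoting:

# Illustration of the b'.'.join(triple) above with hypothetical values:
triple = (b"mirror_metadata", b"2021-07-01T10:34:56+02:00", b"snapshot")
assert (b".".join(triple)
        == b"mirror_metadata.2021-07-01T10:34:56+02:00.snapshot")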
def setup(self):
    # in setup we return as soon as we detect an issue to avoid changing
    # too much
    return_code = super().setup()
    if return_code & Globals.RET_CODE_ERR:
        return return_code

    return_code = self.dir.setup()
    if return_code & Globals.RET_CODE_ERR:
        return return_code

    return_code = self.repo.setup(self.dir)
    if return_code & Globals.RET_CODE_ERR:
        return return_code

    # set the filesystem properties of the repository
    if Globals.get_api_version() < 201:  # compat200
        self.repo.base_dir.conn.fs_abilities.single_set_globals(
            self.repo.base_dir, 1)  # read_only=True
        self.repo.setup_quoting()

    (select_opts, select_data) = selection.get_prepared_selections(
        self.values.selections)
    self.dir.set_select(select_opts, select_data)

    # FIXME move method _get_parsed_time to Repo?
    self.action_time = self._get_parsed_time(self.values.at,
                                             ref_rp=self.repo.ref_inc)
    if self.action_time is None:
        return Globals.RET_CODE_ERR

    return Globals.RET_CODE_OK
def set_select(cls, rp, tuplelist, *filelists):
    """
    Initialize select object using tuplelist

    Note that each list in filelists must be passed as a separate
    argument, so each is recognized as a file by the connection.
    Otherwise we will get an error because a list containing files
    can't be pickled.

    Also, cls._select needs to be cached so get_diffs below can
    retrieve the necessary rps.
    """
    sel = selection.Select(rp)
    sel.parse_selection_args(tuplelist, filelists)
    sel_iter = sel.set_iter()
    cache_size = Globals.pipeline_max_length * 3  # to and from+leeway
    cls._select = rorpiter.CacheIndexable(sel_iter, cache_size)
    Globals.set('select_mirror', sel_iter)
def test_default_actual_api(self):
    """validate that the default version is the actual one or the one
    explicitly set"""
    output = subprocess.check_output([RBBin, b'info'])
    api_version = yaml.safe_load(output)['exec']['api_version']
    self.assertEqual(Globals.get_api_version(), api_version['default'])
    api_param = os.fsencode(str(api_version['max']))
    output = subprocess.check_output(
        [RBBin, b'--api-version', api_param, b'info'])
    out_info = yaml.safe_load(output)
    self.assertEqual(out_info['exec']['api_version']['actual'],
                     api_version['max'])
def _list_files_at_time(self):
    """List files in archive under rp that are present at restoretime"""
    if Globals.get_api_version() < 201:
        rorp_iter = self.repo.base_dir.conn.restore.ListAtTime(
            self.mirror_rpath, self.inc_rpath, self.action_time)
    else:
        rorp_iter = self.repo.list_files_at_time(self.action_time)
    for rorp in rorp_iter:
        print(str(rorp))
def MirrorTest(source_local, dest_local, list_of_dirnames,
               compare_hardlinks=1, dest_dirname="testfiles/output"):
    """Mirror each of list_of_dirnames, and compare after each"""
    Globals.set('preserve_hardlinks', compare_hardlinks)
    dest_rp = rpath.RPath(Globals.local_connection, dest_dirname)
    old_force_val = Main.force
    Main.force = 1

    Myrm(dest_dirname)
    for dirname in list_of_dirnames:
        src_rp = rpath.RPath(Globals.local_connection, dirname)
        reset_hardlink_dicts()
        _reset_connections(src_rp, dest_rp)

        InternalMirror(source_local, dest_local, dirname, dest_dirname)
        _reset_connections(src_rp, dest_rp)
        assert CompareRecursive(src_rp, dest_rp, compare_hardlinks)
    Main.force = old_force_val
import unittest, os
from commontest import *
from rdiff_backup import Globals, log

"""Root tests - contain tests which need to be run as root.

Some of the quoting here may not work with csh (works on bash). Also,
if you aren't me, check out the 'user' global variable.
"""

Globals.set('change_source_perms', None)
Globals.counter = 0
verbosity = 5
log.Log.setverbosity(verbosity)
user = '******'  # Non-root user to su to
userid = 500  # id of user above

assert os.getuid() == 0, "Run this test as root!"


def Run(cmd):
    print("Running:", cmd)
    assert not os.system(cmd)


class RootTest(unittest.TestCase):
    dirlist1 = ["testfiles/root", "testfiles/various_file_types",
                "testfiles/increment4"]
    dirlist2 = ["testfiles/increment4", "testfiles/root",
                "testfiles/increment1"]

    def testLocal1(self):
        BackupRestoreSeries(1, 1, self.dirlist1, compare_ownership=1)

    def testLocal2(self):
import unittest, os
from commontest import *
from rdiff_backup import Globals, SetConnections, log, rpath, \
    backup, fs_abilities

"""Regression tests

This one must be run in the rdiff-backup directory, as it requires
chdir-wrapper, the various rdiff-backup files, and the directory
testfiles
"""

Globals.set("change_source_perms", 1)
Globals.counter = 0
Globals.security_level = "override"
log.Log.setverbosity(5)


def get_local_rp(extension):
    return rpath.RPath(Globals.local_connection, "testfiles/" + extension)


class Local:
    """This is just a place to put increments relative to the local
    connection"""
    inc1rp = get_local_rp("increment1")
    inc2rp = get_local_rp("increment2")
    inc3rp = get_local_rp("increment3")
    inc4rp = get_local_rp("increment4")