Example #1
    def recreate_attr(self, regress_time):
        """
        Make the mirror_metadata snapshot for regress_time by patching

        We write to a tempfile first.  Otherwise a crash could leave
        behind what looks like an intact snapshot alongside a partial
        diff, when the truth is the reverse.
        """
        # one-element list so the callback below can rebind the temp
        # path actually used by the writer
        temprp = [self.data_dir.get_temp_rpath()]

        def callback(rp):
            temprp[0] = rp

        writer = self._meta_main_class(temprp[0],
                                       'wb',
                                       check_path=0,
                                       callback=callback)
        for rorp in self._get_meta_main_at_time(regress_time, None):
            writer.write_object(rorp)
        writer.close()

        finalrp = self.data_dir.append(b"mirror_metadata.%b.snapshot.gz" %
                                       Time.timetobytes(regress_time))
        assert not finalrp.lstat(), (
            "Metadata path '{mrp}' shouldn't exist.".format(mrp=finalrp))
        rpath.rename(temprp[0], finalrp)
        if Globals.fsync_directories:
            self.data_dir.fsync()
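
The write-to-tempfile-then-rename idiom above is general. Below is a minimal standalone sketch of the same pattern using only the Python standard library; atomic_write is an illustrative name, not rdiff-backup API. The tempfile is created in the target's own directory so the final rename stays on one filesystem, where os.replace is atomic on POSIX.

import os
import tempfile


def atomic_write(path, data):
    """Write data to path so readers never observe a partial file."""
    # Create the tempfile next to the target so the rename below
    # stays on one filesystem and remains atomic.
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(os.path.abspath(path)))
    try:
        with os.fdopen(fd, "wb") as f:
            f.write(data)
            f.flush()
            os.fsync(f.fileno())  # make the data durable before the rename
        os.replace(tmp, path)  # atomic on POSIX
    except BaseException:
        os.unlink(tmp)
        raise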
Example #2
    def _restore_orig_regfile(self, rf):
        """Restore original regular file

        This is the trickiest case for avoiding information loss,
        because we don't want to delete the increment before the
        mirror is fully written.

        """
        assert rf.metadata_rorp.isreg(), (
            "Metadata path '{mp}' can only be regular file.".format(
                mp=rf.metadata_rorp))
        if rf.mirror_rp.isreg():
            tf = rf.mirror_rp.get_temp_rpath(sibling=True)
            tf.write_from_fileobj(rf.get_restore_fp())
            tf.fsync_with_dir()  # make sure tf fully written before move
            rpath.copy_attribs(rf.metadata_rorp, tf)
            rpath.rename(tf, rf.mirror_rp)  # move is atomic
        else:
            if rf.mirror_rp.lstat():
                rf.mirror_rp.delete()
            rf.mirror_rp.write_from_fileobj(rf.get_restore_fp())
            rpath.copy_attribs(rf.metadata_rorp, rf.mirror_rp)
        if Globals.fsync_directories:
            # force the move to disk before the increment is deleted
            rf.mirror_rp.get_parent_rp().fsync()
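
The docstring's ordering concern, not deleting the increment while the mirror could still be incomplete, hinges on when the rename itself becomes durable. A hedged sketch of that final step; durable_replace is a hypothetical helper, not rdiff-backup API:

import os


def durable_replace(tmp_path, final_path):
    """Atomically swap tmp_path into place and force the rename to disk."""
    os.replace(tmp_path, final_path)  # atomic swap of directory entries
    # Until the parent directory is fsync'ed, a crash may undo the
    # rename, so dependent cleanup (like deleting an increment) must
    # wait until after this returns.
    dir_fd = os.open(os.path.dirname(os.path.abspath(final_path)), os.O_RDONLY)
    try:
        os.fsync(dir_fd)
    finally:
        os.close(dir_fd)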
Example #3
    def end_process_directory(self):
        """Finish processing directory"""
        if self.dir_update:
            assert self.base_rp.isdir(), (
                "Base path '{brp}' must be a directory.".format(
                    brp=self.base_rp))
            rpath.copy_attribs(self.dir_update, self.base_rp)
        else:
            assert self.dir_replacement, (
                "Replacement directory must be defined.")
            self.base_rp.rmdir()
            if self.dir_replacement.lstat():
                rpath.rename(self.dir_replacement, self.base_rp)
Example #4
    def end_process_directory(self):
        """Finish processing directory"""
        if self.dir_update:
            assert self.base_rp.isdir(), (
                "Base directory '{rp}' isn't a directory.".format(
                    rp=self.base_rp))
            rpath.copy_attribs(self.dir_update, self.base_rp)

            if (Globals.process_uid != 0
                    and self.dir_update.getperms() % 0o1000 < 0o700):
                # Directory was unreadable at start -- keep it readable
                # until the end of the backup process.
                self.base_rp.chmod(0o700 | self.dir_update.getperms())
        elif self.dir_replacement:
            self.base_rp.rmdir()
            if self.dir_replacement.lstat():
                rpath.rename(self.dir_replacement, self.base_rp)
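
The permission test above is plain octal arithmetic: % 0o1000 keeps only the nine rwx bits (discarding setuid, setgid and sticky), and a value below 0o700 means the owner lacks at least one of read, write or execute. A small worked illustration:

mode = 0o4455                  # setuid bit + r--r-xr-x
perm_bits = mode % 0o1000      # 0o455: setuid/setgid/sticky stripped
needs_fix = perm_bits < 0o700  # True: owner bits (4) lack write and execute
fixed = 0o700 | perm_bits      # 0o755: owner gets rwx, group/other unchanged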
Example #5
    def test_long_socket(self):
        """Test backing up a directory with long sockets in them

		For some reason many unicies don't allow sockets with long
		names to be made in the usual way.

		"""
        sockdir = rpath.RPath(Globals.local_connection, "testfiles/sockettest")
        re_init_dir(sockdir)
        tmp_sock = sockdir.append("sock")
        tmp_sock.mksock()
        sock1 = sockdir.append(
            "Long_socket_name---------------------------------------------------------------------------------------------------"
        )
        self.assertRaises(rpath.SkipFileException, sock1.mksock)
        rpath.rename(tmp_sock, sock1)
        self.assertTrue(sock1.issock())
        sock2 = sockdir.append("Medium_socket_name--------------------------------------------------------------")
        sock2.mksock()

        Myrm(Local.rpout.path)
        InternalBackup(1, 1, sockdir.path, Local.rpout.path, current_time=1)
        InternalBackup(1, 1, "testfiles/empty", Local.rpout.path, current_time=10000)
Example #6
    def test_moving_hardlinks(self):
        """Test moving the first hardlinked file in a series to later place in the series.

        This test is directed at some previously buggy code that failed to
        always keep a sha1 hash in the metadata for the first (and only the
        first) file among a series of linked files. The condition that
        triggered this bug involved removing the first file from a list of
        linked files, while also adding a new file at some later position in
        the list. The total number of hardlinked files in the list remains
        unchanged.  None of the files had a sha1 hash saved in its metadata.
        The bug was originally reported here:
        https://savannah.nongnu.org/bugs/?26848
        """

        # Setup initial backup
        MakeOutputDir()
        output = rpath.RPath(Globals.local_connection, abs_output_dir)
        hlsrc_dir = os.path.join(abs_test_dir, b"src_hardlink")

        hlsrc = rpath.RPath(Globals.local_connection, hlsrc_dir)
        if hlsrc.lstat():
            hlsrc.delete()
        hlsrc.mkdir()
        hlsrc_sub = hlsrc.append("subdir")
        hlsrc_sub.mkdir()
        hl_file1 = hlsrc_sub.append("hardlink1")
        hl_file1.write_string(self.hello_str)
        hl_file2 = hlsrc_sub.append("hardlink2")
        hl_file2.hardlink(hl_file1.path)

        InternalBackup(1, 1, hlsrc.path, output.path, 10000)
        out_subdir = output.append("subdir")
        self.assertEqual(
            out_subdir.append("hardlink1").getinode(),
            out_subdir.append("hardlink2").getinode())

        # validate that hashes and link counts are correctly saved in metadata
        meta_prefix = rpath.RPath(
            Globals.local_connection,
            os.path.join(abs_output_dir, b"rdiff-backup-data",
                         b"mirror_metadata"))
        incs = meta_prefix.get_incfiles_list()
        self.assertEqual(len(incs), 1)
        metadata_rp = incs[0]
        hashes, link_counts = self.extract_metadata(metadata_rp)
        # hashes for ., ./subdir, ./subdir/hardlink1, ./subdir/hardlink2
        expected_hashes = [None, None, self.hello_str_hash, None]
        self.assertEqual(expected_hashes, hashes)
        expected_link_counts = [1, 1, 2, 2]
        self.assertEqual(expected_link_counts, link_counts)

        # Move the first hardlinked file to be last
        hl_file3 = hlsrc_sub.append("hardlink3")
        rpath.rename(hl_file1, hl_file3)

        InternalBackup(1, 1, hlsrc.path, output.path, 20000)
        self.assertEqual(
            out_subdir.append("hardlink2").getinode(),
            out_subdir.append("hardlink3").getinode())

        # validate that hashes and link counts are correctly saved in metadata
        incs = meta_prefix.get_incfiles_list()
        self.assertEqual(len(incs), 2)
        if incs[0].getinctype() == b'snapshot':
            metadata_rp = incs[0]
        else:
            metadata_rp = incs[1]
        hashes, link_counts = self.extract_metadata(metadata_rp)
        # hashes for ., ./subdir/, ./subdir/hardlink2, ./subdir/hardlink3
        expected_hashes = [None, None, self.hello_str_hash, None]
        # Before the fix, the following assertion would fail
        self.assertEqual(expected_hashes, hashes)
        expected_link_counts = [1, 1, 2, 2]
        self.assertEqual(expected_link_counts, link_counts)

        # Now try restoring, still checking hard links.
        sub_path = os.path.join(abs_output_dir, b"subdir")
        restore_path = os.path.join(abs_test_dir, b"hl_restore")
        restore_dir = rpath.RPath(Globals.local_connection, restore_path)
        hlrestore_file1 = restore_dir.append("hardlink1")
        hlrestore_file2 = restore_dir.append("hardlink2")
        hlrestore_file3 = restore_dir.append("hardlink3")

        if restore_dir.lstat():
            restore_dir.delete()
        InternalRestore(1, 1, sub_path, restore_path, 10000)
        for rp in [hlrestore_file1, hlrestore_file2]:
            rp.setdata()
        self.assertEqual(hlrestore_file1.getinode(),
                         hlrestore_file2.getinode())

        if restore_dir.lstat():
            restore_dir.delete()
        InternalRestore(1, 1, sub_path, restore_path, 20000)
        for rp in [hlrestore_file2, hlrestore_file3]:
            rp.setdata()
        self.assertEqual(hlrestore_file2.getinode(),
                         hlrestore_file3.getinode())
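
The inode comparisons above are the standard way to verify hard links: two paths name the same file exactly when they agree on both st_dev and st_ino, and st_nlink is the link count the metadata assertions track. For reference, a plain-stdlib version with illustrative names:

import os


def are_hardlinked(path_a, path_b):
    """True when both paths refer to the same underlying inode."""
    sa, sb = os.lstat(path_a), os.lstat(path_b)
    return (sa.st_dev, sa.st_ino) == (sb.st_dev, sb.st_ino)


def link_count(path):
    """Number of directory entries pointing at this inode."""
    return os.lstat(path).st_nlink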
Example #7
    def fast_process_file(self, index, diff_rorp):
        """Patch base_rp with diff_rorp (case where neither is a directory)"""
        rp = self._get_rp_from_root(index)
        tf = rp.get_temp_rpath(sibling=True)
        self._patch_to_temp(rp, diff_rorp, tf)
        rpath.rename(tf, rp)
Example #8
def _write_via_tempfile(fp, rp):
    """Write fileobj fp to rp by writing to tempfile and renaming"""
    tf = rp.get_temp_rpath(sibling=True)
    retval = tf.write_from_fileobj(fp)
    rpath.rename(tf, rp)
    return retval