Example #1
    def test_session(self):
        """Run actual sessions and make sure proper hashes recorded

        There are a few code paths here we need to test:  creating
        ordinary files, updating ordinary files with diffs, hard
        linking, and keeping files the same.

        """
        in_rp1, hashlist1, in_rp2, hashlist2 = self.make_dirs()
        Myrm(abs_output_dir)

        rdiff_backup(1, 1, in_rp1.path, abs_output_dir, 10000)
        meta_prefix = rpath.RPath(
            Globals.local_connection,
            os.path.join(abs_output_dir, b"rdiff-backup-data",
                         b"mirror_metadata"))
        incs = restore.get_inclist(meta_prefix)
        assert len(incs) == 1
        metadata_rp = incs[0]
        hashlist = self.extract_hashs(metadata_rp)
        assert hashlist == hashlist1, (hashlist1, hashlist)

        rdiff_backup(1, 1, in_rp2.path, abs_output_dir, 20000)
        incs = restore.get_inclist(meta_prefix)
        assert len(incs) == 2
        if incs[0].getinctype() == 'snapshot':
            inc = incs[0]
        else:
            inc = incs[1]
        hashlist = self.extract_hashs(inc)
        assert hashlist == hashlist2, (hashlist2, hashlist)
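For context, the hashes these tests compare are sha1 hex digests of file contents, which rdiff-backup records in the mirror metadata for regular files. A minimal stdlib sketch of computing such a digest (the file name is hypothetical):

import hashlib

def file_sha1(path, blocksize=64 * 1024):
    # Return the sha1 hex digest of the file's contents, the value
    # expected to appear in the recorded metadata.
    digest = hashlib.sha1()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(blocksize), b""):
            digest.update(block)
    return digest.hexdigest()

# file_sha1("somefile")  # hypothetical path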
Example #2
    def mark_incomplete(self, curtime, rp):
        """Check the date of current mirror

        Return 1 if there are two current_mirror incs and the last one
        has time curtime.  Return 0 if there is only one with time
        curtime, and then add a current_mirror marker.  Return -1 if
        there is only one and its time is not curtime.

        """
        rbdir = rp.append_path("rdiff-backup-data")
        inclist = restore.get_inclist(rbdir.append("current_mirror"))
        assert 1 <= len(inclist) <= 2, str([x.path for x in inclist])

        inc_date_pairs = [(inc.getinctime(), inc) for inc in inclist]
        inc_date_pairs.sort()
        if len(inclist) == 2:
            assert inc_date_pairs[-1][0] == curtime, \
                (inc_date_pairs[-1][0], curtime)
            return 1

        if inc_date_pairs[-1][0] == curtime:
            result = 0
            marker_time = curtime - 10000
        else:
            assert inc_date_pairs[-1][0] == curtime - 10000
            marker_time = curtime
            result = -1

        cur_mirror_rp = rbdir.append("current_mirror.%s.data" %
                                     (Time.timetostring(marker_time), ))
        assert not cur_mirror_rp.lstat()
        cur_mirror_rp.touch()
        return result
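The decorate-and-sort pattern above is the key trick: pairing each increment with its time and sorting makes `inc_date_pairs[-1]` the newest increment. A self-contained sketch with stand-in values (not real increment objects):

# Stand-in (name, time) pairs take the place of real increments.
incs = [("current_mirror.B.data", 20000), ("current_mirror.A.data", 10000)]
inc_date_pairs = [(t, name) for (name, t) in incs]
inc_date_pairs.sort()
assert inc_date_pairs[-1] == (20000, "current_mirror.B.data")  # newest last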
Example #3
    def mark_incomplete(self, curtime, rp):
        """Check the date of current mirror

        Return 1 if there are two current_mirror incs and the last one
        has time curtime.  Return 0 if there is only one with time
        curtime, and then add a current_mirror marker.  Return -1 if
        there is only one and its time is not curtime.

        """
        rbdir = rp.append_path("rdiff-backup-data")
        inclist = restore.get_inclist(rbdir.append("current_mirror"))
        self.assertIn(
            len(inclist), (1, 2),
            "There must be 1 or 2 elements in '{paths_list}'.".format(
                paths_list=str([x.path for x in inclist])))

        inc_date_pairs = [(inc.getinctime(), inc) for inc in inclist]
        inc_date_pairs.sort()
        if len(inclist) == 2:
            self.assertEqual(inc_date_pairs[-1][0], curtime)
            return 1

        if inc_date_pairs[-1][0] == curtime:
            result = 0
            marker_time = curtime - 10000
        else:
            self.assertEqual(inc_date_pairs[-1][0], curtime - 10000)
            marker_time = curtime
            result = -1

        cur_mirror_rp = rbdir.append("current_mirror.%s.data" %
                                     (Time.timetostring(marker_time), ))
        self.assertFalse(cur_mirror_rp.lstat())
        cur_mirror_rp.touch()
        return result
Example #4
	def mark_incomplete(self, curtime, rp):
		"""Check the date of current mirror

		Return 1 if there are two current_mirror incs and the last one
		has time curtime.  Return 0 if there is only one with time
		curtime, and then add a current_mirror marker.  Return -1 if
		there is only one and its time is not curtime.

		"""
		rbdir = rp.append_path("rdiff-backup-data")
		inclist = restore.get_inclist(rbdir.append("current_mirror"))
		assert 1 <= len(inclist) <= 2, str([x.path for x in inclist])

		inc_date_pairs = [(inc.getinctime(), inc) for inc in inclist]
		inc_date_pairs.sort()
		if len(inclist) == 2:
			assert inc_date_pairs[-1][0] == curtime, \
				   (inc_date_pairs[-1][0], curtime)
			return 1

		if inc_date_pairs[-1][0] == curtime:
			result = 0
			marker_time = curtime - 10000
		else:
			assert inc_date_pairs[-1][0] == curtime - 10000
			marker_time = curtime
			result = -1

		cur_mirror_rp = rbdir.append("current_mirror.%s.data" %
									 (Time.timetostring(marker_time),))
		assert not cur_mirror_rp.lstat()
		cur_mirror_rp.touch()
		return result
Example #5
    def _list_increments(self):
        """Print out a summary of the increments and their times"""
        incs = restore.get_inclist(self.inc_rpath)
        mirror_time = restore.MirrorStruct.get_mirror_time()
        if self.values.parsable_output:
            print(
                manage.describe_incs_parsable(incs, mirror_time,
                                              self.mirror_rpath))
        else:
            print(
                manage.describe_incs_human(incs, mirror_time,
                                           self.mirror_rpath))
Example #6
    def needs_regress(self):
        """
        Checks if the repository contains a previously failed backup and needs
        to be regressed

        Return None if the repository can't be found,
        True if it needs regressing, False otherwise.
        """
        if not self.base_dir.isdir() or not self.data_dir.isdir():
            return None
        for filename in self.data_dir.listdir():
            # check if we can find any file of importance
            if filename not in [
                    b'chars_to_quote', b'special_escapes', b'backup.log',
                    b'increments'
            ]:
                break
        else:  # This may happen on the first backup, just after we test for quoting
            if not self.incs_dir.isdir() or not self.incs_dir.listdir():
                return None
        curmirroot = self.data_dir.append(b"current_mirror")
        curmir_incs = restore.get_inclist(curmirroot)  # FIXME belongs here
        if not curmir_incs:
            self.log.FatalError(
                """Bad rdiff-backup-data dir on destination side

The rdiff-backup data directory
{data}
exists, but we cannot find a valid current_mirror marker.  You can
avoid this message by removing the rdiff-backup-data directory;
however any data in it will be lost.

Probably this error was caused because the first rdiff-backup session
into a new directory failed.  If this is the case it is safe to delete
the rdiff-backup-data directory because there is no important
information in it.

""".format(data=self.data_dir.get_safepath()))
        elif len(curmir_incs) == 1:
            return False
        else:
            if not self.force:
                try:
                    curmir_incs[0].conn.regress.check_pids(curmir_incs)
                except (OSError, IOError) as exc:
                    self.log.FatalError(
                        "Could not check if rdiff-backup is currently"
                        "running due to\n{exc}".format(exc=exc))
            assert len(curmir_incs) == 2, (
                "Found more than 2 current_mirror incs in '{rp!s}'.".format(
                    rp=self.data_dir))
            return True
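One subtlety above is the `for ... else` on the directory listing: the `else` branch runs only when the loop completes without `break`, i.e. when nothing but housekeeping files was found. A minimal, self-contained illustration of the idiom:

# Minimal illustration of the for/else idiom used in needs_regress:
# the else clause runs only if the loop never hits break.
housekeeping = [b'chars_to_quote', b'special_escapes', b'backup.log',
                b'increments']

def only_housekeeping(filenames):
    for filename in filenames:
        if filename not in housekeeping:
            break  # found a file of importance
    else:
        return True  # loop completed: nothing but housekeeping files
    return False

assert only_housekeeping([b'backup.log']) is True
assert only_housekeeping([b'backup.log', b'error_log']) is False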
Example #7
    def testStatistics(self):
        """Test the writing of statistics

        The file sizes are approximate because the size of directories
        could change with different file systems...

        """

        def sorti(inclist):
            templist = [(inc.getinctime(), inc) for inc in inclist]
            templist.sort()
            return [inc for (t, inc) in templist]

        Globals.compression = 1
        Myrm(abs_output_dir)
        InternalBackup(1, 1, os.path.join(old_test_dir, b"stattest1"),
                       abs_output_dir)
        InternalBackup(1, 1, os.path.join(old_test_dir, b"stattest2"),
                       abs_output_dir,
                       time.time() + 1)

        rbdir = rpath.RPath(Globals.local_connection,
                            os.path.join(abs_output_dir, b"rdiff-backup-data"))

        incs = sorti(restore.get_inclist(rbdir.append("session_statistics")))
        self.assertEqual(len(incs), 2)
        s2 = statistics.StatsObj().read_stats_from_rp(incs[0])
        self.assertEqual(s2.SourceFiles, 7)
        self.assertLessEqual(700000, s2.SourceFileSize)
        self.assertLess(s2.SourceFileSize, 750000)
        self.stats_check_initial(s2)

        root_stats = statistics.StatsObj().read_stats_from_rp(incs[1])
        self.assertEqual(root_stats.SourceFiles, 7)
        self.assertLessEqual(550000, root_stats.SourceFileSize)
        self.assertLess(root_stats.SourceFileSize, 570000)
        self.assertEqual(root_stats.MirrorFiles, 7)
        self.assertLessEqual(700000, root_stats.MirrorFileSize)
        self.assertLess(root_stats.MirrorFileSize, 750000)
        self.assertEqual(root_stats.NewFiles, 1)
        self.assertEqual(root_stats.NewFileSize, 0)
        self.assertEqual(root_stats.DeletedFiles, 1)
        self.assertEqual(root_stats.DeletedFileSize, 200000)
        self.assertLessEqual(3, root_stats.ChangedFiles)
        self.assertLessEqual(root_stats.ChangedFiles, 4)
        self.assertLessEqual(450000, root_stats.ChangedSourceSize)
        self.assertLess(root_stats.ChangedSourceSize, 470000)
        self.assertLessEqual(400000, root_stats.ChangedMirrorSize)
        self.assertLess(root_stats.ChangedMirrorSize, 420000)
        self.assertLess(10, root_stats.IncrementFileSize)
        self.assertLess(root_stats.IncrementFileSize, 30000)
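The `sorti` helper can also be written with `sorted()` and a key function, which avoids building pairs and never compares increment objects when two times are equal; a behavior-equivalent sketch:

def sorti(inclist):
    # Sort increments oldest-first by their increment time.
    return sorted(inclist, key=lambda inc: inc.getinctime())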
Example #8
def backup_remove_curmirror_local():
    """Remove the older of the current_mirror files.  Use at end of session"""
    assert Globals.rbdir.conn is Globals.local_connection, (
        "Function can only be called locally and not over '{conn}'.".format(
            conn=Globals.rbdir.conn))
    curmir_incs = restore.get_inclist(Globals.rbdir.append(b"current_mirror"))
    assert len(curmir_incs) == 2, (
        "There must be two current mirrors not '{ilen}'.".format(
            ilen=len(curmir_incs)))
    if curmir_incs[0].getinctime() < curmir_incs[1].getinctime():
        older_inc = curmir_incs[0]
    else:
        older_inc = curmir_incs[1]
    if Globals.do_fsync:
        C.sync()  # Make sure everything is written before curmirror is removed
    older_inc.delete()
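An equivalent way to pick the older increment is `min()` with a key function; a sketch with a stand-in class (the real code operates on exactly two RPath increments):

# Stand-in objects: min() with a key selects the older of the two
# current_mirror increments in one expression.
class FakeInc:
    def __init__(self, inctime):
        self._inctime = inctime
    def getinctime(self):
        return self._inctime

curmir_incs = [FakeInc(20000), FakeInc(10000)]
older_inc = min(curmir_incs, key=lambda inc: inc.getinctime())
assert older_inc.getinctime() == 10000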
Example #9
    def get_mirror_time(self):
        """
        Return time in seconds of previous mirror if possible

        Return -1 if there is more than one mirror,
        or 0 if there is no backup yet.
        """
        incbase = self.data_dir.append_path(b"current_mirror")
        mirror_rps = restore.get_inclist(
            incbase)  # FIXME is probably better here
        if mirror_rps:
            if len(mirror_rps) == 1:
                return mirror_rps[0].getinctime()
            else:  # there is a failed backup and 2+ current_mirror files
                return -1
        else:  # it's the first backup
            return 0  # is always in the past
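Callers branch on the three documented return values; a minimal sketch of that interpretation (the helper below is hypothetical, not part of rdiff-backup):

def describe_mirror_time(mirror_time):
    # Interpret the documented return values of get_mirror_time().
    if mirror_time == -1:
        return "2+ current_mirror files: a failed backup needs regressing"
    elif mirror_time == 0:
        return "no backup yet"
    else:
        return "previous mirror at %d seconds since the epoch" % mirror_time

assert describe_mirror_time(0) == "no backup yet"
assert describe_mirror_time(-1).endswith("needs regressing")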
Example #10
    def _get_parsed_time(self, time_string):
        """
        Check the remove-older-than time_string and return the time in seconds

        Return None if the time string can't be interpreted as such, or
        if more than one increment would be removed, without the force option,
        or if no increment would be removed.
        """
        action_time = super()._get_parsed_time(time_string)
        if action_time is None:
            return None

        times_in_secs = [
            inc.getinctime()
            for inc in restore.get_inclist(self.source.incs_dir)
        ]
        times_in_secs = [t for t in times_in_secs if t < action_time]
        if not times_in_secs:
            self.log(
                "No increments older than {atim} found, exiting.".format(
                    atim=Time.timetopretty(action_time)), self.log.NOTE)
            return None

        times_in_secs.sort()
        pretty_times = "\n".join(map(Time.timetopretty, times_in_secs))
        if len(times_in_secs) > 1:
            if not self.values.force:
                self.log(
                    "Found {lent} relevant increments, dated:\n{ptim}\n"
                    "If you want to delete multiple increments in this way, "
                    "use the --force option.".format(lent=len(times_in_secs),
                                                     ptim=pretty_times),
                    self.log.ERROR)
                return None
            else:
                self.log(
                    "Deleting increments at times:\n{ptim}".format(
                        ptim=pretty_times), self.log.NOTE)
        else:
            self.log(
                "Deleting increment at time:\n{ptim}".format(
                    ptim=pretty_times), self.log.NOTE)
        # make sure we don't delete current increment
        return times_in_secs[-1] + 1
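The closing `times_in_secs[-1] + 1` is worth a second look: increments strictly older than the returned time get removed, so adding one second puts the newest doomed increment inside the removal window while leaving anything newer, including the current mirror, untouched. A worked sketch with hypothetical times:

# Worked sketch of the cutoff computation.
increment_times = [10000, 20000, 30000, 40000]   # hypothetical inc times
action_time = 35000                              # remove older than this
doomed = sorted(t for t in increment_times if t < action_time)
cutoff = doomed[-1] + 1                          # 30001
assert [t for t in increment_times if t < cutoff] == [10000, 20000, 30000]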
Example #11
    def testStatistics(self):
        """Test the writing of statistics

		The file sizes are approximate because the size of directories
		could change with different file systems...

		"""
        def sorti(inclist):
            l = [(inc.getinctime(), inc) for inc in inclist]
            l.sort()
            return [inc for (t, inc) in l]

        Globals.compression = 1
        Myrm(abs_output_dir)
        InternalBackup(1, 1, os.path.join(old_test_dir, "stattest1"),
                       abs_output_dir)
        InternalBackup(1, 1, os.path.join(old_test_dir, "stattest2"),
                       abs_output_dir,
                       time.time() + 1)

        rbdir = rpath.RPath(Globals.local_connection,
                            os.path.join(abs_output_dir, "rdiff-backup-data"))

        incs = sorti(restore.get_inclist(rbdir.append("session_statistics")))
        assert len(incs) == 2
        s2 = statistics.StatsObj().read_stats_from_rp(incs[0])
        assert s2.SourceFiles == 7
        assert 700000 <= s2.SourceFileSize < 750000, s2.SourceFileSize
        self.stats_check_initial(s2)

        root_stats = statistics.StatsObj().read_stats_from_rp(incs[1])
        assert root_stats.SourceFiles == 7, root_stats.SourceFiles
        assert 550000 <= root_stats.SourceFileSize < 570000
        assert root_stats.MirrorFiles == 7
        assert 700000 <= root_stats.MirrorFileSize < 750000
        assert root_stats.NewFiles == 1
        assert root_stats.NewFileSize == 0
        assert root_stats.DeletedFiles == 1, root_stats.DeletedFiles
        assert root_stats.DeletedFileSize == 200000
        assert 3 <= root_stats.ChangedFiles <= 4, root_stats.ChangedFiles
        assert 450000 <= root_stats.ChangedSourceSize < 470000
        assert 400000 <= root_stats.ChangedMirrorSize < 420000, \
            root_stats.ChangedMirrorSize
        assert 10 < root_stats.IncrementFileSize < 30000
Example #12
	def testStatistics(self):
		"""Test the writing of statistics

		The file sizes are approximate because the size of directories
		could change with different file systems...

		"""
		def sorti(inclist):
			l = [(inc.getinctime(), inc) for inc in inclist]
			l.sort()
			return [inc for (t, inc) in l]

		Globals.compression = 1
		Myrm("testfiles/output")
		InternalBackup(1, 1, "testfiles/stattest1", "testfiles/output")
		InternalBackup(1, 1, "testfiles/stattest2", "testfiles/output",
					   time.time()+1)

		rbdir = rpath.RPath(Globals.local_connection,
							"testfiles/output/rdiff-backup-data")

		incs = sorti(restore.get_inclist(rbdir.append("session_statistics")))
		assert len(incs) == 2
		s2 = statistics.StatsObj().read_stats_from_rp(incs[0])
		assert s2.SourceFiles == 7
		assert 700000 <= s2.SourceFileSize < 750000, s2.SourceFileSize
		self.stats_check_initial(s2)

		root_stats = statistics.StatsObj().read_stats_from_rp(incs[1])
		assert root_stats.SourceFiles == 7, root_stats.SourceFiles
		assert 550000 <= root_stats.SourceFileSize < 570000
		assert root_stats.MirrorFiles == 7
		assert 700000 <= root_stats.MirrorFileSize < 750000
		assert root_stats.NewFiles == 1
		assert root_stats.NewFileSize == 0
		assert root_stats.DeletedFiles == 1, root_stats.DeletedFiles
		assert root_stats.DeletedFileSize == 200000
		assert 3 <= root_stats.ChangedFiles <= 4, root_stats.ChangedFiles
		assert 450000 <= root_stats.ChangedSourceSize < 470000
		assert 400000 <= root_stats.ChangedMirrorSize < 420000, \
			   root_stats.ChangedMirrorSize
		assert 10 < root_stats.IncrementFileSize < 30000
Example #13
    def test_moving_hardlinks(self):
        """Test moving the first hardlinked file in a series to later place in the series.

        This test is directed at some previously buggy code that failed to
        always keep a sha1 hash in the metadata for the first (and only the
        first) file among a series of linked files. The condition that
        triggered this bug involved removing the first file from a list of
        linked files, while also adding a new file at some later position in
        the list. The total number of hardlinked files in the list remains
        unchanged.  None of the files had a sha1 hash saved in its metadata.
        The bug was originally reported here:
        https://savannah.nongnu.org/bugs/?26848
        """

        # Setup initial backup
        MakeOutputDir()
        output = rpath.RPath(Globals.local_connection, abs_output_dir)
        hlsrc_dir = os.path.join(abs_test_dir, b"src_hardlink")

        hlsrc = rpath.RPath(Globals.local_connection, hlsrc_dir)
        if hlsrc.lstat():
            hlsrc.delete()
        hlsrc.mkdir()
        hlsrc_sub = hlsrc.append("subdir")
        hlsrc_sub.mkdir()
        hl_file1 = hlsrc_sub.append("hardlink1")
        hl_file1.write_string(self.hello_str)
        hl_file2 = hlsrc_sub.append("hardlink2")
        hl_file2.hardlink(hl_file1.path)

        InternalBackup(1, 1, hlsrc.path, output.path, 10000)
        out_subdir = output.append("subdir")
        assert out_subdir.append("hardlink1").getinode() == \
            out_subdir.append("hardlink2").getinode()

        # validate that hashes and link counts are correctly saved in metadata
        meta_prefix = rpath.RPath(
            Globals.local_connection,
            os.path.join(abs_output_dir, b"rdiff-backup-data",
                         b"mirror_metadata"))
        incs = restore.get_inclist(meta_prefix)
        assert len(incs) == 1
        metadata_rp = incs[0]
        hashes, link_counts = self.extract_metadata(metadata_rp)
        # hashes for ., ./subdir, ./subdir/hardlink1, ./subdir/hardlink2
        expected_hashes = [None, None, self.hello_str_hash, None]
        assert expected_hashes == hashes, (expected_hashes, hashes)
        expected_link_counts = [1, 1, 2, 2]
        assert expected_link_counts == link_counts, (expected_link_counts, link_counts)

        # Move the first hardlinked file to be last
        hl_file3 = hlsrc_sub.append("hardlink3")
        rpath.rename(hl_file1, hl_file3)

        InternalBackup(1, 1, hlsrc.path, output.path, 20000)
        assert out_subdir.append("hardlink2").getinode() == \
            out_subdir.append("hardlink3").getinode()

        # validate that hashes and link counts are correctly saved in metadata
        incs = restore.get_inclist(meta_prefix)
        assert len(incs) == 2
        if incs[0].getinctype() == b'snapshot':
            metadata_rp = incs[0]
        else:
            metadata_rp = incs[1]
        hashes, link_counts = self.extract_metadata(metadata_rp)
        # hashes for ., ./subdir/, ./subdir/hardlink2, ./subdir/hardlink3
        expected_hashes = [None, None, self.hello_str_hash, None]
        # The following assertion would fail as a result of bugs that are now fixed
        assert expected_hashes == hashes, (expected_hashes, hashes)
        expected_link_counts = [1, 1, 2, 2]
        assert expected_link_counts == link_counts, (expected_link_counts, link_counts)

        # Now try restoring, still checking hard links.
        sub_path = os.path.join(abs_output_dir, b"subdir")
        restore_path = os.path.join(abs_test_dir, b"hl_restore")
        restore_dir = rpath.RPath(Globals.local_connection, restore_path)
        hlrestore_file1 = restore_dir.append("hardlink1")
        hlrestore_file2 = restore_dir.append("hardlink2")
        hlrestore_file3 = restore_dir.append("hardlink3")

        if restore_dir.lstat():
            restore_dir.delete()
        InternalRestore(1, 1, sub_path, restore_path, 10000)
        for rp in [hlrestore_file1, hlrestore_file2]:
            rp.setdata()
        assert hlrestore_file1.getinode() == hlrestore_file2.getinode()

        if restore_dir.lstat():
            restore_dir.delete()
        InternalRestore(1, 1, sub_path, restore_path, 20000)
        for rp in [hlrestore_file2, hlrestore_file3]:
            rp.setdata()
        assert hlrestore_file2.getinode() == hlrestore_file3.getinode()
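Outside these helpers, the same hard-link check can be made with the standard library alone by comparing device and inode numbers; a minimal sketch with hypothetical paths:

import os

def is_hardlinked(path_a, path_b):
    # Two names refer to the same file iff they share device and inode.
    st_a, st_b = os.stat(path_a), os.stat(path_b)
    return (st_a.st_dev, st_a.st_ino) == (st_b.st_dev, st_b.st_ino)

# is_hardlinked("hl_restore/hardlink2", "hl_restore/hardlink3")  # hypothetical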
Example #14
    def test_adding_hardlinks(self):
        """Test the addition of a new hardlinked file.

        This test is directed at some previously buggy code that 1) failed to
        keep the correct number of hardlinks in the mirror metadata, and 2)
        failed to restore hardlinked files so that they are linked the same as
        when they were backed up. One of the conditions that triggered these
        bugs included adding a new hardlinked file somewhere in the middle of a
        list of previously linked files.  The bug was originally reported here:
        https://savannah.nongnu.org/bugs/?26848
        """

        # Setup initial backup
        MakeOutputDir()
        output = rpath.RPath(Globals.local_connection, abs_output_dir)
        hlsrc_dir = os.path.join(abs_test_dir, b"src_hardlink")

        hlsrc = rpath.RPath(Globals.local_connection, hlsrc_dir)
        if hlsrc.lstat():
            hlsrc.delete()
        hlsrc.mkdir()
        hlsrc_sub = hlsrc.append("subdir")
        hlsrc_sub.mkdir()
        hl_file1 = hlsrc_sub.append("hardlink1")
        hl_file1.write_string(self.hello_str)
        hl_file3 = hlsrc_sub.append("hardlink3")
        hl_file3.hardlink(hl_file1.path)

        InternalBackup(1, 1, hlsrc.path, output.path, 10000)
        out_subdir = output.append("subdir")
        assert out_subdir.append("hardlink1").getinode() == \
            out_subdir.append("hardlink3").getinode()

        # validate that hashes and link counts are correctly saved in metadata
        meta_prefix = rpath.RPath(
            Globals.local_connection,
            os.path.join(abs_output_dir, b"rdiff-backup-data",
                         b"mirror_metadata"))
        incs = restore.get_inclist(meta_prefix)
        assert len(incs) == 1
        metadata_rp = incs[0]
        hashes, link_counts = self.extract_metadata(metadata_rp)
        # hashes for ., ./subdir, ./subdir/hardlink1, ./subdir/hardlink3
        expected_hashes = [None, None, self.hello_str_hash, None]
        assert expected_hashes == hashes, (expected_hashes, hashes)
        expected_link_counts = [1, 1, 2, 2]
        assert expected_link_counts == link_counts, (expected_link_counts, link_counts)

        # Create a new hardlinked file between "hardlink1" and "hardlink3" and perform another backup
        hl_file2 = hlsrc_sub.append("hardlink2")
        hl_file2.hardlink(hl_file1.path)

        InternalBackup(1, 1, hlsrc.path, output.path, 20000)
        assert out_subdir.append("hardlink1").getinode() == \
            out_subdir.append("hardlink2").getinode()
        assert out_subdir.append("hardlink1").getinode() == \
            out_subdir.append("hardlink3").getinode()

        # validate that hashes and link counts are correctly saved in metadata
        incs = restore.get_inclist(meta_prefix)
        assert len(incs) == 2
        if incs[0].getinctype() == b'snapshot':
            metadata_rp = incs[0]
        else:
            metadata_rp = incs[1]
        hashes, link_counts = self.extract_metadata(metadata_rp)
        # hashes for ., ./subdir/, ./subdir/hardlink1, ./subdir/hardlink2, ./subdir/hardlink3
        expected_hashes = [None, None, self.hello_str_hash, None, None]
        assert expected_hashes == hashes, (expected_hashes, hashes)
        expected_link_counts = [1, 1, 3, 3, 3]
        # The following assertion would fail as a result of bugs that are now fixed
        assert expected_link_counts == link_counts, (expected_link_counts, link_counts)

        # Now try restoring, still checking hard links.
        sub_path = os.path.join(abs_output_dir, b"subdir")
        restore_path = os.path.join(abs_test_dir, b"hl_restore")
        restore_dir = rpath.RPath(Globals.local_connection, restore_path)
        hlrestore_file1 = restore_dir.append("hardlink1")
        hlrestore_file2 = restore_dir.append("hardlink2")
        hlrestore_file3 = restore_dir.append("hardlink3")

        if restore_dir.lstat():
            restore_dir.delete()
        InternalRestore(1, 1, sub_path, restore_path, 10000)
        for rp in [hlrestore_file1, hlrestore_file3]:
            rp.setdata()
        assert hlrestore_file1.getinode() == hlrestore_file3.getinode()

        if restore_dir.lstat():
            restore_dir.delete()
        InternalRestore(1, 1, sub_path, restore_path, 20000)
        for rp in [hlrestore_file1, hlrestore_file2, hlrestore_file3]:
            rp.setdata()
        assert hlrestore_file1.getinode() == hlrestore_file2.getinode()
        # The following assertion would fail as a result of bugs that are now fixed
        assert hlrestore_file1.getinode() == hlrestore_file3.getinode()
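The per-file link counts these tests expect in the metadata correspond to `st_nlink` from `os.stat()`; a runnable stdlib sketch:

import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    a = os.path.join(d, "hardlink1")
    b = os.path.join(d, "hardlink2")
    with open(a, "w") as f:
        f.write("hello")
    os.link(a, b)                    # add a second hard link to the file
    assert os.stat(a).st_nlink == 2  # both names now count as two links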