Example #1
    def test_validate_snaps_restore(self):
        # pylint: disable=too-many-statements
        # Start IO on all mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Setting some snapshot config options
        option_before_restore = {
            'volumeConfig': [{
                'softLimit': '100',
                'effectiveHardLimit': '200',
                'hardLimit': '256'
            }],
            'systemConfig': {
                'softLimit': '90%',
                'activateOnCreate': 'disable',
                'hardLimit': '256',
                'autoDelete': 'disable'
            }
        }
        ret = set_snap_config(self.mnode, option_before_restore)
        self.assertTrue(ret,
                        ("Failed to set vol option on  %s" % self.volname))
        g.log.info("Volume options for%s is set successfully", self.volname)

        # Get brick list before taking snap_restore
        bricks_before_snap_restore = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List before snap restore "
                   "volume: %s", bricks_before_snap_restore)

        # Creating snapshot
        ret = snap_create(self.mnode, self.volname, "snap1")
        self.assertTrue(ret,
                        ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot snap1 created successfully for volume  %s",
                   self.volname)

        # Again start IO on all mounts.
        all_mounts_procs = []
        count = 1000
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Reset volume to make sure volume options will reset
        ret, _, _ = volume_reset(self.mnode, self.volname, force=False)
        self.assertEqual(ret, 0, ("Failed to reset %s" % self.volname))
        g.log.info("Reset Volume %s is Successful", self.volname)

        # Removing one brick
        g.log.info("Starting volume shrink")
        ret = shrink_volume(self.mnode, self.volname, force=True)
        self.assertTrue(ret, ("Failed to shrink the volume on "
                              "volume %s", self.volname))
        g.log.info("Shrinking volume is successful on "
                   "volume %s", self.volname)

        # Restore snapshot
        ret = snap_restore_complete(self.mnode, self.volname, "snap1")
        self.assertTrue(ret, ("Failed to restore snap snap1 on the "
                              "volume %s", self.volname))
        g.log.info(
            "Restore of volume is successful from snap1 on "
            "volume  %s", self.volname)

        # Validate volume is up and running
        g.log.info("Verifying volume is up and process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(
            ret, ("Volume %s : All process are not online", self.volname))
        g.log.info("Volume %s : All process are online", self.volname)

        # Get snapshot config post restore
        option_after_restore = get_snap_config(self.mnode)
        # Compare snapshot config before and after restore
        self.assertNotEqual(option_before_restore, option_after_restore,
                            "Snapshot config is unchanged after snap restore")

        # Get brick list post restore
        bricks_after_snap_restore = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List after snap restore "
                   "volume: %s", bricks_after_snap_restore)
        # Compare brick_list
        self.assertNotEqual(bricks_before_snap_restore,
                            bricks_after_snap_restore,
                            "Brick list is unchanged after snap restore")

        # Creating snapshot
        ret = snap_create(self.mnode, self.volname, "snap2")
        self.assertTrue(ret,
                        ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot snap2 created successfully for volume  %s",
                   self.volname)

        # Again start IO on all mounts after restore
        all_mounts_procs = []
        count = 1000
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")
Example #2
    def test_entry_transaction_crash_consistency_rename(self):
        """
        Test entry transaction crash consistency : rename

        Description:
        - Create IO of 50 files
        - Rename 20 files
        - Calculate arequal before creating snapshot
        - Create snapshot
        - Rename 20 files more
        - Stop the volume
        - Restore snapshot
        - Start the volume
        - Get arequal after restoring snapshot
        - Compare arequals
        """

        # Creating files on client side
        count = 1
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "--base-file-name %d -f 25 %s"
                   % (self.script_upload_path,
                      count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
            count = count + 10

        # Wait for IO to complete
        ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed to complete on some of the clients")
        self.io_validation_complete = True
        g.log.info("IO is successful on all mounts")

        # Rename files
        self.all_mounts_procs, self.io_validation_complete = [], False
        cmd = ("/usr/bin/env python %s mv -s FirstRename %s"
               % (self.script_upload_path,
                  self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system, cmd,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)

        # Wait for IO to complete
        ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0])
        self.assertTrue(ret, "IO failed to complete on some of the clients")
        self.io_validation_complete = True
        g.log.info("IO is successful on all mounts")

        # Get arequal before creating snapshot
        ret, result_before_snapshot = collect_mounts_arequal(self.mounts[0])
        self.assertTrue(ret, "Collecting arequal-checksum failed")

        # Create snapshot
        snapshot_name = ('entry_transaction_crash_consistency_rename-%s-%s'
                         % (self.volname, self.mount_type))
        ret, _, err = snap_create(self.mnode, self.volname, snapshot_name)
        self.assertEqual(ret, 0, err)
        g.log.info("Snapshot %s created successfully", snapshot_name)

        # Rename files
        self.all_mounts_procs, self.io_validation_complete = [], False
        cmd = ("/usr/bin/env python %s mv -s SecondRename %s"
               % (self.script_upload_path,
                  self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system, cmd,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)

        # Wait for IO to complete
        ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0])
        self.assertTrue(ret, "IO failed to complete on some of the clients")
        self.io_validation_complete = True
        g.log.info("IO is successful on all mounts")

        # Restore snapshot
        ret = snap_restore_complete(self.mnode, self.volname,
                                    snapshot_name)
        self.assertTrue(ret, 'Failed to restore snapshot %s'
                        % snapshot_name)
        g.log.info("Snapshot %s restored successfully", snapshot_name)

        # Check if heal is completed
        ret = is_heal_complete(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal is not complete')
        g.log.info('Heal is completed successfully')

        # Wait for volume graph to get loaded.
        sleep(10)

        # Get arequal after restoring snapshot
        ret, result_after_restoring = collect_mounts_arequal(self.mounts[0])
        self.assertTrue(ret, "Collecting arequal-checksum failed")

        # Checking arequal before creating snapshot
        # and after restoring snapshot
        self.assertEqual(result_before_snapshot, result_after_restoring,
                         'Checksums are not equal')
        g.log.info('Checksums are equal')
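
The crash-consistency check in Example #2 boils down to one comparison: the arequal checksum taken before the snapshot must match the one taken after the restore. A minimal sketch of that comparison follows; data_unchanged_after_restore is a hypothetical helper and the import paths are assumed from the glustolibs layout.

    # Minimal sketch, not part of glustolibs; import paths are assumed.
    from glustolibs.gluster.snap_ops import snap_create, snap_restore_complete
    from glustolibs.io.utils import collect_mounts_arequal

    def data_unchanged_after_restore(mnode, volname, snapname, mount):
        """Return True if a mount's arequal checksum survives a snap restore."""
        # Checksum the mount contents before the snapshot is taken.
        ret, before = collect_mounts_arequal(mount)
        if not ret:
            raise RuntimeError("arequal collection failed before the snapshot")

        # snap_create returns (retcode, stdout, stderr).
        ret, _, err = snap_create(mnode, volname, snapname)
        if ret != 0:
            raise RuntimeError("snap_create failed: %s" % err)

        # ... entry transactions (renames, creates) may run here ...

        if not snap_restore_complete(mnode, volname, snapname):
            raise RuntimeError("restore of %s failed" % snapname)

        # Checksum again after the restore and compare with the earlier value.
        ret, after = collect_mounts_arequal(mount)
        if not ret:
            raise RuntimeError("arequal collection failed after the restore")
        return before == after

Example #2 additionally waits for heal to complete and sleeps briefly so the volume graph can reload before the second checksum is collected; a helper like this would need the same settling step to avoid false mismatches.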
Example #3
    def test_uss_snap_restore(self):
        """
        Description:
            This test case will validate USS after Snapshot restore.
            The restored snapshot should not be listed under the '.snaps'
            directory.

        * Perform I/O on mounts
        * Enable USS on volume
        * Validate USS is enabled
        * Create a snapshot
        * Activate the snapshot
        * Perform some more I/O
        * Create another snapshot
        * Activate the second snapshot
        * Restore volume to the second snapshot
        * From mount point validate under .snaps
          - first snapshot should be listed
          - second snapshot should not be listed
        """

        # pylint: disable=too-many-statements
        # Perform I/O
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name firstfiles %s" %
               (self.script_upload_path, self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system,
                           cmd,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)

        # Wait for IO to complete and validate IO
        self.assertTrue(
            wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0]),
            "IO failed on %s" % self.mounts[0])
        g.log.info("IO is successful on all mounts")

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Enable USS
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume")
        g.log.info("Successfully enabled USS on volume")

        # Validate USS is enabled
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, "USS is disabled on volume %s" % self.volname)
        g.log.info("USS enabled on volume %s", self.volname)

        # Create a snapshot
        ret, _, _ = snap_create(self.mnode, self.volname, self.snapshots[0])
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume  %s",
                   self.snapshots[0], self.volname)

        # Check the number of snaps using snap_list; it should be 1 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            1, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snapshots")

        # Activate the snapshot
        ret, _, _ = snap_activate(self.mnode, self.snapshots[0])
        self.assertEqual(
            ret, 0, ("Failed to activate snapshot %s" % self.snapshots[0]))
        g.log.info("Snapshot %s activated successfully", self.snapshots[0])

        # Perform I/O
        self.all_mounts_procs = []
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name secondfiles %s" %
               (self.script_upload_path, self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system,
                           cmd,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)

        # Wait for IO to complete and validate IO
        self.assertTrue(
            wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0]),
            "IO failed on %s" % self.mounts[0])
        g.log.info("IO is successful on all mounts")

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Create another snapshot
        ret, _, _ = snap_create(self.mnode, self.volname, self.snapshots[1])
        self.assertEqual(
            ret, 0, ("Failed to create snapshot for volume %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume  %s",
                   self.snapshots[1], self.volname)

        # Check the number of snaps using snap_list; it should be 2 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            2, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snapshots")

        # Activate the second snapshot
        ret, _, _ = snap_activate(self.mnode, self.snapshots[1])
        self.assertEqual(
            ret, 0, ("Failed to activate snapshot %s" % self.snapshots[1]))
        g.log.info("Snapshot %s activated successfully", self.snapshots[1])

        # Restore volume to the second snapshot
        ret = snap_restore_complete(self.mnode, self.volname,
                                    self.snapshots[1])
        self.assertTrue(ret, ("Failed to restore snap %s on the "
                              "volume %s" % (self.snapshots[1], self.volname)))
        g.log.info("Restore of volume is successful from %s on "
                   "volume %s", self.snapshots[1], self.volname)

        # Verify all volume processes are online
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(ret, "Failed: All volume processes are not online")
        g.log.info("All volume processes are online")
        ret = is_snapd_running(self.mnode, self.volname)
        self.assertTrue(
            ret, "Failed: snapd is not running for volume %s" % self.volname)
        g.log.info("Successful: snapd is running")

        # List activated snapshots under the .snaps directory
        snap_dir_list = get_uss_list_snaps(self.mounts[0].client_system,
                                           self.mounts[0].mountpoint)
        self.assertIsNotNone(
            snap_dir_list, "Failed to list snapshots under .snaps directory")
        g.log.info("Successfully gathered list of snapshots under the .snaps"
                   " directory")

        # Check for first snapshot as it should get listed here
        self.assertIn(self.snapshots[0], snap_dir_list,
                      ("Unexpected : %s not listed under .snaps "
                       "directory" % self.snapshots[0]))
        g.log.info("Activated Snapshot %s listed Successfully",
                   self.snapshots[0])

        # Check for second snapshot as it should not get listed here
        self.assertNotIn(self.snapshots[1], snap_dir_list,
                         ("Unexpected : %s listed in .snaps "
                          "directory" % self.snapshots[1]))
        g.log.info("Restored Snapshot %s not listed ", self.snapshots[1])