# Example #1
    def test_heketi_volume_snapshot_delete(self):
        """Test heketi volume snapshot delete operation.

        Creates a heketi volume, snapshots it on the gluster side, deletes
        the snapshot and verifies the snapshot list returns to its
        pre-creation state.
        """
        h_volume_size = 1
        snap_name = 'snap_test_heketi_volume_snapshot_create_1'
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        h_volume_info = heketi_volume_create(h_node,
                                             h_url,
                                             h_volume_size,
                                             json=True)
        self.addCleanup(heketi_volume_delete, h_node, h_url,
                        h_volume_info["id"])

        # Get the snapshot list before snap creation
        snap_list_before = get_snap_list('auto_get_gluster_endpoint')
        self.assertIsNotNone(
            snap_list_before,
            "Failed to get the snapshot list {}".format(snap_list_before))

        # Create a snapshot of the backing gluster volume
        h_volume_name = h_volume_info["name"]
        ret, _, err = snap_create('auto_get_gluster_endpoint',
                                  h_volume_name,
                                  snap_name,
                                  timestamp=False)
        # Cleanup registered before the assert so a partially created
        # snapshot is still removed if the assertion below fails.
        self.addCleanup(podcmd.GlustoPod()(snap_delete),
                        "auto_get_gluster_endpoint", snap_name)
        # snap_create returns a shell-style exit code; 0 (falsy) is success
        self.assertFalse(
            ret, "Failed to create snapshot {} for heketi volume {} with"
            " error {}".format(snap_name, h_volume_name, err))

        snap_list = get_snap_list('auto_get_gluster_endpoint')
        self.assertIsNotNone(
            snap_list, "Failed to get the snapshot list {}".format(snap_list))
        self.assertIn(
            snap_name, snap_list,
            "Heketi volume snapshot {} not found in {}".format(
                snap_name, snap_list))

        # Delete the snapshot
        ret, _, err = snap_delete('auto_get_gluster_endpoint', snap_name)
        self.assertFalse(
            ret, "Failed to delete snapshot {} for heketi volume with err {}".
            format(snap_name, err))

        # The snapshot list must be back to its pre-creation state
        snap_list_after = get_snap_list('auto_get_gluster_endpoint')
        self.assertIsNotNone(
            snap_list_after,
            "Failed to get the snapshot list {}".format(snap_list_after))
        # Fixed message: this compares the lists before creation and after
        # deletion (the original message said "after creation").
        self.assertEqual(
            snap_list_before, snap_list_after,
            "Expecting Snapshot list before creation {} and after deletion "
            "{} to be same".format(snap_list_before, snap_list_after))
# Example #2
    def test_snap_list_invalid_cases_names(self):
        """
        Steps:
        1. create volume and mount it
        2. create snapshot with invalid snap name
           should fail
        3. create snapshot
        4. snapshot list Invalid command should fail
        5. snapshot list Invalid parameters with multiple
           and non-existing volume name should fail
        """
        # Creating snapshot with invalid snap name; must be rejected
        g.log.info("Creating snapshot with invalid snap name")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snapinvalid)
        self.assertNotEqual(ret, 0, ("Unexpected: Snapshot %s created "
                                     "successfully for volume %s with "
                                     "invalid snap name" %
                                     (self.snapinvalid, self.volname)))
        # Fixed: log the invalid name that was rejected (was self.snap1)
        g.log.info(
            "Expected: Failed to create snapshot %s for volume %s"
            "with invalid snap name", self.snapinvalid, self.volname)

        # Creating a valid snapshot
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap1)
        # Fixed: use % formatting so the failure message renders as a
        # string, not an unformatted ("fmt", args) tuple
        self.assertEqual(
            ret, 0, "Failed to create snapshot for volume %s" % self.volname)
        # Fixed: the snapshot just created is self.snap1
        # (was logged as self.snapinvalid)
        g.log.info("Snapshot %s created "
                   "successfully for volume"
                   "%s", self.snap1, self.volname)

        # validate snapshot list with volname; only snap1 should exist
        g.log.info("validate snapshot list with volname")
        out = get_snap_list(self.mnode)
        self.assertIsNotNone(out, "Failed to list all snapshots")
        self.assertEqual(len(out), 1, "Failed to validate snap_list")
        g.log.info("Successfully validated snapshot list")

        # listing snapshot with invalid volume name which should fail
        g.log.info("snapshot list with invalid volume name should fail")
        cmd = ("gluster snap list %s" % self.volname1)
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertNotEqual(
            ret, 0, "Unexpected: Successfully listed "
            "all snapshots with invalid volume name "
            "%s" % self.volname1)
        g.log.info(
            "Expected to fail listing the snapshot with invalid"
            "volume name %s", self.volname1)

        # snapshot list with multiple and non-existing volume names
        g.log.info("snapshot list Invalid parameter with "
                   "multiple and non-existing volume name should fail")
        cmd = ("gluster snap list %s %s" % (self.volname, self.volname1))
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertNotEqual(ret, 0, "Unexpected: listed all snapshots")
        g.log.info("Expected: Failed to list snapshots")
# Example #3
    def test_validate_snaps_dir_over_uss(self):

        # pylint: disable=too-many-statements
        """
        Run IOs on mount and take 2 snapshot.
        Activate 1 snapshot and check directory listing.
        Try to write to .snaps should not allow.
        Try listing the other snapshot should fail.
        """

        # run IOs asynchronously on every mount
        self.counter = 1
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            # Small deep directory tree; the exact layout is irrelevant,
            # only that the volume contains data before snapshotting.
            cmd = (
                "/usr/bin/env python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 2 "
                "--dir-length 2 "
                "--max-num-of-dirs 2 "
                "--num-of-files 2 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        # flag read by teardown to decide whether IO still needs waiting on
        self.io_validation_complete = False

        # Validate IO completed successfully on all clients
        self.assertTrue(validate_io_procs(self.all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")
        self.io_validation_complete = True

        # Precondition: the volume must start with no snapshots
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(len(snap_list), 0, "Unexpected: %s snapshots"
                         "present" % len(snap_list))
        g.log.info("Expected: No snapshots present")

        # Create 2 snapshots: snap-0 and snap-1
        g.log.info("Starting to Create Snapshots")
        for snap_num in range(0, 2):
            ret, _, _ = snap_create(self.mnode, self.volname,
                                    "snap-%s" % snap_num)
            self.assertEqual(
                ret, 0, "Snapshot Creation failed"
                " for snap-%s" % snap_num)
            g.log.info("Snapshot snap-%s of volume %s created"
                       " successfully", snap_num, self.volname)

        # Activate only snap-0; snap-1 stays deactivated so the .snaps
        # listing below can distinguish the two states
        g.log.info("Activating snapshot snap-0")
        ret, _, _ = snap_activate(self.mnode, "snap-0")
        self.assertEqual(ret, 0, "Failed to activate " "Snapshot snap-0")
        g.log.info("Snapshot snap-0 Activated Successfully")

        # Enable USS (User Serviceable Snapshots) so snapshots appear
        # under the .snaps directory on the mounts
        g.log.info("Enable uss for volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, "Failed to enable USS for "
            " volume %s" % self.volname)
        g.log.info("Successfully enabled USS " "for volume %s", self.volname)

        # Validate uss enabled
        g.log.info("Validating uss enabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, "Failed to validate uss enable")
        g.log.info("Successfully validated uss enable for volume"
                   "%s", self.volname)

        # Only the activated snapshot (snap-0) may be listed under .snaps;
        # the deactivated snap-1 must be absent
        g.log.info("Listing activated snapshots under .snaps")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            validate_dir = out.split('\n')
            self.assertIn(
                'snap-0', validate_dir, "Failed to "
                "validate snap-0 under .snaps directory")
            g.log.info("Activated Snapshot Successfully listed")
            self.assertNotIn(
                'snap-1', validate_dir, "Unexpected: "
                "Successfully listed snap-1 under "
                ".snaps directory")
            g.log.info("Expected: De-activated Snapshot not listed")

        # Attempt writes inside .snaps — snapshot views are read-only,
        # so this IO is expected to fail
        g.log.info("Starting IO on all mounts...")
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s/.snaps/abc/" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # IO should fail with a read-only-filesystem error
        g.log.info("IO should Fail with ROFS error.....")
        self.assertFalse(validate_io_procs(all_mounts_procs, self.mounts),
                         "Unexpected: IO successfully completed")
        g.log.info("Expected: IO failed to complete")

        # Reading the activated snapshot's content from the mount must work
        ret = view_snaps_from_mount(self.mounts, "snap-0")
        self.assertTrue(
            ret, "UnExpected: Unable to list content "
            "in activated snapshot"
            " activated snapshot")
        g.log.info("Expected: Successfully listed contents in"
                   " activated snapshot")
    def test_validate_snaps_create(self):
        """Validate the variants of 'gluster snapshot create'.

        Creates snapshots with a plain name, with a description, with
        'force' and with 'no-timestamp'; deletes them all; then creates
        5 snapshots while IO is in progress and verifies the IO, the
        snapshot count and the snapshot names.
        """
        # Creating snapshot using gluster snapshot create <snap1> <vol-name>
        # Fixed: g.run returns (rc, stdout, stderr). The original bound the
        # whole tuple and asserted its truthiness, which is always True and
        # so could never fail. Unpack rc and compare it to 0 instead
        # (matching the pattern used by the sibling tests in this file).
        cmd_str = "gluster snapshot create %s %s" % ("snap1", self.volname)
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot snap1 created successfully for volume  %s" %
                   (self.volname))

        # Create snapshot of volume using
        #   gluster snapshot create <snap2> <vol-name(s)> [description
        #   <description with words and quotes>]
        desc = 'description this is a snap with "snap2" name and description'
        cmd_str = ("gluster snapshot create %s %s %s" %
                   ("snap2", self.volname, desc))
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot snap2 created successfully for volume  %s" %
                   (self.volname))

        # Create one more snapshot of volume using force
        cmd_str = ("gluster snapshot create %s %s %s" %
                   ("snap3", self.volname, "force"))
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot snap3 created successfully for volume  %s" %
                   (self.volname))

        # Create one more snapshot of volume using no-timestamp option
        cmd_str = ("gluster snapshot create %s %s %s" %
                   ("snap4", self.volname, "no-timestamp"))
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot snap4 created successfully for volume  %s" %
                   (self.volname))

        # Delete all snaps so the count check below starts from zero
        ret, _, _ = snap_delete_all(self.mnode)
        self.assertEqual(ret, 0, "Snapshot delete failed.")
        g.log.info("Successfully deleted all snaps")

        # Start IO on all mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            # distinct start numbers keep mounts from colliding on dir names
            count = count + 10

        # Create 5 snaps while IO is in progress
        for i in range(0, 5):
            cmd_str = "gluster snapshot create %s %s %s" % (
                "snapy%s" % i, self.volname, "no-timestamp")
            # Fixed: unpack the return code here as well
            ret, _, _ = g.run(self.mnode, cmd_str)
            self.assertEqual(
                ret, 0, ("Failed to create snapshot for %s" % self.volname))
            g.log.info("Snapshot %s created successfully for volume  %s" %
                       ("snapy%s" % i, self.volname))

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Check for no of snaps using snap_list it should be 5 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            5, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snaps.")

        # Validate all snaps created during IO
        for i in range(0, 5):
            self.assertIn("snapy%s" % i, snap_list, "%s snap not "
                          "found " % ("snapy%s" % i))
        g.log.info("Sucessfully validated names of snap")
# Example #5
    def test_snapshot_while_rebalance(self):
        # pylint: disable=too-many-statements, missing-docstring
        """Verify snapshot creation is rejected while rebalance runs.

        Runs IO, takes snapshot 'snapy', expands the volume and starts a
        rebalance; creating 'snapy_rebal' during the rebalance must fail,
        and the same create must succeed after the rebalance completes.
        """
        # Start IO on all mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            # distinct --dirname-start-num per mount avoids name collisions
            count = count + 10

        # Validate IO completed on all clients
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Create one snapshot of volume using no-timestamp option
        cmd_str = ("gluster snapshot create %s %s %s" %
                   ("snapy", self.volname, "no-timestamp"))
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot snapy created successfully "
                   "for volume %s", self.volname)

        # Check for no of snaps using snap_list it should be 1
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            1, len(snap_list), "Expected 1 snapshot "
            "found %s snapshots" % len(snap_list))
        g.log.info("Successfully validated number of snaps.")

        # validate snap name
        self.assertIn("snapy", snap_list, " snap not found")
        g.log.info("Successfully validated names of snap")

        # get the bricks for the volume (logged for debugging only)
        g.log.info("Fetching bricks for the volume : %s", self.volname)
        bricks_list = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List : %s", bricks_list)

        # expanding volume so a rebalance becomes necessary
        g.log.info("Start adding bricks to volume %s", self.volname)
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, ("Failed to add bricks to "
                              "volume %s " % self.volname))
        g.log.info("Add brick successful")

        # Log Volume Info and Status after expanding the volume
        g.log.info("Logging volume info and Status after expanding volume")
        ret = log_volume_info_and_status(self.mnode, self.volname)
        self.assertTrue(ret, ("Logging volume info and status failed "
                              "on volume %s", self.volname))
        g.log.info(
            "Successful in logging volume info and status "
            "of volume %s", self.volname)

        # Verify volume's all process are online, waiting up to 60 sec
        g.log.info("Verifying volume's all process are online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname,
                                                   60)
        self.assertTrue(ret, ("Volume %s : All process are not "
                              "online", self.volname))
        g.log.info("Successfully Verified volume %s "
                   "processes are online", self.volname)

        # Start Rebalance
        g.log.info("Starting Rebalance on the volume")
        ret, _, err = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0,
                         ("Failed to start rebalance on "
                          "the volume %s with error %s" % (self.volname, err)))
        g.log.info("Successfully started rebalance on the "
                   "volume %s", self.volname)

        # Log Rebalance status
        g.log.info("Log Rebalance status")
        ret, _, _ = rebalance_status(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to log rebalance status")
        g.log.info("successfully logged rebalance status")

        # Snapshot creation during rebalance must be rejected by glusterd,
        # so a non-zero return code is the expected outcome here
        cmd_str = ("gluster snapshot create %s %s %s" %
                   ("snapy_rebal", self.volname, "no-timestamp"))
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertNotEqual(ret, 0, ("successfully created 'snapy_rebal'"
                                     " for %s" % self.volname))
        g.log.info("Snapshot 'snapy_rebal' not created as rebalance is in "
                   "progress check log")
        # Snapshot count must still be 1 (only 'snapy')
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            1, len(snap_list), "Expected 1 snapshot "
            "found %s snapshot" % len(snap_list))
        g.log.info("Successfully validated number of snaps.")

        # Wait for rebalance to complete
        g.log.info("Waiting for rebalance to complete")
        ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
        self.assertTrue(ret, ("Rebalance is not yet complete "
                              "on the volume %s", self.volname))
        g.log.info("Rebalance is successfully complete on "
                   "the volume %s", self.volname)

        # Check Rebalance status after rebalance is complete
        g.log.info("Checking Rebalance status")
        ret, _, _ = rebalance_status(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to get rebalance status for "
                                  "the volume %s", self.volname))
        g.log.info("Successfully got rebalance status of the "
                   "volume %s", self.volname)

        # The same snapshot name must now succeed, since the rebalance
        # has finished
        cmd_str = ("gluster snapshot create %s %s %s" %
                   ("snapy_rebal", self.volname, "no-timestamp"))
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info(
            "Snapshot snapy_rebal created successfully "
            "for volume  %s", self.volname)

        # Check for no of snaps using snap_list it should be 2
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            2, len(snap_list), "Expected 2 snapshots "
            "found %s snapshot" % len(snap_list))
        g.log.info("Successfully validated number of snaps.")

        # validate snap name
        self.assertIn("snapy_rebal", snap_list, " snap not found")
        g.log.info("Successfully validated names of snap")
# Example #6
    def test_snap_auto_delete(self):
        """Verify the snapshot auto-delete configuration option.

        Enables auto-delete, pins snap-max-hard-limit to 10 and
        snap-max-soft-limit to 80%, then creates 20 snapshots. As the
        count crosses the effective soft limit (8), the oldest snapshots
        are expected to be auto-deleted so that only the newest 8 remain.
        """

        # pylint: disable=too-many-statements
        # Turn the auto-delete snapshot config option on
        ret, _, _ = set_snap_config(self.mnode, self.autodel_enable)
        self.assertEqual(ret, 0, ("Failed to enable auto-delete snapshot "
                                  "config option on volume %s", self.volname))
        g.log.info("Successfully enabled snapshot auto-delete")

        # Pin snap-max-hard-limit for this volume to 10 snapshots
        hard_limit_cfg = {'snap-max-hard-limit': '10'}
        ret, _, _ = set_snap_config(self.mnode, hard_limit_cfg, self.volname)
        self.assertEqual(ret, 0, ("Failed to set snap-max-hard-limit"
                                  "config option for volume %s", self.volname))
        g.log.info(
            "Successfully set snap-max-hard-limit config option for"
            "volume %s", self.volname)

        # Read the config back and confirm the hard limit took effect
        config_after_hard = get_snap_config(self.mnode)
        self.assertEqual(config_after_hard['volumeConfig'][0]['hardLimit'],
                         '10', ("Failed to Validate snap-max-hard-limit"))
        g.log.info("Successfully validated snap-max-hard-limit")

        # Pin snap-max-soft-limit to 80% of the hard limit
        soft_limit_cfg = {'snap-max-soft-limit': '80'}
        ret, _, _ = set_snap_config(self.mnode, soft_limit_cfg)
        self.assertEqual(ret, 0, ("Failed to set snap-max-soft-limit"
                                  "config option"))
        g.log.info("Successfully set snap-max-soft-limit config option")

        # Read the config back: 80% of 10 must resolve to 8
        config_after_soft = get_snap_config(self.mnode)
        self.assertEqual(config_after_soft['volumeConfig'][0]['softLimit'],
                         '8', ("Failed to Validate max-soft-limit"))
        g.log.info("Successfully validated snap-max-soft-limit")

        # Create the 20 configured snapshots; once the count crosses the
        # soft limit the oldest ones are auto-deleted, so only the newest
        # 8 should survive.
        for snap in self.snapshots:
            ret, _, _ = snap_create(
                self.mnode, self.volname, snap,
                description="This is the Description wit#"
                " ($p3c1al) ch@r@cters!")
            self.assertEqual(ret, 0, ("Failed to create snapshot %s for "
                                      "volume %s", snap, self.volname))
            g.log.info("Snapshot snap%s of volume %s created successfully",
                       snap, self.volname)

        # After auto-delete exactly 8 snapshots may remain, and they must
        # be the 8 most recently created ones.
        snaps_present = get_snap_list(self.mnode)
        self.assertEqual(len(snaps_present), 8,
                         ("Failed: The snapshot count is not as expected"))
        for snap in self.snapshots[-8:]:
            self.assertIn(
                snap, snaps_present, "Failed to validate snapshot "
                "existence for the snapshot %s" % snap)
        g.log.info("Successful in validating the Snapshot count and existence "
                   "by snapname")
    def test_snap_glusterd_down(self):
        # pylint: disable=too-many-statements
        """
        Steps:

        1. create a volume
        2. mount volume
        3. create snapshot of that volume
        4. validate using snapshot info
        5. Activate snapshot
        6. List all snapshots present
        7. validate using snapshot info
        8. Stop glusterd on one node
        9. Check glusterd status
       10. deactivate created snapshot
       11. Start glusterd on that node
       12. Check glusterd status
       13. validate using snapshot info
       13. Check all peers are connected

        """
        # Creating snapshot:
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(ret, 0, ("Failed to create snapshot %s for volume %s"
                                  % (self.snap, self.volname)))
        g.log.info("Snapshot %s created successfully "
                   "for volume %s", self.snap, self.volname)

        # A freshly created snapshot must not yet be in 'Started' state
        g.log.info("Checking snapshot info")
        snap_info = get_snap_info_by_snapname(self.mnode, self.snap)
        self.assertIsNotNone(snap_info, "Failed to get snap information"
                             "for snapshot %s" % self.snap)
        status = snap_info['snapVolume']['status']
        self.assertNotEqual(status, 'Started', "snapshot %s "
                            "not started" % self.snap)
        g.log.info("Successfully checked snapshot info")

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to Activate snapshot %s"
                                  % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # snapshot list: exactly the one snapshot created above
        g.log.info("Starting to validate list of snapshots")
        snap_list1 = get_snap_list(self.mnode)
        self.assertIsNotNone(snap_list1, "Failed to list all the snapshot")
        self.assertEqual(len(snap_list1), 1, "Failed to validate snap list")
        g.log.info("Snapshot list successfully validated")

        # After activation the snapshot status must be 'Started'
        g.log.info("Checking snapshot info")
        snap_info = get_snap_info_by_snapname(self.mnode, self.snap)
        status = snap_info['snapVolume']['status']
        self.assertEqual(status, 'Started', "Failed to"
                         "start snapshot info")
        g.log.info("Successfully checked snapshot info")

        # Stop Glusterd on one node
        # NOTE(review): the return value of stop_glusterd is not checked
        # here; the polling loop below is what actually verifies the stop
        g.log.info("Stopping Glusterd on one node")
        ret = stop_glusterd(self.servers[1])

        # Poll up to 80s (2s interval) until glusterd is down; per the
        # assertion below, is_glusterd_running == 1 means "not running"
        g.log.info("Check glusterd running or not")
        count = 0
        while count < 80:
            ret = is_glusterd_running(self.servers[1])
            if ret == 1:
                break
            time.sleep(2)
            count += 2
        self.assertEqual(ret, 1, "Unexpected: glusterd running on node %s" %
                         self.servers[1])
        g.log.info("Expected: Glusterd not running on node %s",
                   self.servers[1])

        # Deactivate the snapshot while one glusterd is down; this is
        # expected to succeed (quorum of remaining nodes)
        g.log.info("Starting to de-activate Snapshot")
        ret, _, _ = snap_deactivate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to deactivate snapshot %s"
                                  % self.snap))
        g.log.info("Snapshot %s deactivated successfully", self.snap)

        # After deactivation the status must no longer be 'Started'
        g.log.info("Checking snapshot info")
        snap_info = get_snap_info_by_snapname(self.mnode, self.snap)
        status = snap_info['snapVolume']['status']
        self.assertNotEqual(status, 'Started', "snapshot %s "
                            "not started" % self.snap)
        g.log.info("Successfully validated snapshot info")

        # Start Glusterd on node
        g.log.info("Starting Glusterd on node %s", self.servers[1])
        ret = start_glusterd(self.servers[1])
        self.assertTrue(ret, "Failed to start glusterd on %s node"
                        % self.servers[1])
        g.log.info("Successfully started glusterd on "
                   "%s node", self.servers[1])

        # Poll up to 80s until glusterd reports running again; per the
        # assertion below, a return of 0 means "running"
        g.log.info("Check glusterd running or not")
        count = 0
        while count < 80:
            ret = is_glusterd_running(self.servers[1])
            if ret:
                break
            time.sleep(2)
            count += 2
        self.assertEqual(ret, 0, "glusterd not running on node %s "
                         % self.servers[1])
        g.log.info("glusterd is running on %s node",
                   self.servers[1])

        # The deactivated state must persist after glusterd came back
        g.log.info("Checking snapshot info")
        snap_info = get_snap_info_by_snapname(self.mnode, self.snap)
        self.assertIsNotNone(snap_info, "Failed to get snap info for"
                             " snapshot %s" % self.snap)
        status = snap_info['snapVolume']['status']
        self.assertNotEqual(status, 'Started', "snapshot"
                            " %s failed to validate with snap info"
                            % self.snap)
        g.log.info("Successfully validated snapshot info")

        # Wait (up to 80s each) for every peer to reconnect to the cluster
        g.log.info("Validating all the peers are in connected state")
        for servers in self.servers:
            count = 0
            while count < 80:
                ret = is_peer_connected(self.mnode, servers)
                if ret:
                    break
                time.sleep(2)
                count += 2
            self.assertTrue(ret, "All the nodes are not in cluster")
        g.log.info("Successfully validated all the peers")
    def test_uss_snap_active_deactive(self):

        # pylint: disable=too-many-statements
        """
        Steps:
        * Create volume
        * Mount volume
        * Perform I/O on mounts
        * Create 2 snapshots snapy1 & snapy2
        * Validate snap created
        * Enable USS
        * Validate USS is enabled
        * Validate snapd is running
        * Activate snapy1 & snapy2
        * List snaps under .snap directory
          -- snap1 and snap2 should be listed under .snaps
        * Deactivate snapy2
        * List snaps under .snap directory
          -- snapy2 is not listed as it is deactivated
        * Activate snapy2
        * List snaps under .snap directory
          -- snap1 and snap2 should be listed under .snaps
        """

        # Perform I/O
        g.log.info("Starting IO on all mounts...")
        self.counter = 1
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 2 "
                "--dir-length 2 "
                "--max-num-of-dirs 2 "
                "--num-of-files 2 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        g.log.info("Wait for IO to complete and validate IO ...")
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        self.io_validation_complete = True
        g.log.info("I/O successful on clients")

        # Enable USS
        g.log.info("Enable USS on volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume")
        g.log.info("Successfully enabled USS on volume")

        # Validate USS is enabled
        g.log.info("Validating USS is enabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, "USS is disabled on volume " "%s" % self.volname)
        g.log.info("USS enabled on volume %s", self.volname)

        # Validate snapd running on every server
        for server in self.servers:
            g.log.info("Validating snapd daemon on:%s", server)
            ret = is_snapd_running(server, self.volname)
            self.assertTrue(ret, "Snapd is Not running on " "%s" % server)
            g.log.info("Snapd Running on node: %s", server)

        # Create 2 snapshots (snapy1, snapy2)
        g.log.info("Creating 2 snapshots for volume %s", self.volname)
        for i in range(1, 3):
            ret, _, _ = snap_create(self.mnode, self.volname, "snapy%s" % i)
            self.assertEqual(
                ret, 0, ("Failed to create snapshot for %s" % self.volname))
            g.log.info("Snapshot %s created successfully for volume  %s",
                       "snapy%s" % i, self.volname)

        # Check for no of snaps using snap_list it should be 2 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            2, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snaps.")

        # Activate snapshot snapy1 & snapy2
        g.log.info("Activating snapshot snapy1 & snapy2")
        for i in range(1, 3):
            ret, _, _ = snap_activate(self.mnode, "snapy%s" % i)
            self.assertEqual(ret, 0, "Failed to activate snapshot snapy%s" % i)
        g.log.info("Both snapshots activated successfully")

        # Both activated snapshots must be visible under .snaps
        g.log.info("Listing activated snapshots under .snaps")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            validate_dir = out.split('\n')
            self.assertIn(
                "snapy1", validate_dir, "Failed to "
                "validate snapy1 under .snaps directory")
            g.log.info("Activated Snapshot snapy1 listed Successfully")
            self.assertIn(
                "snapy2", validate_dir, "Failed to "
                "validate snapy2 under .snaps directory")
            g.log.info("Activated Snapshot snapy2 listed Successfully")

        # Deactivate snapshot snapy2
        g.log.info("Deactivating snapshot snapy2")
        ret, _, _ = snap_deactivate(self.mnode, "snapy2")
        self.assertEqual(ret, 0, "Failed to deactivate snapshot snapy2")
        g.log.info("Successfully deactivated snapshot snapy2")

        # validate snapy2 should not present in mountpoint
        ret = view_snaps_from_mount(self.mounts, "snapy2")
        self.assertFalse(
            ret, " UnExpected : Still able to View snapy2"
            " from mount ")
        g.log.info("Successfully verified deactivated snapshot "
                   "snapy2 is not listed")

        # Activate snapshot snapy2
        ret, _, _ = snap_activate(self.mnode, "snapy2")
        self.assertEqual(ret, 0, "Failed to activate Snapshot snapy2")
        g.log.info("Snapshot snapy2 activated successfully")

        # After re-activation both snapshots must be listed again
        g.log.info("Listing activated snapshots under .snaps")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            validate_dir = out.split('\n')
            self.assertIn(
                "snapy1", validate_dir, "Failed to "
                "validate snapy1 under .snaps directory")
            g.log.info("Activated Snapshot snapy1 listed Successfully")
            self.assertIn(
                "snapy2", validate_dir, "Failed to "
                "validate snapy2 under .snaps directory")
            g.log.info("Activated Snapshot snapy2 listed Successfully")
# Example #9
# 0
    def test_snap_del_gd_down(self):
        """
        Verify a snapshot delete is propagated to a node whose glusterd
        was down at delete time, once it rejoins the cluster.

        Steps:
        1. Create volumes
        2. Create 5 snapshots
        3. Bring one node down
        4. Delete one snapshot
        5. list snapshot and validate delete
        6. Bring up the downed node
        7. Validate number of snaps after handshake on the
           brought down node.
        """
        # Create 5 snapshots (snapy0 .. snapy4)
        g.log.info("Creating 5 snapshots for volume %s", self.volname)
        for i in range(0, 5):
            ret, _, _ = snap_create(self.mnode, self.volname, "snapy%s" % i)
            self.assertEqual(
                ret, 0, ("Failed to create snapshot for %s" % self.volname))
            g.log.info("Snapshot %s created successfully for volume  %s",
                       "snapy%s" % i, self.volname)

        # Check for no of snaps using snap_list it should be 5 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            5, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snaps.")

        # Stopping glusterd service on server[1]
        ret = stop_glusterd(self.servers[1])
        self.assertTrue(
            ret,
            "Failed to stop glusterd service on node : %s" % self.servers[1])
        g.log.info("Stopped glusterd services successfully on: %s",
                   self.servers[1])

        # Delete one snapshot snapy1 while server[1] is down
        ret, _, _ = snap_delete(self.servers[0], "snapy1")
        self.assertEqual(ret, 0, "Failed to delete snapshot snapy1")
        g.log.info("Successfully deleted snapshot of snapy1")

        # Check for no of snaps using snap_list it should be 4 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            4, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snaps.")

        # Starting glusterd services on server[1]
        ret = start_glusterd(self.servers[1])
        self.assertTrue(
            ret, "Failed to start glusterd on node "
            ": %s" % self.servers[1])
        g.log.info("Started glusterd services successfully on: %s",
                   self.servers[1])

        # Check for no of snaps using snap_list it should be 4 for server[1]
        count = 0
        snap_list = None
        # putting wait here for glusterd handshake
        while count < 60:
            snap_list = get_snap_list(self.servers[1])
            # get_snap_list may return None while glusterd is still coming
            # up; guard so len() is not called on None mid-handshake
            if snap_list is not None and len(snap_list) == 4:
                break
            time.sleep(2)
            count += 2
        self.assertIsNotNone(
            snap_list, "Failed to get snap list from node %s"
            % self.servers[1])
        self.assertEqual(
            4, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snaps.")
    def test_validate_snaps_256(self):
        """
        Create 256 snapshots (the configured snap-max-hard-limit) and
        verify that creating a 257th snapshot is rejected.
        """
        # Start IO on all mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            # stagger the starting dir number so mounts don't collide
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # set config for 256 snpas (to make sure to override)
        cmd_str = ("gluster snapshot config snap-max-hard-limit 256"
                   " --mode=script")
        # g.run returns a (rc, stdout, stderr) tuple; assert on the return
        # code — asserting on the raw tuple is always truthy and hides
        # command failures
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0, "Failed to set snap-max-hard-limit to 256.")
        g.log.info("snap-max-hard limit successfully set for 256.")

        # Create 256 snaps
        for i in range(1, 257, 1):
            cmd_str = "gluster snapshot create %s %s %s" % (
                "snapy%s" % i, self.volname, "no-timestamp")
            ret, _, _ = g.run(self.mnode, cmd_str)
            self.assertEqual(
                ret, 0, ("Failed to create snapshot for %s" % self.volname))
            g.log.info("Snapshot %s created successfully for volume  %s",
                       "snapy%s" % i, self.volname)

        # Check for no. of snaps using snap_list it should be 256
        snap_list = get_snap_list(self.mnode)
        self.assertTrue((len(snap_list) == 256), "No of snaps not consistent "
                        "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snaps.")

        # Validate all 256 snap names created during
        for i in range(1, 257, 1):
            self.assertTrue(("snapy%s" % i in snap_list), "%s snap not "
                            "found " % ("snapy%s" % i))
        g.log.info("Successfully validated names of snap")

        # Try to create 257th snapshot; must fail with rc 1 since the
        # hard limit has been reached
        cmd_str = "gluster snapshot create %s %s %s" % ("snap", self.volname,
                                                        "no-timestamp")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 1, ("Unexpected: Successfully created 'snap'"
                                  " for  volume %s" % self.volname))
        g.log.info("Snapshot 'snap' not created as it is 257th snap")

        # Check for no. of snaps using snap_list it should be 256
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            256, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snaps.")
    def test_snap_clone_snapd(self):
        """
        Steps:

        1. create a volume
        2. Create a snapshots and activate
        3. Clone the snapshot and mount it
        4. Check for snapd daemon
        5. enable uss and validate snapd
        5. stop cloned volume
        6. Validate snapd
        7. start cloned volume
        8. validate snapd
        9. Create 5 more snapshot
        10. Validate total number of
            snapshots created.
        11. Activate 5 snapshots
        12. Enable USS
        13. Validate snapd
        14. kill snapd on all nodes
        15. validate snapd running
        16. force start clone volume
        17. validate snaps inside .snaps directory
        """
        # pylint: disable=too-many-statements, too-many-locals

        # Starting I/O
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s" % (
                       self.script_upload_path,
                       mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate I/O
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # Creating snapshot
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(ret, 0, ("Failed to create snapshot for volume %s"
                                  % self.volname))
        g.log.info("Snapshot %s created successfully for "
                   "volume %s", self.snap, self.volname)

        # Activating created snapshots
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to activate snapshot %s"
                                  % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # Snapshot list
        self.assertIsNotNone(
            get_snap_list(self.mnode), "Failed to list snapshot")
        g.log.info("Snapshot list command Successful")

        # Creating and starting a Clone of snapshot:
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to clone %s" % self.clone_vol1)
        g.log.info("Clone volume %s created successfully", self.clone_vol1)

        # Start the clone volumes
        ret, _, _ = volume_start(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to start %s" % self.clone_vol1)
        g.log.info("%s started successfully", self.clone_vol1)

        # Form server list from the clone volume's brick hosts (deduped)
        brick_list = get_all_bricks(self.mnode, self.clone_vol1)
        for bricks in brick_list:
            self.server_lists.append(bricks.split(":")[0])
        self.server_list = list(set(self.server_lists))

        # Get volume info
        vol_info = get_volume_info(self.mnode, self.clone_vol1)
        self.assertIsNotNone(vol_info, "Failed to get vol info")
        g.log.info("Successfully in getting vol info")

        # Redefining mounts for cloned volume
        self.mount_points, self.mounts_dict_list = [], []
        for client in self.all_clients_info:
            mount = {
                'protocol': self.mount_type,
                'server': self.mnode,
                # NOTE(review): 'volname' is the parent volume, not
                # self.clone_vol1 — confirm this is intended
                'volname': self.volname,
                'client': self.all_clients_info[client],
                'mountpoint': (path.join(
                    "%s" % self.mpoint)),
                'options': ''
            }
            self.mounts_dict_list.append(mount)
        self.mount1 = create_mount_objs(self.mounts_dict_list)
        self.mount_points.append(self.mpoint)
        g.log.info("Successfully made entry in self.mount1")

        # FUSE mount clone1 volume
        for mount_obj in self.mounts:
            ret, _, _ = mount_volume(self.clone_vol1, self.mount_type,
                                     self.mpoint,
                                     self.mnode, mount_obj.client_system)
            self.assertEqual(ret, 0, "Volume mount failed for clone1")
            g.log.info("%s mounted Successfully", self.clone_vol1)

            # Validate clone volume is mounted or not
            ret = is_mounted(self.clone_vol1, self.mpoint, self.mnode,
                             mount_obj.client_system, self.mount_type)
            self.assertTrue(ret, "Volume not mounted on mount point: "
                            "%s" % self.mpoint)
            g.log.info("Volume %s mounted on %s", self.clone_vol1, self.mpoint)

        # Log Cloned Volume information
        ret = log_volume_info_and_status(self.mnode, self.clone_vol1)
        # assert on the return value — asserting on the bare message
        # string is always True and would mask a failure
        self.assertTrue(ret, "Failed to Log Info and Status of Volume "
                        "%s" % self.clone_vol1)
        g.log.info("Successfully Logged Info and Status")

        # Validate snapd running on all nodes
        self.validate_snapd(check_condition=False)

        # Enable USS
        ret, _, _ = enable_uss(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to enable USS on cloned volume")
        g.log.info("Successfully enabled USS on Cloned volume")

        # Validate USS running
        self.validate_uss()

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Stop cloned volume
        ret, _, _ = volume_stop(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to stop cloned volume "
                         "%s" % self.clone_vol1)
        g.log.info("Successfully Stopped Cloned volume %s", self.clone_vol1)

        # Validate snapd running on all nodes
        self.validate_snapd(check_condition=False)

        # Start cloned volume
        ret, _, _ = volume_start(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to start cloned volume"
                         " %s" % self.clone_vol1)
        g.log.info("Successfully started cloned volume"
                   " %s", self.clone_vol1)

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Create 5 snapshots
        self.snaps_list = [('test_snap_clone_snapd-snap%s'
                            % i)for i in range(0, 5)]
        for snapname in self.snaps_list:
            ret, _, _ = snap_create(self.mnode, self.clone_vol1,
                                    snapname)
            self.assertEqual(ret, 0, ("Failed to create snapshot for volume"
                                      " %s" % self.clone_vol1))
            g.log.info("Snapshot %s created successfully for volume "
                       "%s", snapname, self.clone_vol1)

        # Validate USS running
        self.validate_uss()

        # Check snapshot under .snaps directory
        self.check_snaps()

        # Activate Snapshots
        for snapname in self.snaps_list:
            ret, _, _ = snap_activate(self.mnode, snapname)
            self.assertEqual(ret, 0, ("Failed to activate snapshot %s"
                                      % snapname))
            g.log.info("Snapshot %s activated "
                       "successfully", snapname)

        # Validate USS running
        self.validate_uss()

        # Validate snapshots under .snaps folder
        self.validate_snaps()

        # Kill snapd on node and validate snapd except management node
        for server in self.servers[1:]:
            ret, _, _ = terminate_snapd_on_node(server)
            self.assertEqual(ret, 0, "Failed to Kill snapd on node %s"
                             % server)
            g.log.info("snapd Killed Successfully on node %s", server)

            # snapd was just killed on this node, so it must NOT be
            # running — assertFalse matches the message and log below
            ret = is_snapd_running(server, self.clone_vol1)
            self.assertFalse(ret, "Unexpected: Snapd running on node: "
                             "%s" % server)
            g.log.info("Expected: Snapd is not running on node:%s", server)

            # Check snapshots under .snaps folder
            g.log.info("Validating snapshots under .snaps")
            ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint)
            self.assertEqual(ret, 0, "Target endpoint not connected")
            g.log.info("Successfully listed snapshots under .snaps")

        # Kill snapd in management node
        ret, _, _ = terminate_snapd_on_node(self.servers[0])
        self.assertEqual(ret, 0, "Failed to Kill snapd on node %s"
                         % self.servers[0])
        g.log.info("snapd Killed Successfully on node %s", self.servers[0])

        # Validate snapd running on all nodes
        self.validate_snapd(check_condition=False)

        # With snapd down everywhere the .snaps listing must fail
        ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint)
        self.assertNotEqual(ret, 0, "Unexpected: Successfully listed "
                            "snapshots under .snaps")
        g.log.info("Expected: Target endpoint not connected")

        # Start the Cloned volume(force start)
        ret, _, _ = volume_start(self.mnode, self.clone_vol1, force=True)
        self.assertEqual(ret, 0, "Failed to start cloned volume "
                         "%s" % self.clone_vol1)
        g.log.info("Successfully Started Cloned volume %s", self.clone_vol1)

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Validate snapshots under .snaps folder
        self.validate_snaps()
# Example #12
# 0
    def test_uss_snap_restore(self):
        """
        Description:
            This test case will validate USS after Snapshot restore.
            The restored snapshot should not be listed under the '.snaps'
            directory.

        * Perform I/O on mounts
        * Enable USS on volume
        * Validate USS is enabled
        * Create a snapshot
        * Activate the snapshot
        * Perform some more I/O
        * Create another snapshot
        * Activate the second
        * Restore volume to the second snapshot
        * From mount point validate under .snaps
          - first snapshot should be listed
          - second snapshot should not be listed
        """

        # pylint: disable=too-many-statements

        def _run_io_round(base_file_name):
            # Create files on the first mount, wait for the writers to
            # finish, then stat everything that was created.
            io_cmd = ("/usr/bin/env python %s create_files "
                      "-f 10 --base-file-name %s %s" %
                      (self.script_upload_path, base_file_name,
                       self.mounts[0].mountpoint))
            io_proc = g.run_async(self.mounts[0].client_system,
                                  io_cmd,
                                  user=self.mounts[0].user)
            self.all_mounts_procs.append(io_proc)
            self.assertTrue(
                wait_for_io_to_complete(self.all_mounts_procs,
                                        self.mounts[0]),
                "IO failed on %s" % self.mounts[0])
            g.log.info("IO is successful on all mounts")
            self.assertTrue(get_mounts_stat(self.mounts),
                            "Stat failed on some of the clients")
            g.log.info("Successfully got stat of all files/dirs created")

        def _expect_snap_count(expected_count):
            # The cluster must report exactly ``expected_count`` snapshots.
            current_snaps = get_snap_list(self.mnode)
            self.assertEqual(
                expected_count, len(current_snaps),
                "No of snaps not consistent "
                "for volume %s" % self.volname)
            g.log.info("Successfully validated number of snapshots")

        # First round of I/O
        _run_io_round("firstfiles")

        # Enable USS and confirm it actually took effect
        rc, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(rc, 0, "Failed to enable USS on volume")
        g.log.info("Successfully enabled USS on volume")

        self.assertTrue(is_uss_enabled(self.mnode, self.volname),
                        "USS is disabled on volume %s" % self.volname)
        g.log.info("USS enabled on volume %s", self.volname)

        # First snapshot: create, verify count, activate
        rc, _, _ = snap_create(self.mnode, self.volname, self.snapshots[0])
        self.assertEqual(rc, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume  %s",
                   self.snapshots[0], self.volname)

        _expect_snap_count(1)

        rc, _, _ = snap_activate(self.mnode, self.snapshots[0])
        self.assertEqual(
            rc, 0, ("Failed to activate snapshot %s" % self.snapshots[0]))
        g.log.info("Snapshot %s activated successfully", self.snapshots[0])

        # Second round of I/O on a fresh proc list
        self.all_mounts_procs = []
        _run_io_round("secondfiles")

        # Second snapshot: create, verify count, activate
        rc, _, _ = snap_create(self.mnode, self.volname, self.snapshots[1])
        self.assertEqual(
            rc, 0, ("Failed to create snapshot for volume %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume  %s",
                   self.snapshots[1], self.volname)

        _expect_snap_count(2)

        rc, _, _ = snap_activate(self.mnode, self.snapshots[1])
        self.assertEqual(
            rc, 0, ("Failed to activate snapshot %s" % self.snapshots[1]))
        g.log.info("Snapshot %s activated successfully", self.snapshots[1])

        # Restore the volume to the second snapshot
        self.assertTrue(
            snap_restore_complete(self.mnode, self.volname,
                                  self.snapshots[1]),
            ("Failed to restore snap %s on the "
             "volume %s" % (self.snapshots[1], self.volname)))
        g.log.info("Restore of volume is successful from %s on "
                   "volume %s", self.snapshots[1], self.volname)

        # Every volume process, including snapd, must be back after restore
        self.assertTrue(
            verify_all_process_of_volume_are_online(self.mnode, self.volname),
            "Failed: All volume processes are not online")
        g.log.info("All volume processes are online")
        self.assertTrue(
            is_snapd_running(self.mnode, self.volname),
            "Failed: snapd is not running for volume %s" % self.volname)
        g.log.info("Successful: snapd is running")

        # Gather the .snaps listing from the mount point
        snaps_under_dot_snaps = get_uss_list_snaps(
            self.mounts[0].client_system, self.mounts[0].mountpoint)
        self.assertIsNotNone(
            snaps_under_dot_snaps,
            "Failed to list snapshots under .snaps directory")
        g.log.info("Successfully gathered list of snapshots under the .snaps"
                   " directory")

        # The first (still activated) snapshot must be listed ...
        self.assertIn(self.snapshots[0], snaps_under_dot_snaps,
                      ("Unexpected : %s not listed under .snaps "
                       "directory" % self.snapshots[0]))
        g.log.info("Activated Snapshot %s listed Successfully",
                   self.snapshots[0])

        # ... while the restored (second) snapshot must not be
        self.assertNotIn(self.snapshots[1], snaps_under_dot_snaps,
                         ("Unexpected : %s listed in .snaps "
                          "directory" % self.snapshots[1]))
        g.log.info("Restored Snapshot %s not listed ", self.snapshots[1])
    def test_validate_snaps_256(self):
        """
        Validate snapshot creation for 256 snapshots

        * Perform some IO
        * Set snapshot config option snap-max-hard-limit to 256
        * Create 256 snapshots
        * Verify 256 created successfully
        * Create 257th snapshot - creation should fail as it will
          exceed the hard-limit
        * Verify snapshot list for 256 snapshots

        """
        # pylint: disable=too-many-statements
        # Kick off IO on the first mount
        io_cmd = (
            "/usr/bin/env python %s create_files "
            "-f 10 --base-file-name firstfiles %s"
            % (self.script_upload_path,
               self.mounts[0].mountpoint))
        io_proc = g.run_async(
            self.mounts[0].client_system, io_cmd, user=self.mounts[0].user)
        self.all_mounts_procs.append(io_proc)

        # Block until the writers are done
        self.assertTrue(
            wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0]),
            "IO failed on %s" % self.mounts[0])
        g.log.info("IO is successful on all mounts")

        # Stat everything the IO created
        self.assertTrue(get_mounts_stat(self.mounts),
                        "Stat failed on some of the clients")
        g.log.info("Successfully performed stat on all files/dirs created")

        # Pin snap-max-hard-limit to 256 so the limit under test is known
        rc, _, _ = set_snap_config(self.mnode,
                                   {'snap-max-hard-limit': '256'})
        self.assertEqual(rc, 0, "Failed to set snapshot config option "
                         "snap-max-hard-limit to 256")
        g.log.info("Successfully set snapshot config option "
                   "snap-max-hard-limit to 256")

        # Create every snapshot named in self.snapshots (256 of them)
        for snap_name in self.snapshots:
            rc, _, _ = snap_create(self.mnode, self.volname, snap_name)
            self.assertEqual(rc, 0, ("Failed to create snapshot %s for %s"
                                     % (snap_name, self.volname)))
            # brief pause between creations
            sleep(1)
        g.log.info("Snapshots created successfully for volume %s",
                   self.volname)

        # Exactly 256 snapshots must now exist
        created_snaps = get_snap_list(self.mnode)
        self.assertTrue((len(created_snaps) == 256),
                        "Failed: Number of snapshots "
                        "is not consistent for volume %s" % self.volname)
        g.log.info("Successfully validated number of snapshots")

        # Every expected snapshot name must appear in the listing
        for snap_name in self.snapshots:
            self.assertIn(snap_name, created_snaps,
                          "Failed: Snapshot %s not found" % snap_name)
        g.log.info("Successfully validated snapshots existence using "
                   "snap-name")

        # A 257th snapshot must be rejected by the hard limit
        snap_257 = "snap-test-validate-256-snapshots-%s-257" % (self.volname)
        rc, _, _ = snap_create(self.mnode, self.volname, snap_257)
        self.assertEqual(
            rc, 1, ("Unexpected: Successfully created %s for  volume %s"
                    % (snap_257, self.volname)))
        g.log.info("Snapshot %s not created as it exceeds the "
                   "snap-max-hard-limit", snap_257)

        # Count must still be exactly 256
        created_snaps = get_snap_list(self.mnode)
        self.assertEqual(len(created_snaps), 256,
                         "Failed: Number of snapshots "
                         "is not consistent for volume %s" % self.volname)
        g.log.info("Successfully validated number of snapshots")
    def test_snap_delete_and_list_glusterd_down(self):
        # pylint: disable=too-many-statements
        """
        Verify that a snapshot delete survives a glusterd restart.

        Steps:

        1. create a volume
        2. mount volume
        3. create 3 snapshot of that volume
        4. delete snapshot snap1
        5. list all snapshots created
        6. restart glusterd
        7. list all snapshots created
           except snap1
        """

        # Create 3 snapshots: snap0, snap1, snap2
        g.log.info("Starting to Create snapshot")
        for snap_count in range(0, 3):
            self.snap = "snap%s" % snap_count
            ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
            self.assertEqual(ret, 0, ("Failed to create snapshot for "
                                      "volume %s" % self.volname))
            g.log.info("Snapshot %s created successfully "
                       "for volume %s", self.snap, self.volname)

        # Delete snapshot snap1
        # BUG FIX: implicit string concatenation produced
        # "Failed to deletesnapshot snap1" (missing space).
        g.log.info("Starting to Delete snapshot snap1")
        ret, _, _ = snap_delete(self.mnode, "snap1")
        self.assertEqual(ret, 0, "Failed to delete snapshot snap1")
        g.log.info("Snapshots snap1 deleted Successfully")

        # Only 2 snapshots must remain after the delete
        g.log.info("Starting to list all snapshots")
        out = get_snap_list(self.mnode)
        self.assertIsNotNone(out, "Failed to list all snapshots")
        self.assertEqual(len(out), 2, "Failed to validate snap list")
        g.log.info("Successfully validated snap list")

        # Restart glusterd on all servers
        g.log.info("Restarting Glusterd on all nodes")
        ret = restart_glusterd(self.servers)
        self.assertTrue(
            ret, "Failed to restart glusterd on nodes "
            "%s" % self.servers)
        g.log.info("Successfully restarted glusterd on nodes"
                   " %s", self.servers)

        # Poll (up to ~160s) until glusterd is running on all servers
        g.log.info("Checking glusterd is running or not")
        count = 0
        while count < 80:
            ret = is_glusterd_running(self.servers)
            if ret == 0:
                break
            time.sleep(2)
            count += 1

        self.assertEqual(
            ret, 0, "Failed to validate glusterd "
            "running on nodes %s" % self.servers)
        g.log.info("glusterd is running on nodes %s", self.servers)

        # After the restart every node must still report exactly the
        # 2 surviving snapshots (snap1 stays deleted)
        g.log.info("Starting to list all snapshots")
        for server in self.servers:
            out = get_snap_list(server)
            self.assertIsNotNone(out, "Failed to list snap in node "
                                 "%s" % server)
            self.assertEqual(
                len(out), 2, "Failed to validate snap list "
                "on node %s" % server)
            g.log.info("Successfully validated snap list on node %s", server)
# Example #15
# 0
    def test_uss_brick_down(self):

        # pylint: disable=too-many-statements
        """
        Verify USS (User Serviceable Snapshots) works with a brick down.

        Steps:
        * Create volume
        * Mount volume
        * Perform I/O on mounts
        * Bring down one brick
        * Enable USS
        * Validate USS is enabled
        * Bring the brick online using gluster v start force
        * Create 2 snapshots snapy0 & snapy1
        * Validate snap created
        * Activate snapy0 & snapy1
        * List snaps under .snaps directory
          -- snapy0 and snapy1 should be listed under .snaps
        """

        # Perform I/O
        g.log.info("Starting IO on all mounts...")
        self.counter = 1
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "/usr/bin/env python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 2 "
                "--dir-length 2 "
                "--max-num-of-dirs 2 "
                "--num-of-files 2 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        self.assertTrue(validate_io_procs(self.all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")
        self.io_validation_complete = True

        # Bring down 1 brick from brick list
        g.log.info("Getting all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        # BUG FIX: the failure message was passed as a tuple, which renders
        # as a tuple repr; format it with '%' instead.
        ret = bring_bricks_offline(self.volname, bricks_list[0])
        self.assertTrue(ret, "Failed to bring down the brick %s"
                        % bricks_list[0])
        g.log.info("Successfully brought the brick %s down", bricks_list[0])

        # Enable USS
        g.log.info("Enable USS on volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume")
        g.log.info("Successfully enabled USS on volume")

        # Validate USS is enabled
        g.log.info("Validating USS is enabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, "USS is disabled on volume %s" % self.volname)
        g.log.info("USS enabled on volume %s", self.volname)

        # Bring the brick online using gluster v start force
        g.log.info("Bring the brick online using gluster v start force")
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Volume start with force failed")
        g.log.info("Volume start with force successful")

        # Create 2 snapshots: snapy0 and snapy1
        g.log.info("Creating 2 snapshots for volume %s", self.volname)
        for i in range(0, 2):
            ret, _, _ = snap_create(self.mnode, self.volname, "snapy%s" % i)
            self.assertEqual(
                ret, 0, ("Failed to create snapshot for %s" % self.volname))
            g.log.info("Snapshot %s created successfully for volume  %s",
                       "snapy%s" % i, self.volname)

        # Check for no of snaps using snap_list it should be 2 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            2, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snaps.")

        # Activate both snapshots so they appear under .snaps
        g.log.info("Activating snapshot snapy0 & snapy1")
        for i in range(0, 2):
            ret, _, _ = snap_activate(self.mnode, "snapy%s" % i)
            self.assertEqual(ret, 0, "Failed to activate snapshot snapy%s" % i)
        g.log.info("Both snapshots activated successfully")

        # List activated snapshots directory under .snaps on every mount
        g.log.info("Listing activated snapshots under .snaps")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            validate_dir = out.split('\n')
            for i in range(0, 2):
                # BUG FIX: the '%s' placeholder in the message was never
                # filled in; supply the snapshot index.
                self.assertIn(
                    "snapy%s" % i, validate_dir, "Failed to "
                    "validate snapy%s under .snaps directory" % i)
                g.log.info("Activated Snapshot snapy%s listed Successfully", i)
    def test_snapshot_basic_commands_when_io_in_progress(self):
        """Create, List, Activate, Enable USS (User Serviceable Snapshot),
            Viewing Snap of the volume from mount, De-Activate
            when IO is in progress.

        BUG FIX throughout: assertion messages were passed as tuples
        ("msg %s", arg), which unittest renders as a tuple repr on failure;
        they are now formatted with '%' so failures read correctly.
        """
        snap_name = "snap_cvt"
        # Create Snapshot
        g.log.info("Creating snapshot %s of the volume %s", snap_name,
                   self.volname)
        ret, _, _ = snap_create(self.mnode, self.volname, snap_name)
        self.assertEqual(ret, 0,
                         "Failed to create snapshot with name %s "
                         "of the volume %s" % (snap_name, self.volname))
        g.log.info("Successfully created snapshot %s of the volume %s",
                   snap_name, self.volname)

        # List Snapshot
        g.log.info("Listing the snapshot created for the volume %s",
                   self.volname)
        snap_list = get_snap_list(self.mnode)
        self.assertIsNotNone(snap_list, "Unable to get the Snapshot list")
        self.assertIn(snap_name, snap_list,
                      "snapshot %s not listed in Snapshot list" % snap_name)
        g.log.info("Successfully listed snapshot %s in gluster snapshot list",
                   snap_name)

        # Activate the snapshot
        g.log.info("Activating snapshot %s of the volume %s", snap_name,
                   self.volname)
        ret, _, _ = snap_activate(self.mnode, snap_name)
        self.assertEqual(ret, 0,
                         "Failed to activate snapshot with name %s "
                         "of the volume %s" % (snap_name, self.volname))
        g.log.info("Successfully activated snapshot %s of the volume %s",
                   snap_name, self.volname)

        # Enable USS on the volume; CIFS additionally needs the
        # show-snapshot-directory option for .snaps to be visible.
        uss_options = ["features.uss"]
        if self.mount_type == "cifs":
            uss_options.append("features.show-snapshot-directory")
        g.log.info("Enable uss options %s on the volume %s", uss_options,
                   self.volname)
        ret = enable_and_validate_volume_options(self.mnode,
                                                 self.volname,
                                                 uss_options,
                                                 time_delay=30)
        self.assertTrue(ret, "Unable to enable uss options %s on volume %s"
                        % (uss_options, self.volname))
        g.log.info("Successfully enabled uss options %s on the volume: %s",
                   uss_options, self.volname)

        # Viewing snapshot from mount
        g.log.info("Viewing Snapshot %s from mounts:", snap_name)
        ret = view_snaps_from_mount(self.mounts, snap_name)
        self.assertTrue(ret, "Failed to View snap %s from mounts" % snap_name)
        g.log.info("Successfully viewed snap %s from mounts", snap_name)

        # De-Activate the snapshot
        g.log.info("Deactivating snapshot %s of the volume %s", snap_name,
                   self.volname)
        ret, _, _ = snap_deactivate(self.mnode, snap_name)
        self.assertEqual(ret, 0,
                         "Failed to deactivate snapshot with name %s "
                         "of the volume %s" % (snap_name, self.volname))
        g.log.info("Successfully deactivated snapshot %s of the volume %s",
                   snap_name, self.volname)

        # Viewing snapshot from mount (.snaps shouldn't be listed from mount)
        for mount_obj in self.mounts:
            g.log.info("Viewing Snapshot %s from mount %s:%s", snap_name,
                       mount_obj.client_system, mount_obj.mountpoint)
            ret = view_snaps_from_mount(mount_obj, snap_name)
            self.assertFalse(ret, "Still able to View snap %s from mount "
                             "%s:%s" % (snap_name, mount_obj.client_system,
                                        mount_obj.mountpoint))
            g.log.info("%s not listed under .snaps from mount %s:%s",
                       snap_name, mount_obj.client_system,
                       mount_obj.mountpoint)
        g.log.info(
            "%s not listed under .snaps from mounts after "
            "deactivating ", snap_name)

        # Validate IO
        g.log.info("Wait for IO to complete and validate IO ...")
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.io_validation_complete = True
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")
# Example #17
# 0
    def test_restore_online_vol(self):

        # pylint: disable=too-many-statements
        """
        Verify that restoring a started volume to a snapshot fails.

        Steps:
        1. Create volume
        2. Mount volume
        3. Perform I/O on mounts
        4. Create 1 snapshots snapy1
        5. Validate snap created
        6. Perform some more I/O
        7. Create 1 more snapshot snapy2
        8. Restore volume to snapy2
          -- Restore should fail with message
             "volume needs to be stopped before restore"
        """

        # BUG FIX: the counter was reset to 1 inside the loop, so the
        # offset of 1000 set at the end of each iteration never took
        # effect and the second IO pass reused the first pass's dir names.
        self.counter = 1

        # Performing step 3 to 7 in loop here
        for i in range(1, 3):
            # Perform I/O
            g.log.info("Starting IO on all mounts...")
            self.all_mounts_procs = []
            for mount_obj in self.mounts:
                g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                           mount_obj.mountpoint)
                # Use 'env python' to resolve the client interpreter, same
                # as the other tests in this module.
                cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                       "--dirname-start-num %d "
                       "--dir-depth 2 "
                       "--dir-length 2 "
                       "--max-num-of-dirs 2 "
                       "--num-of-files 2 %s" %
                       (self.script_upload_path, self.counter,
                        mount_obj.mountpoint))

                proc = g.run_async(mount_obj.client_system,
                                   cmd,
                                   user=mount_obj.user)
                self.all_mounts_procs.append(proc)
            self.io_validation_complete = False

            # Validate IO
            self.assertTrue(
                validate_io_procs(self.all_mounts_procs, self.mounts),
                "IO failed on some of the clients")
            self.io_validation_complete = True

            # Get stat of all the files/dirs created.
            g.log.info("Get stat of all the files/dirs created.")
            ret = get_mounts_stat(self.mounts)
            self.assertTrue(ret, "Stat failed on some of the clients")
            g.log.info("Successfully got stat of all files/dirs created")

            # Create snapshot snapy<i>
            g.log.info("Creating snapshot for volume %s", self.volname)
            ret, _, _ = snap_create(self.mnode, self.volname, "snapy%s" % i)
            self.assertEqual(
                ret, 0, ("Failed to create snapshot for %s" % self.volname))
            g.log.info("Snapshot created successfully for volume  %s",
                       self.volname)

            # Check for no of snaps using snap_list; one snap per iteration
            snap_list = get_snap_list(self.mnode)
            self.assertEqual(
                i, len(snap_list), "No of snaps not consistent "
                "for volume %s" % self.volname)
            g.log.info("Successfully validated number of snaps.")

            # Offset dir names so the next iteration's IO does not collide
            self.counter = 1000

        # Restore volume to snapshot snapy2, it should fail because the
        # volume is still started
        i = 2
        g.log.info("Starting to restore volume to snapy%s", i)
        ret, _, err = snap_restore(self.mnode, "snapy%s" % i)
        errmsg = ("snapshot restore: failed: Volume (%s) has been started. "
                  "Volume needs to be stopped before restoring a snapshot.\n" %
                  self.volname)
        # BUG FIX: log_msg was a tuple; format it so failures are readable.
        log_msg = "Expected : %s, but Returned : %s" % (errmsg, err)
        self.assertEqual(err, errmsg, log_msg)
        g.log.info("Expected : Failed to restore volume to snapy%s", i)
    def test_snap_list_glusterd_restart(self):
        """
        Verify snapshot list before and after glusterd restart

        * Create 3 snapshots of the volume
        * Delete one snapshot
        * List all snapshots created
        * Restart glusterd on all nodes
        * List all snapshots
          All snapshots must be listed except the one that was deleted
        """

        # pylint: disable=too-many-statements
        # Step 1: create one snapshot per name in self.snapshots
        for snapname in self.snapshots:
            ret, _, _ = snap_create(self.mnode, self.volname, snapname)
            self.assertEqual(
                ret, 0,
                "Failed to create snapshot %s for volume %s"
                % (snapname, self.volname))
            g.log.info("Snapshot %s created successfully for volume %s",
                       snapname, self.volname)

        # Step 2: the snap list must contain exactly the 3 created snaps
        snaps = get_snap_list(self.mnode)
        self.assertIsNotNone(snaps, "Failed to list all snapshots")
        self.assertEqual(len(snaps), 3, "Failed to validate snap list")
        g.log.info("Successfully validated snap list")
        for snapname in self.snapshots:
            self.assertIn(
                snapname, snaps,
                "Failed to validate the snapshot %s in the snapshot list"
                % snapname)
        g.log.info("Successfully validated the presence of snapshots using "
                   "snapname")

        # Step 3: remove the first snapshot
        ret, _, _ = snap_delete(self.mnode, self.snapshots[0])
        self.assertEqual(
            ret, 0, "Failed to delete snapshot %s" % self.snapshots[0])
        g.log.info("Snapshots %s deleted Successfully", self.snapshots[0])

        # Step 4: only the two surviving snaps may be listed now
        snaps = get_snap_list(self.mnode)
        self.assertIsNotNone(snaps, "Failed to list all snapshots")
        self.assertEqual(len(snaps), 2, "Failed to validate snap list")
        g.log.info("Successfully validated snap list")
        for snapname in self.snapshots[1:]:
            self.assertIn(
                snapname, snaps,
                "Failed to validate the snapshot %s in the snapshot list"
                % snapname)
        g.log.info("Successfully validated the presence of snapshots using "
                   "snapname")

        # Step 5: bounce glusterd on every server
        ret = restart_glusterd(self.servers)
        self.assertTrue(
            ret, "Failed to restart glusterd on nodes %s" % self.servers)
        g.log.info("Successfully restarted glusterd on nodes %s", self.servers)

        # Step 6: wait until glusterd is back up on all server nodes
        self.assertTrue(
            wait_for_glusterd_to_start(self.servers),
            "Unexpected: glusterd not up on one or more of the nodes")
        g.log.info("Glusterd is up and running on all nodes")

        # Step 7: the peer cluster must be healthy again
        self.assertTrue(is_peer_connected(self.mnode, self.servers),
                        "Unexpected: Peers are not in connected state")
        g.log.info("Successful: All peers are in connected state")

        # Step 8: after the restart every node must still agree on the
        # surviving snapshots (the deleted one must stay gone)
        for node in self.servers:
            snaps = get_snap_list(node)
            self.assertIsNotNone(
                snaps,
                "Failed to get the list of snapshots in node %s" % node)
            self.assertEqual(
                len(snaps), 2,
                "Unexpected: Number of snapshots not consistent in the node %s"
                % node)
            g.log.info("Successfully validated snap list for node %s", node)
            for snapname in self.snapshots[1:]:
                self.assertIn(
                    snapname, snaps,
                    "Failed to validate the snapshot %s in the snapshot list"
                    % snapname)
            g.log.info(
                "Successfully validated the presence of snapshots "
                "using snapname for node %s", node)
    def test_validate_snaps_max_limit(self):
        # pylint: disable=too-many-statements
        """
        Exercise snapshot snap-max-hard-limit / snap-max-soft-limit configs.

        * Run I/O on all mounts and stat the created files/dirs
        * Set snap-max-hard-limit to 10 and snap-max-soft-limit to 50%
        * Create snapshots up to and past the soft limit (soft limit only
          warns, so creation past it still succeeds)
        * Verify the 11th snapshot is rejected at the hard limit
        * Raise the hard limit to 20 and verify 10 more snaps succeed
        """
        # Start IO on all mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" % (
                       self.script_upload_path, count,
                       mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            # Offset the dir name start so clients do not collide
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Set config snap-max-hard-limit to 10 snaps
        cmd_str = ("gluster snapshot config snap-max-hard-limit 10"
                   " --mode=script")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0, "Failed to set snap-max-hard-limit to 10.")
        g.log.info("snap-max-hard-limit successfully set for 10.")

        # Set config snap-max-soft-limit to 50% (i.e. 5 of the 10 allowed)
        cmd_str = ("gluster snapshot config snap-max-soft-limit 50"
                   " --mode=script")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0, "Failed to set snap-max-soft-limit to 50%.")
        g.log.info("snap-max-soft-limit successfully set for 50%.")

        # Create 5 snaps (snapy1..snapy5), up to the soft limit
        for i in range(1, 6):
            cmd_str = "gluster snapshot create %s %s %s" % ("snapy%s" % i,
                                                            self.volname,
                                                            "no-timestamp")
            ret, _, _ = g.run(self.mnode, cmd_str)
            self.assertEqual(ret, 0, ("Failed to create snapshot for %s"
                                      % self.volname))
            g.log.info("Snapshot snapy%s created successfully"
                       " for volume  %s", i, self.volname)

        # Check for no. of snaps using snap_list it should be 5
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(5, len(snap_list), "Expected 5 snapshots. "
                         "Found %s snapshots" % len(snap_list))
        g.log.info("Successfully validated number of snapshots.")

        # Validate all 5 snap names created so far
        for i in range(1, 6):
            self.assertTrue(("snapy%s" % i in snap_list), "%s snap not "
                            "found " % ("snapy%s" % i))
        g.log.info("Successfully validated names of snapshots")

        # Create 6th snapshot -- past the soft limit, which only warns,
        # so creation must still succeed
        cmd_str = "gluster snapshot create %s %s %s" % ("snapy6", self.volname,
                                                        "no-timestamp")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0, ("Failed to create snap6 "
                                  "for %s" % self.volname))
        g.log.info("Snapshot 'snapy6' created as it is 6th snap")

        # Set config snap-max-soft-limit to 100%
        cmd_str = ("gluster snapshot config snap-max-soft-limit 100"
                   " --mode=script")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0, "Failed to set snap-max-soft-limit to 100%.")
        g.log.info("snap-max-soft-limit successfully set for 100%.")

        # Create 7th snapshot
        cmd_str = "gluster snapshot create %s %s %s" % ("snapy7", self.volname,
                                                        "no-timestamp")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0, ("Failed to create "
                                  "snap7 for %s" % self.volname))
        g.log.info("Snapshot 'snapy7' created as it is 7th snap")

        # Create 3 snaps (snapy8..snapy10), reaching the hard limit of 10
        for i in range(8, 11, 1):
            cmd_str = "gluster snapshot create %s %s %s" % ("snapy%s" % i,
                                                            self.volname,
                                                            "no-timestamp")
            ret, _, _ = g.run(self.mnode, cmd_str)
            self.assertEqual(ret, 0, ("Failed to create snapshot for %s"
                                      % self.volname))
            g.log.info("Snapshot snapy%s created successfully "
                       "for volume  %s", i, self.volname)

        # Check for no. of snaps using snap_list it should be 10
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(len(snap_list), 10, "Expected 10 snapshots. "
                         "found %s snapshots" % len(snap_list))
        g.log.info("Successfully validated number of snapshots.")

        # Validate all 10 snap names created
        for i in range(1, 11, 1):
            self.assertTrue(("snapy%s" % i in snap_list), "%s snap not "
                            "found " % ("snapy%s" % i))
        g.log.info("Successfully validated names of snapshots")

        # Create 11th snapshot -- must be rejected: hard limit (10) reached
        cmd_str = "gluster snapshot create %s %s %s" % ("snap", self.volname,
                                                        "no-timestamp")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertNotEqual(ret, 0, ("Unexpected: successfully created 'snap' "
                                     "for %s" % self.volname))
        g.log.info("Expected: Snapshot 'snap' not created as it is 11th snap")

        # Check for no. of snaps using snap_list it should still be 10
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(len(snap_list), 10, "Expected 10 snapshots. "
                         "found %s snapshots" % len(snap_list))
        g.log.info("Successfully validated number of snapshots.")

        # Modify config snap-max-hard-limit to 20 snaps
        cmd_str = ("gluster snapshot config snap-max-hard-limit 20"
                   " --mode=script")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0, "Failed to set snap-max-hard-limit to 20.")
        g.log.info("snap-max-hard-limit successfully set for 20.")

        # Create 10 snaps (snapy11..snapy20) under the raised hard limit
        for i in range(11, 21, 1):
            cmd_str = "gluster snapshot create %s %s %s" % ("snapy%s" % i,
                                                            self.volname,
                                                            "no-timestamp")
            ret, _, _ = g.run(self.mnode, cmd_str)
            self.assertEqual(ret, 0, ("Failed to create snapshot for %s"
                                      % self.volname))
            g.log.info("Snapshot snapy%s created successfully for "
                       "volume  %s", i, self.volname)

        # Check for no. of snaps using snap_list it should be 20
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(len(snap_list), 20, "Expected 20 snapshots. "
                         "found %s snapshots" % len(snap_list))
        g.log.info("Successfully validated number of snaps.")