Example 1
    def test_ec_uss_snapshot(self):
        """
        - Start resource consumption tool
        - Create directory dir1
        - Create 5 directories and 5 files under dir1 at the mountpoint
        - Rename all files inside dir1 at the mountpoint
        - Truncate all files in one of the dirs inside dir1
        - Create softlinks and hardlinks of files in dir1 at the mountpoint
        - Create tiny, small, medium and large files
        - Start IO
        - Enable USS
        - Create a snapshot
        - Activate the snapshot
        - List the snapshot and the contents inside it
        - Delete the snapshot
        - Create a snapshot with the same name
        - Activate the snapshot
        - List the snapshot and the contents inside it
        - Validate IO and wait for it to complete
        - Close connection and check the memory log file exists
        """
        # pylint: disable=too-many-branches,too-many-statements,too-many-locals
        # Starting resource consumption using top
        log_file_mem_monitor = '/var/log/glusterfs/mem_usage.log'
        cmd = ("for i in {1..20};do top -n 1 -b|egrep "
               "'RES|gluster' & free -h 2>&1 >> %s ;"
               "sleep 10;done" % (log_file_mem_monitor))
        g.log.info(cmd)
        cmd_list_procs = []
        for server in self.servers:
            proc = g.run_async(server, cmd)
            cmd_list_procs.append(proc)

        # Creating dir1
        ret = mkdir(self.mounts[0].client_system,
                    "%s/dir1" % self.mounts[0].mountpoint)
        self.assertTrue(ret, "Failed to create dir1")
        g.log.info("Directory dir1 on %s created successfully", self.mounts[0])

        # Create 5 dirs under dir1 and 5 files in each dir at the mountpoint
        start, end = 1, 5
        for mount_obj in self.mounts:
            # Number of dir and files to be created.
            dir_range = ("%s..%s" % (str(start), str(end)))
            file_range = ("%s..%s" % (str(start), str(end)))
            # Create dir 1-5 at mountpoint.
            ret = mkdir(mount_obj.client_system,
                        "%s/dir1/dir{%s}" % (mount_obj.mountpoint, dir_range))
            self.assertTrue(ret, "Failed to create directory")
            g.log.info("Directory created successfully")

            # Create files inside each dir.
            cmd = ('touch %s/dir1/dir{%s}/file{%s};' %
                   (mount_obj.mountpoint, dir_range, file_range))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "File creation failed")
            g.log.info("File created successfull")

            # Increment counter so that at next client dir and files are made
            # with diff offset. Like at next client dir will be named
            # dir6, dir7...dir10. Same with files.
            start += 5
            end += 5

        # Rename all files inside dir1/dir1 at the mountpoint
        cmd = ('cd %s/dir1/dir1/; '
               'for FILENAME in *;'
               'do mv $FILENAME Unix_$FILENAME;'
               'done;' % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to rename file on " "client")
        g.log.info("Successfully renamed file on client")

        # Truncate all files in one dir per client inside dir1.
        # 'start' is an offset added to the dirname so that different
        # files are acted on at different clients.
        start = 1
        for mount_obj in self.mounts:
            cmd = ('cd %s/dir1/dir%s/; '
                   'for FILENAME in *;'
                   'do echo > $FILENAME;'
                   'done;' % (mount_obj.mountpoint, str(start)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Truncate failed")
            g.log.info("Truncate of files successfull")

        # Create softlink and hardlink of files in mountpoint. Start is an
        # offset to be added to dirname to act on diff files at diff clients.
        start = 1
        for mount_obj in self.mounts:
            cmd = ('cd %s/dir1/dir%s; '
                   'for FILENAME in *; '
                   'do ln -s $FILENAME softlink_$FILENAME;'
                   'done;' % (mount_obj.mountpoint, str(start)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Creating Softlinks have failed")
            g.log.info("Softlink of files have been changed successfully")

            cmd = ('cd %s/dir1/dir%s; '
                   'for FILENAME in *; '
                   'do ln $FILENAME hardlink_$FILENAME;'
                   'done;' % (mount_obj.mountpoint, str(start + 1)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Creating Hardlinks have failed")
            g.log.info("Hardlink of files have been changed successfully")
            start += 5

        # Create tiny, small, medium and large file
        # at mountpoint. Offset to differ filenames
        # at diff clients.
        offset = 1
        for mount_obj in self.mounts:
            cmd = ('cd %s; fallocate -l 100 tiny_file%s.txt' %
                   (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for tiny files failed")
            g.log.info("Fallocate for tiny files successful")

            cmd = ('cd %s; fallocate -l 20M small_file%s.txt' %
                   (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for small files failed")
            g.log.info("Fallocate for small files successful")

            cmd = ('cd %s; fallocate -l 200M medium_file%s.txt' %
                   (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for medium files failed")
            g.log.info("Fallocate for medium files successful")

            cmd = ('cd %s; fallocate -l 1G large_file%s.txt' %
                   (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for large files failed")
            g.log.info("Fallocate for large files successful")
            offset += 1

        # Create deep dirs with files on the client side inside dir1
        # (write IO)
        all_mounts_procs, count = [], 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s/dir1" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count += 10

        # Enable USS
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume")
        g.log.info("Successfully enabled USS on volume")

        # Create Snapshot
        ret, _, _ = snap_create(self.mnode,
                                self.volname,
                                "ec_snap",
                                timestamp=False)
        self.assertEqual(ret, 0, "Failed to create snapshot ec_snap")
        g.log.info("Snapshot ec_snap of volume %s created"
                   "successfully.", self.volname)

        # Activate snapshot
        ret, _, _ = snap_activate(self.mnode, "ec_snap")
        self.assertEqual(ret, 0, "Failed to activate snapshot ec_snap")
        g.log.info("Snapshot activated successfully")

        # Wait briefly, then list contents inside the snapshot
        sleep(5)
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            self.assertIn(
                "ec_snap", out.split("\n"), "Failed to "
                "validate ec_snap under .snaps directory")
            g.log.info("Activated Snapshot listed Successfully")

        # Delete Snapshot ec_snap
        ret, _, _ = snap_delete(self.mnode, "ec_snap")
        self.assertEqual(ret, 0, "Failed to delete snapshot")
        g.log.info("Snapshot deleted Successfully")

        # Creating snapshot with the same name
        ret, _, _ = snap_create(self.mnode,
                                self.volname,
                                "ec_snap",
                                timestamp=False)
        self.assertEqual(ret, 0, "Failed to create snapshot ec_snap")
        g.log.info("Snapshot ec_snap of volume %s created"
                   "successfully.", self.volname)

        # Activate snapshot ec_snap
        ret, _, _ = snap_activate(self.mnode, "ec_snap")
        self.assertEqual(ret, 0, "Failed to activate snapshot ec_snap")
        g.log.info("Snapshot activated successfully")

        # List contents inside ec_snap and wait before listing
        sleep(5)
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            self.assertIn(
                "ec_snap", out.split('\n'), "Failed to "
                "validate ec_snap under .snaps directory")
            g.log.info("Activated Snapshot listed Successfully")

        # Validate IO and wait for it to complete
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all IO")

        # Close connection and check file exist for memory log
        ret = file_exists(self.mnode, '/var/log/glusterfs/mem_usage.log')
        self.assertTrue(ret, "Unexpected:Memory log file does " "not exist")
        g.log.info("Memory log file exists")
        for proc in cmd_list_procs:
            ret, _, _ = proc.async_communicate()
            self.assertEqual(ret, 0, "Memory logging failed")
            g.log.info("Memory logging is successful")
    def test_snap_rebalance(self):
        # pylint: disable=too-many-statements, too-many-locals
        """

        Snapshot rebalance contains tests which verifies snapshot clone,
        creating snapshot and performing I/O on mountpoints

        Steps:

        1. Create snapshot of a volume
        2. Activate snapshot
        3. Clone snapshot and Activate
        4. Mount Cloned volume
        5. Perform I/O on mount point
        6. Calculate areequal for bricks and mountpoints
        7. Add-brick more brick to cloned volume
        8. Initiate Re-balance
        9. validate areequal of bricks and mountpoints
        """

        # Creating snapshot:
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(
            ret, 0, ("Failed to create snapshot for volume %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume %s", self.snap,
                   self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0,
                         ("Failed to Activate snapshot %s" % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # Creating a Clone of snapshot:
        g.log.info("creating Clone Snapshot")
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone)
        self.assertEqual(ret, 0, ("Failed to clone volume %s" % self.clone))
        g.log.info("clone volume %s created successfully", self.clone)

        # Starting clone volume
        g.log.info("starting clone volume")
        ret, _, _ = volume_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to start %s" % self.clone)
        g.log.info("clone volume %s started successfully", self.clone)

        # Mounting a clone volume
        g.log.info("Mounting created clone volume")
        ret, _, _ = mount_volume(self.clone, self.mount_type, self.mount1,
                                 self.mnode, self.clients[0])
        self.assertEqual(ret, 0,
                         "clone Volume mount failed for %s" % self.clone)
        g.log.info("cloned volume %s mounted Successfully", self.clone)

        # Validate clone volume mounted or not
        g.log.info("Validate clone volume mounted or not")
        ret = is_mounted(self.clone, self.mount1, self.mnode, self.clients[0],
                         self.mount_type)
        self.assertTrue(
            ret, "Cloned Volume not mounted on mount point: %s" % self.mount1)
        g.log.info("Cloned Volume %s mounted on %s", self.clone, self.mount1)

        # Write files to mountpoint
        g.log.info("Starting IO on %s mountpoint...", self.mount1)
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name file %s" %
               (self.script_upload_path, self.mount1))
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "IO failed on mountpoint %s" % self.mount1)
        g.log.info("IO completed on mountpoint %s", self.mount1)

        self.check_arequal()

        # Expanding the cloned volume
        g.log.info("Starting to expand the cloned volume")
        ret = expand_volume(self.mnode, self.clone, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, "Failed to expand volume %s" % self.clone)
        g.log.info("Expand volume successful")

        ret, _, _ = rebalance_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to start rebalance")
        g.log.info("Successfully started rebalance on the "
                   "volume %s", self.clone)

        # Log Rebalance status
        g.log.info("Log Rebalance status")
        _, _, _ = rebalance_status(self.mnode, self.clone)

        # Wait for rebalance to complete
        g.log.info("Waiting for rebalance to complete")
        ret = wait_for_rebalance_to_complete(self.mnode, self.clone)
        self.assertTrue(ret, ("Rebalance is not yet complete "
                              "on the volume %s", self.clone))
        g.log.info("Rebalance is successfully complete on "
                   "the volume %s", self.clone)

        # Check Rebalance status after rebalance is complete
        g.log.info("Checking Rebalance status")
        ret, _, _ = rebalance_status(self.mnode, self.clone)
        self.assertEqual(ret, 0, ("Failed to get rebalance status for "
                                  "the volume %s", self.clone))
        g.log.info("Successfully got rebalance status of the "
                   "volume %s", self.clone)

        self.check_arequal()
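
    # test_snap_rebalance calls a check_arequal helper defined elsewhere in
    # the class. A plausible sketch of what such a helper does, modeled on
    # the arequal-checksum invocations used later in this file (the real
    # helper may differ):
    def _example_check_arequal(self):
        # Compare the checksum of every brick of each subvol of the clone
        # against the first brick of that subvol
        subvols = get_subvols(self.mnode, self.clone)
        for subvol in subvols['volume_subvols']:
            totals = []
            for brick in subvol:
                node, brick_path = brick.split(':')
                cmd = ('arequal-checksum -p %s '
                       '-i .glusterfs -i .landfill -i .trashcan' % brick_path)
                ret, out, _ = g.run(node, cmd)
                self.assertEqual(ret, 0, 'arequal failed on %s' % brick)
                totals.append(out.splitlines()[-1].split(':')[-1])
            self.assertEqual(len(set(totals)), 1,
                             'Arequals differ within subvol %s' % subvol)
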
    def test_snap_glusterd_down(self):
        # pylint: disable=too-many-statements
        """
        Steps:

        1. create a volume
        2. mount volume
        3. create snapshot of that volume
        4. validate using snapshot info
        5. Activate snapshot
        6. List all snapshots present
        7. validate using snapshot info
        8. Stop glusterd on one node
        9. Check glusterd status
       10. deactivate created snapshot
       11. Start glusterd on that node
       12. Check glusterd status
       13. validate using snapshot info
       13. Check all peers are connected

        """
        # Creating snapshot:
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(ret, 0, ("Failed to create snapshot %s for volume %s"
                                  % (self.snap, self.volname)))
        g.log.info("Snapshot %s created successfully "
                   "for volume %s", self.snap, self.volname)

        # Check snapshot info
        g.log.info("Checking snapshot info")
        snap_info = get_snap_info_by_snapname(self.mnode, self.snap)
        self.assertIsNotNone(snap_info, "Failed to get snap information"
                             "for snapshot %s" % self.snap)
        status = snap_info['snapVolume']['status']
        self.assertNotEqual(status, 'Started', "Unexpected: snapshot %s is "
                            "already in Started state" % self.snap)
        g.log.info("Successfully checked snapshot info")

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to Activate snapshot %s"
                                  % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # snapshot list
        g.log.info("Starting to validate list of snapshots")
        snap_list1 = get_snap_list(self.mnode)
        self.assertIsNotNone(snap_list1, "Failed to list all the snapshot")
        self.assertEqual(len(snap_list1), 1, "Failed to validate snap list")
        g.log.info("Snapshot list successfully validated")

        # Check snapshot info
        g.log.info("Checking snapshot info")
        snap_info = get_snap_info_by_snapname(self.mnode, self.snap)
        status = snap_info['snapVolume']['status']
        self.assertEqual(status, 'Started', "Snapshot %s is not in "
                         "Started state" % self.snap)
        g.log.info("Successfully checked snapshot info")

        # Stop Glusterd on one node
        g.log.info("Stopping Glusterd on one node")
        ret = stop_glusterd(self.servers[1])

        # Check Glusterd status
        g.log.info("Check glusterd running or not")
        count = 0
        while count < 80:
            ret = is_glusterd_running(self.servers[1])
            if ret == 1:
                break
            time.sleep(2)
            count += 2
        self.assertEqual(ret, 1, "Unexpected: glusterd running on node %s" %
                         self.servers[1])
        g.log.info("Expected: Glusterd not running on node %s",
                   self.servers[1])

        # de-activating snapshot
        g.log.info("Starting to de-activate Snapshot")
        ret, _, _ = snap_deactivate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to deactivate snapshot %s"
                                  % self.snap))
        g.log.info("Snapshot %s deactivated successfully", self.snap)

        # validate snapshot info
        g.log.info("Checking snapshot info")
        snap_info = get_snap_info_by_snapname(self.mnode, self.snap)
        status = snap_info['snapVolume']['status']
        self.assertNotEqual(status, 'Started', "Snapshot %s is still in "
                            "Started state" % self.snap)
        g.log.info("Successfully validated snapshot info")

        # Start Glusterd on node
        g.log.info("Starting Glusterd on node %s", self.servers[1])
        ret = start_glusterd(self.servers[1])
        self.assertTrue(ret, "Failed to start glusterd on %s node"
                        % self.servers[1])
        g.log.info("Successfully started glusterd on "
                   "%s node", self.servers[1])

        # Check Glusterd status
        g.log.info("Check glusterd running or not")
        count = 0
        while count < 80:
            ret = is_glusterd_running(self.servers[1])
            if ret:
                break
            time.sleep(2)
            count += 2
        self.assertEqual(ret, 0, "glusterd not running on node %s "
                         % self.servers[1])
        g.log.info("glusterd is running on %s node",
                   self.servers[1])

        # validate snapshot info
        g.log.info("Checking snapshot info")
        snap_info = get_snap_info_by_snapname(self.mnode, self.snap)
        self.assertIsNotNone(snap_info, "Failed to get snap info for"
                             " snapshot %s" % self.snap)
        status = snap_info['snapVolume']['status']
        self.assertNotEqual(status, 'Started', "Unexpected: snapshot %s is "
                            "in Started state after glusterd restart"
                            % self.snap)
        g.log.info("Successfully validated snapshot info")

        # Check all the peers are in connected state
        g.log.info("Validating all the peers are in connected state")
        for server in self.servers:
            count = 0
            while count < 80:
                ret = is_peer_connected(self.mnode, server)
                if ret:
                    break
                time.sleep(2)
                count += 2
            self.assertTrue(ret, "Node %s is not in connected state" % server)
        g.log.info("Successfully validated all the peers")
    def test_clone_delete_snap(self):
        """
        Clone from snap of one volume:
        * Create and mount the volume
        * Enable some volume options
        * Create 2 snapshots and activate them
        * Reset the volume
        * Create a clone of each snapshot created
        * Mount both the clones
        * Perform I/O on the mount points
        * Check volume options of the cloned volumes
        * Create a snapshot of each cloned volume
        * Cleanup snapshots and volumes
        """

        # pylint: disable=too-many-statements, too-many-locals
        # Enabling Volume options on the volume and validating
        g.log.info("Enabling volume options for volume %s ", self.volname)
        options = {" features.uss": "enable"}
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(
            ret, ("Failed to set volume options for volume %s" % self.volname))
        g.log.info("Successfully set volume options"
                   "for volume %s", self.volname)

        # Validate features.uss is enabled
        g.log.info("Validating features.uss is enabled")
        option = "features.uss"
        vol_option = get_volume_options(self.mnode, self.volname, option)
        self.assertEqual(vol_option['features.uss'], 'enable',
                         "Failed to validate volume options")
        g.log.info("Successfully validated volume options "
                   "for volume %s", self.volname)

        # Creating snapshot
        g.log.info("Starting to Create snapshot")
        for snap_count in range(0, 2):
            ret, _, _ = snap_create(self.mnode, self.volname,
                                    "snap%s" % snap_count)
            self.assertEqual(
                ret, 0,
                ("Failed to create snapshot for volume %s" % self.volname))
            g.log.info("Snapshot snap%s created successfully"
                       "for volume %s", snap_count, self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        for snap_count in range(0, 2):
            ret, _, _ = snap_activate(self.mnode, "snap%s" % snap_count)
            self.assertEqual(
                ret, 0, ("Failed to Activate snapshot snap%s" % snap_count))
            g.log.info("Snapshot snap%s activated successfully", snap_count)

        # Reset volume:
        g.log.info("Starting to Reset Volume")
        ret, _, _ = volume_reset(self.mnode, self.volname, force=False)
        self.assertEqual(ret, 0, ("Failed to reset volume %s" % self.volname))
        g.log.info("Reset Volume on volume %s is Successful", self.volname)

        # Validate features.uss is disabled after the reset
        g.log.info("Validating features.uss is disabled")
        option = "features.uss"
        vol_option = get_volume_options(self.mnode, self.volname, option)
        self.assertEqual(vol_option['features.uss'], 'off',
                         "Failed to validate volume options")
        g.log.info("Successfully validated volume options "
                   "for volume %s", self.volname)

        # Verify volume's all process are online
        g.log.info("Starting to Verify volume's all process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(ret, ("Volume %s : All process are"
                              "not online" % self.volname))
        g.log.info("Volume %s : All process are online", self.volname)

        # Creating and starting a Clone of snapshot
        g.log.info("Starting to Clone Snapshot")
        for clone_count in range(0, 2):
            ret, _, _ = snap_clone(self.mnode, "snap%s" % clone_count,
                                   "clone%s" % clone_count)
            self.assertEqual(ret, 0,
                             ("Failed to clone clone%s volume" % clone_count))
            g.log.info("clone%s volume created successfully", clone_count)

        # Start Cloned volume
        g.log.info("starting to Validate clone volumes are started")
        for clone_count in range(0, 2):
            ret, _, _ = volume_start(self.mnode, "clone%s" % clone_count)
            self.assertEqual(ret, 0, ("Failed to start clone%s" % clone_count))
            g.log.info("clone%s started successfully", clone_count)
        g.log.info("All the clone volumes are started Successfully")

        # Validate Volume start of cloned volume
        g.log.info("Starting to Validate Volume start")
        for clone_count in range(0, 2):
            vol_info = get_volume_info(self.mnode, "clone%s" % clone_count)
            if vol_info["clone%s" % clone_count]['statusStr'] != 'Started':
                raise ExecutionError("Failed to get volume info for clone%s" %
                                     clone_count)
            g.log.info("Volume clone%s is in Started state", clone_count)

        # Validate features.uss is enabled on the clones
        g.log.info("Validating features.uss is enabled")
        option = "features.uss"
        for clone_count in range(0, 2):
            vol_option = get_volume_options(self.mnode,
                                            "clone%s" % clone_count, option)
            self.assertEqual(vol_option['features.uss'], 'enable',
                             "Failed to validate volume options")
            g.log.info("Successfully validated volume options "
                       "for volume clone%s", clone_count)

        # Mount both the cloned volumes
        g.log.info("Mounting Cloned Volumes")
        for mount_obj in range(0, 2):
            self.mpoint = "/mnt/clone%s" % mount_obj
            cmd = "mkdir -p  %s" % self.mpoint
            ret, _, _ = g.run(self.clients[0], cmd)
            self.assertEqual(ret, 0, ("Creation of directory %s"
                                      "for mounting"
                                      "volume %s failed: Directory already"
                                      "present" %
                                      (self.mpoint, "clone%s" % mount_obj)))
            g.log.info(
                "Creation of directory %s for mounting volume %s "
                "success", self.mpoint, ("clone%s" % mount_obj))
            ret, _, _ = mount_volume("clone%s" % mount_obj, self.mount_type,
                                     self.mpoint, self.mnode, self.clients[0])
            self.assertEqual(ret, 0, ("clone%s is not mounted" % mount_obj))
            g.log.info("clone%s is mounted Successfully", mount_obj)

        # Perform I/O on both mounts and validate it
        g.log.info("Starting to perform I/O on mountpoints")
        for mount_obj in range(0, 2):
            cmd = ("cd /mnt/clone%s/; for i in {1..10};"
                   "do touch file$i; done; cd;") % mount_obj
            ret, _, _ = g.run(self.clients[0], cmd)
            self.assertEqual(ret, 0, "I/O failed on clone%s" % mount_obj)
        g.log.info("I/O on mountpoints is successful")

        # create snapshot
        g.log.info("Starting to Create snapshot of clone volume")
        ret0, _, _ = snap_create(self.mnode, "clone0", "snap2")
        self.assertEqual(ret0, 0, "Failed to create the snapshot "
                         "snap2 from clone0")
        g.log.info("Snapshot snap2 created successfully from clone0")
        ret1, _, _ = snap_create(self.mnode, "clone1", "snap3")
        self.assertEqual(ret1, 0, "Failed to create the snapshot snap3 "
                         "from clone1")
        g.log.info("Snapshot snap3 created successfully from clone1")

        # Listing all Snapshots present
        g.log.info("Starting to list all snapshots")
        ret, _, _ = snap_list(self.mnode)
        self.assertEqual(ret, 0, ("Failed to list snapshots present"))
        g.log.info("Snapshots successfully listed")
    def test_snap_info_from_detached_node(self):
        # pylint: disable=too-many-statements
        """
        Create a volume with a single brick
        Create a snapshot
        Activate the created snapshot
        Enable uss on the volume
        Validate snap info on all the nodes
        Peer detach one node
        Validate /var/lib/glusterd/snaps on the detached node
        Probe the detached node
        """

        # Creating volume with single brick on one node
        servers_info_single_node = {
            self.servers[0]: self.all_servers_info[self.servers[0]]
        }
        bricks_list = form_bricks_list(self.mnode, self.volname, 1,
                                       self.servers[0],
                                       servers_info_single_node)
        ret, _, _ = volume_create(self.servers[0], self.volname, bricks_list)
        self.assertEqual(ret, 0, "Volume creation failed")
        g.log.info("Volume %s created successfully", self.volname)

        # Creating a snapshot without starting the volume should fail
        self.snapname = "snap1"
        ret, _, _ = snap_create(self.mnode,
                                self.volname,
                                self.snapname,
                                timestamp=False)
        self.assertNotEqual(ret, 0,
                            "Snapshot created without starting the volume")
        g.log.info("Snapshot creation failed as expected")

        # Start the volume
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0,
                         "Failed to start the volume %s" % self.volname)
        g.log.info("Volume start succeeded")

        # Create a snapshot of the volume after volume start
        ret, _, _ = snap_create(self.mnode,
                                self.volname,
                                self.snapname,
                                timestamp=False)
        self.assertEqual(
            ret, 0, "Snapshot creation failed on the volume %s" % self.volname)
        g.log.info("Snapshot create succeeded")

        # Activate snapshot created
        ret, _, err = snap_activate(self.mnode, self.snapname)
        self.assertEqual(
            ret, 0, "Snapshot activate failed with following error %s" % (err))
        g.log.info("Snapshot activated successfully")

        # Enable uss
        self.vol_options['features.uss'] = 'enable'
        ret = set_volume_options(self.mnode, self.volname, self.vol_options)
        self.assertTrue(
            ret, "gluster volume set %s features.uss "
            "enable failed" % self.volname)
        g.log.info("gluster volume set %s features.uss "
                   "enable successfully", self.volname)

        # Validate the snap directory under /var/lib/glusterd/snaps exists
        # on all the servers
        self.pathname = "/var/lib/glusterd/snaps/%s" % self.snapname
        for server in self.servers:
            conn = g.rpyc_get_connection(server)
            ret = conn.modules.os.path.isdir(self.pathname)
            self.assertTrue(
                ret, "%s directory doesn't exist on node %s" %
                (self.pathname, server))
            g.log.info("%s path exists on node %s", self.pathname, server)
        g.rpyc_close_deployed_servers()

        # Peer detach one node
        self.random_node_peer_detach = random.choice(self.servers[1:])
        ret = peer_detach_servers(self.mnode,
                                  self.random_node_peer_detach,
                                  validate=True)
        self.assertTrue(
            ret,
            "Peer detach of node: %s failed" % self.random_node_peer_detach)
        g.log.info("Peer detach succeeded")

        # The /var/lib/glusterd/snaps/<snapname> directory should not be
        # present on the detached node
        conn = g.rpyc_get_connection(self.random_node_peer_detach)
        ret = conn.modules.os.path.isdir(self.pathname)
        self.assertFalse(
            ret, "%s directory should not exist on peer %s, "
            "which is detached from the cluster" %
            (self.pathname, self.random_node_peer_detach))
        g.log.info("Expected: %s path doesn't exist on peer detached node %s",
                   self.pathname, self.random_node_peer_detach)
        g.rpyc_close_deployed_servers()

        # Peer probe the detached node
        ret, _, _ = peer_probe(self.mnode, self.random_node_peer_detach)
        self.assertEqual(
            ret, 0,
            "Peer probe of node: %s failed" % self.random_node_peer_detach)
        g.log.info("Peer probe succeeded")

        # Validating peers are in connected state
        count = 0
        while count < 10:
            sleep(2)
            ret = self.validate_peers_are_connected()
            if ret:
                break
            count += 1
        self.assertTrue(ret, "Peers are not in connected state")
        g.log.info("Peer are in connected state")
    def test_snap_self_heal(self):
        """
        Steps:

        1. Create a volume
        2. Mount the volume
        3. Create a snapshot of that volume
        4. Activate the snapshot
        5. Clone the snapshot and mount the clone
        6. Perform I/O
        7. Bring down a few bricks from the volume without
           affecting the volume or cluster
        8. Perform I/O
        9. Bring the bricks back online
        10. Validate heal is complete with arequal
        """
        # pylint: disable=too-many-statements, too-many-locals
        # Creating snapshot:
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(
            ret, 0, ("Failed to create snapshot for volume %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume %s", self.snap,
                   self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0,
                         ("Failed to Activate snapshot %s" % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # snapshot list
        ret, _, _ = snap_list(self.mnode)
        self.assertEqual(ret, 0, ("Failed to list all the snapshot"))
        g.log.info("Snapshot list command was successful")

        # Creating a Clone volume from snapshot:
        g.log.info("Starting to Clone volume from Snapshot")
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone)
        self.assertEqual(ret, 0, ("Failed to clone %s from snapshot %s" %
                                  (self.clone, self.snap)))
        g.log.info("%s created successfully", self.clone)

        # Start the clone volume
        g.log.info("Starting the clone volume")
        ret, _, _ = volume_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to start clone %s" % self.clone)
        g.log.info("clone volume %s started successfully", self.clone)

        # Mounting a clone volume
        g.log.info("Mounting a clone volume")
        ret, _, _ = mount_volume(self.clone, self.mount_type, self.mount1,
                                 self.mnode, self.clients[0])
        self.assertEqual(ret, 0,
                         "Failed to mount clone Volume %s" % self.clone)
        g.log.info("Clone volume %s mounted Successfully", self.clone)

        # Checking cloned volume mounted or not
        ret = is_mounted(self.clone, self.mount1, self.mnode, self.clients[0],
                         self.mount_type)
        self.assertTrue(
            ret,
            "Failed to mount clone volume on mount point: %s" % self.mount1)
        g.log.info("clone Volume %s mounted on %s", self.clone, self.mount1)

        # Write files on the mount
        g.log.info("Starting IO on mount %s...", self.mount1)
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name file %s" %
               (self.script_upload_path, self.mount1))
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "IO failed on mount %s" % self.mount1)
        g.log.info("Successful in creating I/O on mount")

        # get the bricks from the volume
        g.log.info("Fetching bricks for the volume : %s", self.clone)
        bricks_list = get_all_bricks(self.mnode, self.clone)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Brick List : %s", bricks_list)

        # Select bricks to bring offline
        g.log.info("Starting to bring bricks offline")
        bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
            self.mnode, self.clone))
        bricks_to_bring_offline = list(filter(
            None, (bricks_to_bring_offline_dict['hot_tier_bricks'] +
                   bricks_to_bring_offline_dict['cold_tier_bricks'] +
                   bricks_to_bring_offline_dict['volume_bricks'])))
        g.log.info("Bricks to bring offline: %s ", bricks_to_bring_offline)
        ret = bring_bricks_offline(self.clone, bricks_to_bring_offline)
        self.assertTrue(ret, "Failed to bring the bricks offline")
        g.log.info("Successful in bringing bricks: %s offline",
                   bricks_to_bring_offline)

        # Offline Bricks list
        offline_bricks = get_offline_bricks_list(self.mnode, self.clone)
        self.assertIsNotNone(
            offline_bricks, "Failed to get offline bricklist"
            "for volume %s" % self.clone)
        for bricks in offline_bricks:
            self.assertIn(bricks, bricks_to_bring_offline,
                          "Failed to validate "
                          "Bricks offline")
        g.log.info("Bricks Offline: %s", offline_bricks)

        # Online Bricks list
        online_bricks = get_online_bricks_list(self.mnode, self.clone)
        self.assertIsNotNone(
            online_bricks, "Failed to get online bricks"
            " for volume %s" % self.clone)
        g.log.info("Bricks Online: %s", online_bricks)

        # Write files on the mount while the bricks are down
        g.log.info("Starting IO on mount %s...", self.mount1)
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name file %s" %
               (self.script_upload_path, self.mount1))
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "IO failed on mount %s" % self.mount1)
        g.log.info("Successful in creating I/O on mount")

        # Bring all bricks online
        g.log.info("bring all bricks online")
        ret = bring_bricks_online(self.mnode, self.clone,
                                  bricks_to_bring_offline)
        self.assertTrue(ret, "Failed to bring bricks online")
        g.log.info("Successful in bringing all bricks online")

        # Validate Bricks are online
        g.log.info("Validating all bricks are online")
        ret = are_bricks_online(self.mnode, self.clone, bricks_list)
        self.assertTrue(ret, "Failed to bring all the bricks online")
        g.log.info("bricks online: %s", bricks_list)

        # Wait for volume processes to be online
        g.log.info("Wait for volume processes to be online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.clone)
        self.assertTrue(ret, ("Failed to wait for volume %s processes to "
                              "be online" % self.clone))
        g.log.info(
            "Successful in waiting for volume %s processes to be "
            "online", self.clone)

        # Verify volume's all process are online
        g.log.info("Verifying volume's all process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.clone)
        self.assertTrue(
            ret, ("Volume %s : All process are not online" % self.clone))
        g.log.info("Volume %s : All process are online", self.clone)

        # Wait for the heal process to complete on the clone volume
        g.log.info("Waiting for heal process to complete")
        ret = monitor_heal_completion(self.mnode, self.clone)
        self.assertTrue(ret, "Failed to complete the heal process")
        g.log.info("Successfully completed heal process")

        # Check arequal
        # Get the subvolumes
        g.log.info("Starting to get sub-volumes for volume %s", self.clone)
        subvols = get_subvols(self.mnode, self.clone)
        num_subvols = len(subvols['volume_subvols'])
        g.log.info("Number of subvolumes in volume %s: %s", self.clone,
                   num_subvols)

        # Get arequals and compare
        g.log.info("Starting to compare arequals")
        for i in range(0, num_subvols):
            # Get arequal for the first brick
            subvol_brick_list = subvols['volume_subvols'][i]
            node, brick_path = subvol_brick_list[0].split(':')
            command = ('arequal-checksum -p %s '
                       '-i .glusterfs -i .landfill -i .trashcan' % brick_path)
            ret, arequal, _ = g.run(node, command)
            self.assertFalse(ret, 'Failed to get arequal on brick %s' %
                             subvol_brick_list[0])
            first_brick_total = arequal.splitlines()[-1].split(':')[-1]

            # Get arequal for every brick and compare with the first brick
            for brick in subvol_brick_list:
                node, brick_path = brick.split(':')
                command = ('arequal-checksum -p %s '
                           '-i .glusterfs -i .landfill -i .trashcan' %
                           brick_path)
                ret, brick_arequal, _ = g.run(node, command)
                self.assertFalse(ret, 'Failed to get arequal on brick %s'
                                 % brick)
                g.log.info('Getting arequal for %s is successful', brick)
                brick_total = brick_arequal.splitlines()[-1].split(':')[-1]
                self.assertEqual(
                    first_brick_total, brick_total,
                    'Arequals for subvol and %s are not equal' % brick)
                g.log.info('Arequals for subvol and %s are equal', brick)
        g.log.info('All arequals are equal for distributed-replicated')
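
    # For completeness, the mountpoint checksum can be collected the same
    # way and compared against the brick totals computed above (a sketch;
    # assumes arequal-checksum is installed on the client):
    def _example_mount_arequal(self):
        cmd = ('arequal-checksum -p %s '
               '-i .glusterfs -i .landfill -i .trashcan' % self.mount1)
        ret, out, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, 'arequal failed on mount %s' % self.mount1)
        return out.splitlines()[-1].split(':')[-1]
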
Example 7
    def test_snap_uss_while_io(self):
        # pylint: disable=too-many-statements
        """
        Steps:
        1. Create volume
        2. Enable uss on the created volume
        3. Validate uss is running
        4. Validate snapd is running on all nodes
        5. Perform IO on mounts
        6. Create 10 snapshots with description
        7. Validate with snapshot list
        8. Validate IO is completed
        9. Activate snapshots to list all snaps
           under .snaps
        10. Validate snapshots under the .snaps directory
        """
        # Enable USS
        g.log.info("Enable USS for volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume"
                         "%s" % self.volname)
        g.log.info("Successfully enabled USS on volume %s", self.volname)

        # Validate USS running
        g.log.info("Validating USS enabled or disabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(
            ret, "Failed to validate USS for volume "
            "%s" % self.volname)
        g.log.info("Successfully validated USS for Volume" "%s", self.volname)

        # Validate snapd running
        for server in self.servers:
            g.log.info("Validating snapd daemon on:%s", server)
            ret = is_snapd_running(server, self.volname)
            self.assertTrue(ret, "Snapd is Not running on " "%s" % server)
            g.log.info("Snapd Running on node: %s", server)

        # Perform I/O
        g.log.info("Starting to Perform I/O")
        all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Generating data for %s:"
                       "%s", mount_obj.client_system, mount_obj.mountpoint)
            # Create files
            g.log.info('Creating files...')
            command = (
                "python %s create_files -f 100 --fixed-file-size 1M %s" %
                (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               command,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Creating snapshot with description
        g.log.info("Starting to Create snapshot")
        for count in range(0, self.snap_count):
            self.snap = "snap%s" % count
            ret, _, _ = snap_create(self.mnode,
                                    self.volname,
                                    self.snap,
                                    description='$p3C!@l C#@R@cT#R$')
            self.assertEqual(
                ret, 0,
                ("Failed to create snapshot for volume %s" % self.volname))
            g.log.info("Snapshot %s created successfully"
                       " for volume %s", self.snap, self.volname)

        # Validate snapshot list
        g.log.info("Starting to list all snapshots")
        ret, out, _ = snap_list(self.mnode)
        self.assertEqual(
            ret, 0, ("Failed to list snapshot of volume %s" % self.volname))
        s_list = out.strip().split('\n')
        self.assertEqual(len(s_list), self.snap_count, "Failed to validate "
                         "all snapshots")
        g.log.info(
            "Snapshots listed and validated for volume %s"
            " successfully", self.volname)

        # Activating snapshot
        g.log.info("Activating snapshot")
        for count in range(0, self.snap_count):
            self.snap = "snap%s" % count
            ret, _, _ = snap_activate(self.mnode, self.snap)
            self.assertEqual(ret, 0, "Failed to Activate snapshot "
                             "%s" % self.snap)
            g.log.info("snapshot %s activated successfully", self.snap)

        # Validate IO is completed
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")
        self.io_validation_complete = True

        # validate snapshots are listed under .snaps directory
        g.log.info("Validating snaps under .snaps")
        ret = view_snaps_from_mount(self.mounts, s_list)
        self.assertTrue(ret, "Failed to list snaps under .snaps" "directory")
        g.log.info("Snapshots Validated successfully")
Example 8
    def test_activate_deactivate(self):
        # pylint: disable=too-many-branches, too-many-statements
        """
        Verifying Snapshot activation/deactivation functionality.

        * Create Snapshot
        * Validate snapshot info before activation
        * Validate snapshot status before activation
        * Activate snapshot
        * Validate snapshot info after activation
        * Validate snapshot status after activation
        * Deactivate snapshot
        * Validate snapshot info after deactivation
        * Validate snapshot status after deactivation
        """

        # Create Snapshot
        snap_name = 'snap_%s' % self.volname
        g.log.info("Starting to Create Snapshot %s", snap_name)
        ret, _, _ = snap_create(self.mnode, self.volname, snap_name)
        self.assertEqual(ret, 0,
                         ("Snapshot Creation failed for %s", snap_name))
        g.log.info("Successfully created Snapshot %s for volume %s", snap_name,
                   self.volname)

        # Validate Snapshot Info Before Activation
        g.log.info("Validating 'snapshot info' in 'stopped' state before "
                   "activating the snapshot")
        ret = get_snap_info_by_snapname(self.mnode, snap_name)
        self.assertIsNotNone(
            ret, ("Failed to Fetch Snapshot info for %s", snap_name))
        g.log.info("Snapshot info Success for %s", ret['snapVolume']['status'])
        self.assertEqual(
            ret['snapVolume']['status'], 'Stopped',
            ("Unexpected: Snapshot %s Status is in Started state", snap_name))
        g.log.info("Expected: Snapshot is in Stopped state as it is "
                   "not Activated")

        # Validate Snapshot Status Before Activation
        g.log.info("Validating 'snapshot status' in 'stopped' state before "
                   "activating the snapshot")
        ret = get_snap_status_by_snapname(self.mnode, snap_name)
        self.assertIsNotNone(
            ret, ("Failed to Fetch Snapshot status for %s", snap_name))
        g.log.info("Snapshot Status Success for %s", snap_name)
        for brick in ret['volume']['brick']:
            self.assertEqual(brick['pid'], 'N/A',
                             ("Unexpected: Brick Pid '%s' is available for %s",
                              brick['pid'], brick['path']))
        g.log.info("Expected: Deactivated Snapshot Brick PID is 'N/A'")

        # Activate Snapshot
        g.log.info("Starting to Activate %s", snap_name)
        ret, _, _ = snap_activate(self.mnode, snap_name)
        self.assertEqual(ret, 0,
                         ("Snapshot Activation Failed for %s", snap_name))
        g.log.info("Snapshot %s Activated Successfully", snap_name)

        # Validate Snapshot Info After Activation
        g.log.info("Validating 'snapshot info' in 'started' state after"
                   " activating the snapshot")
        snap_info = get_snap_info_by_snapname(self.mnode, snap_name)
        self.assertEqual(snap_info['snapVolume']['status'], "Started",
                         ("Failed to Fetch Snapshot info after activate "
                          "for %s", snap_name))
        g.log.info("Success: Snapshot info in 'started' state")

        # Validate Snapshot Status After Activation
        g.log.info("Validating 'snapshot status' in started state after "
                   "activating the snapshot")
        ret = get_snap_status_by_snapname(self.mnode, snap_name)
        for brick in ret['volume']['brick']:
            self.assertNotEqual(brick['pid'], 'N/A',
                                ("Brick Path %s  Not Available for Activated "
                                 "Snapshot %s", (brick['path'], snap_name)))
        g.log.info("Sucessfully validated Activated Snapshot Brick Path "
                   "Available")

        # Deactivate Snapshot
        g.log.info("Starting to Deactivate %s", snap_name)
        ret, _, _ = snap_deactivate(self.mnode, snap_name)
        self.assertEqual(ret, 0,
                         ("Snapshot Deactivation Failed for %s", snap_name))
        g.log.info("Successfully Deactivated Snapshot %s", snap_name)

        # Validate Snapshot Info After Deactivation
        g.log.info("Validating 'snapshot info' in stopped state after "
                   "deactivating the snapshot")
        ret = get_snap_info_by_snapname(self.mnode, snap_name)
        self.assertEqual(ret['snapVolume']['status'], 'Stopped',
                         ("Snapshot Status is not in 'Stopped' State"))
        g.log.info("Expected: Snapshot is in Stopped state after Deactivation")

        # Validate Snapshot Status After Deactivation
        g.log.info("Validating 'snapshot status' in 'stopped' state after "
                   "deactivating the snapshot")
        ret = get_snap_status_by_snapname(self.mnode, snap_name)
        for brick in ret['volume']['brick']:
            self.assertEqual(brick['pid'], 'N/A',
                             ("Deactivated Snapshot Brick Pid %s available "
                              "for %s", brick['pid'], brick['path']))
        g.log.info("Expected: Deactivated Snapshot Brick PID is 'N/A'")
Example 9
    def test_snap_clone_validate(self):
        """
        CloneSnapTest contains tests which verifies Clone volume
        created from snapshot

        Steps:

        1. Create a volume
        2. Mount the volume
        3. Perform I/O on mount point
        4. Create a snapshot
        5. Activate the snapshot created in step 4
        6. Create 10 clones from snapshot created in step 4
        7. Verify Information about the volumes
           along with the original volume.
        8. Validate total number of clone volumes and existing volume
           with volume list
        """

        # Write files on all mounts
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mounts)
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertEqual(ret, 0, "IO failed on %s"
                             % mount_obj.mountpoint)
        g.log.info("Successfully performed I/O on all mount points")

        # Creating snapshot:
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(
            ret, 0, ("Failed to create snapshot for volume %s" % self.volname))
        g.log.info("Snapshot snap1 created successfully for volume %s",
                   self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0,
                         ("Failed to Activate snapshot %s" % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # Creating and starting a Clone of snapshot:
        g.log.info("Starting to Clone Snapshot")
        for count in range(1, 11):
            self.clone = "clone%s" % count
            ret, _, _ = snap_clone(self.mnode, self.snap, self.clone)
            self.assertEqual(ret, 0, "Failed to clone %s" % self.clone)
            g.log.info("%s created successfully", self.clone)

        # Start clone volumes
        g.log.info("starting to Validate clone volumes are started")
        for count in range(1, 11):
            self.clone = "clone%s" % count
            ret, _, _ = volume_start(self.mnode, self.clone)
            self.assertEqual(ret, 0, ("Failed to start %s" % self.clone))
            g.log.info("%s started successfully", self.clone)

        # Validate Volume Started
        g.log.info("Validating volume started")
        for count in range(1, 11):
            self.clone = "clone%s" % count
            vol_info = get_volume_info(self.mnode, self.clone)
            if vol_info[self.clone]['statusStr'] != 'Started':
                raise ExecutionError("Volume %s failed to start" % self.clone)
            g.log.info("Volume %s is in Started state", self.clone)

        # validate with list information
        # with 10 clone volume and 1 existing volume
        g.log.info("Validating with list information")
        ret, out, _ = volume_list(self.mnode)
        vlist = out.strip().split('\n')
        self.assertEqual(len(vlist), 11, "Failed to validate volume list")
        g.log.info("Successfully validated volumes in list")
Example 10
    def test_uss_brick_down(self):

        # pylint: disable=too-many-statements
        """
        Steps:
        * Create volume
        * Mount volume
        * Perform I/O on mounts
        * Bring down one brick
        * Enable USS
        * Validate USS is enabled
        * Bring the brick online using gluster v start force
        * Create 2 snapshots snapy0 & snapy1
        * Validate snaps created
        * Activate snapy0 & snapy1
        * List snaps under .snaps directory
          -- snapy0 and snapy1 should be listed under .snaps
        """

        # Perform I/O
        g.log.info("Starting IO on all mounts...")
        self.counter = 1
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "/usr/bin/env python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 2 "
                "--dir-length 2 "
                "--max-num-of-dirs 2 "
                "--num-of-files 2 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        self.assertTrue(validate_io_procs(self.all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")
        self.io_validation_complete = True

        # Bring down 1 brick from brick list
        g.log.info("Getting all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        ret = bring_bricks_offline(self.volname, bricks_list[0])
        self.assertTrue(ret,
                        ("Failed to bring down the brick %s ", bricks_list[0]))
        g.log.info("Successfully brought the brick %s down", bricks_list[0])

        # Enable USS
        g.log.info("Enable USS on volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume")
        g.log.info("Successfully enabled USS on volume")

        # Validate USS is enabled
        g.log.info("Validating USS is enabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, "USS is disabled on volume " "%s" % self.volname)
        g.log.info("USS enabled on volume %s", self.volname)

        # Bring the brick online using gluster v start force
        g.log.info("Bring the brick online using gluster v start force")
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Volume start with force failed")
        g.log.info("Volume start with force successful")

        # Create 2 snapshots
        g.log.info("Creating 2 snapshots for volume %s", self.volname)
        for i in range(0, 2):
            ret, _, _ = snap_create(self.mnode, self.volname, "snapy%s" % i)
            self.assertEqual(
                ret, 0, ("Failed to create snapshot for %s" % self.volname))
            g.log.info("Snapshot %s created successfully for volume  %s",
                       "snapy%s" % i, self.volname)

        # Check the number of snaps using snap_list; it should be 2 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            2, len(snap_list), "Number of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snaps.")

        # Activate snapshots snapy0 & snapy1
        g.log.info("Activating snapshots snapy0 & snapy1")
        for i in range(0, 2):
            ret, _, _ = snap_activate(self.mnode, "snapy%s" % i)
            self.assertEqual(ret, 0, "Failed to activate snapshot snapy%s" % i)
        g.log.info("Both snapshots activated successfully")

        # list activated snapshots directory under .snaps
        g.log.info("Listing activated snapshots under .snaps")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            validate_dir = out.split('\n')
            for i in range(0, 2):
                self.assertIn(
                    "snapy%s" % i, validate_dir, "Failed to "
                    "validate snapy%s under .snaps directory")
                g.log.info("Activated Snapshot snapy%s listed Successfully", i)
Example n. 11
    def test_snap_clone_snapd(self):
        """
        Steps:

        1. Create a volume
        2. Create a snapshot and activate it
        3. Clone the snapshot and mount it
        4. Check for the snapd daemon
        5. Enable USS and validate snapd
        6. Stop the cloned volume
        7. Validate snapd
        8. Start the cloned volume
        9. Validate snapd
        10. Create 5 more snapshots
        11. Validate the total number of snapshots created
        12. Activate the 5 snapshots
        13. Enable USS
        14. Validate snapd
        15. Kill snapd on all nodes
        16. Validate snapd is not running
        17. Force start the clone volume
        18. Validate snaps inside the .snaps directory
        """
        # pylint: disable=too-many-statements, too-many-locals

        # Starting I/O
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s" % (
                       self.script_upload_path,
                       mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate I/O
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # Creating snapshot
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(ret, 0, ("Failed to create snapshot for volume %s"
                                  % self.volname))
        g.log.info("Snapshot %s created successfully for "
                   "volume %s", self.snap, self.volname)

        # Activating created snapshots
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to activate snapshot %s"
                                  % self.snap))
        g.log.info("Snapshot snap%s activated successfully", self.snap)

        # Snapshot list
        self.assertIsNotNone(
            get_snap_list(self.mnode), "Failed to list snapshot")
        g.log.info("Snapshot list command Successful")

        # Creating a clone of the snapshot:
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to clone %s" % self.clone_vol1)
        g.log.info("Clone volume %s created successfully", self.clone_vol1)

        # Start the clone volumes
        ret, _, _ = volume_start(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to start %s" % self.clone_vol1)
        g.log.info("%s started successfully", self.clone_vol1)

        # Form server list
        brick_list = get_all_bricks(self.mnode, self.clone_vol1)
        for bricks in brick_list:
            self.server_lists.append(bricks.split(":")[0])
        self.server_list = list(set(self.server_lists))

        # Get volume info
        vol_info = get_volume_info(self.mnode, self.clone_vol1)
        self.assertIsNotNone(vol_info, "Failed to get vol info")
        g.log.info("Successfully in getting vol info")

        # Redefining mounts for cloned volume
        self.mount_points, self.mounts_dict_list = [], []
        for client in self.all_clients_info:
            mount = {
                'protocol': self.mount_type,
                'server': self.mnode,
                'volname': self.volname,
                'client': self.all_clients_info[client],
                'mountpoint': self.mpoint,
                'options': ''
            }
            self.mounts_dict_list.append(mount)
        self.mount1 = create_mount_objs(self.mounts_dict_list)
        self.mount_points.append(self.mpoint)
        g.log.info("Successfully made entry in self.mount1")

        # FUSE mount clone1 volume
        for mount_obj in self.mounts:
            ret, _, _ = mount_volume(self.clone_vol1, self.mount_type,
                                     self.mpoint,
                                     self.mnode, mount_obj.client_system)
            self.assertEqual(ret, 0, "Volume mount failed for clone1")
            g.log.info("%s mounted Successfully", self.clone_vol1)

            # Validate clone volume is mounted or not
            ret = is_mounted(self.clone_vol1, self.mpoint, self.mnode,
                             mount_obj.client_system, self.mount_type)
            self.assertTrue(ret, "Volume not mounted on mount point: "
                            "%s" % self.mpoint)
            g.log.info("Volume %s mounted on %s", self.clone_vol1, self.mpoint)

        # Log Cloned Volume information
        ret = log_volume_info_and_status(self.mnode, self.clone_vol1)
        self.assertTrue("Failed to Log Info and Status of Volume "
                        "%s" % self.clone_vol1)
        g.log.info("Successfully Logged Info and Status")

        # Validate snapd is not running on any node (USS not enabled yet)
        self.validate_snapd(check_condition=False)

        # Enable USS
        ret, _, _ = enable_uss(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to enable USS on cloned volume")
        g.log.info("Successfully enabled USS on Cloned volume")

        # Validate USS running
        self.validate_uss()

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Stop cloned volume
        ret, _, _ = volume_stop(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to stop cloned volume "
                         "%s" % self.clone_vol1)
        g.log.info("Successfully Stopped Cloned volume %s", self.clone_vol1)

        # Validate snapd is not running on any node (volume is stopped)
        self.validate_snapd(check_condition=False)

        # Start cloned volume
        ret, _, _ = volume_start(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to start cloned volume"
                         " %s" % self.clone_vol1)
        g.log.info("Successfully started cloned volume"
                   " %s", self.clone_vol1)

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Create 5 snapshots
        self.snaps_list = [('test_snap_clone_snapd-snap%s'
                            % i) for i in range(0, 5)]
        for snapname in self.snaps_list:
            ret, _, _ = snap_create(self.mnode, self.clone_vol1,
                                    snapname)
            self.assertEqual(ret, 0, ("Failed to create snapshot for volume"
                                      " %s" % self.clone_vol1))
            g.log.info("Snapshot %s created successfully for volume "
                       "%s", snapname, self.clone_vol1)

        # Validate USS running
        self.validate_uss()

        # Check snapshot under .snaps directory
        self.check_snaps()

        # Activate Snapshots
        for snapname in self.snaps_list:
            ret, _, _ = snap_activate(self.mnode, snapname)
            self.assertEqual(ret, 0, ("Failed to activate snapshot %s"
                                      % snapname))
            g.log.info("Snapshot %s activated "
                       "successfully", snapname)

        # Validate USS running
        self.validate_uss()

        # Validate snapshots under .snaps folder
        self.validate_snaps()

        # Kill snapd on all nodes except the management node and validate
        for server in self.servers[1:]:
            ret, _, _ = terminate_snapd_on_node(server)
            self.assertEqual(ret, 0, "Failed to Kill snapd on node %s"
                             % server)
            g.log.info("snapd Killed Successfully on node %s", server)

            # Check snapd is not running
            ret = is_snapd_running(server, self.clone_vol1)
            self.assertFalse(ret, "Unexpected: Snapd running on node: "
                             "%s" % server)
            g.log.info("Expected: Snapd is not running on node:%s", server)

            # Check snapshots under .snaps folder
            g.log.info("Validating snapshots under .snaps")
            ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint)
            self.assertEqual(ret, 0, "Target endpoint not connected")
            g.log.info("Successfully listed snapshots under .snaps")

        # Kill snapd in management node
        ret, _, _ = terminate_snapd_on_node(self.servers[0])
        self.assertEqual(ret, 0, "Failed to Kill snapd on node %s"
                         % self.servers[0])
        g.log.info("snapd Killed Successfully on node %s", self.servers[0])

        # Validate snapd is not running on any node
        self.validate_snapd(check_condition=False)

        # Validating snapshots under .snaps
        ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint)
        self.assertNotEqual(ret, 0, "Unexpected: Successfully listed "
                            "snapshots under .snaps")
        g.log.info("Expected: Target endpoint not connected")

        # Start the Cloned volume(force start)
        ret, _, _ = volume_start(self.mnode, self.clone_vol1, force=True)
        self.assertEqual(ret, 0, "Failed to start cloned volume "
                         "%s" % self.clone_vol1)
        g.log.info("Successfully Started Cloned volume %s", self.clone_vol1)

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Validate snapshots under .snaps folder
        self.validate_snaps()
Example n. 12
    def test_activate_deactivate(self):
        # pylint: disable=too-many-branches, too-many-statements
        """
        Verifying Snapshot activation/deactivation functionality.
        """
        snap_name = 'snap_%s' % self.volname
        # Create Snapshot
        g.log.info("Starting to Create Snapshot %s", snap_name)
        ret, _, _ = snap_create(self.mnode, self.volname, snap_name)
        self.assertEqual(ret, 0, "Snapshot Creation failed"
                         " for %s" % snap_name)
        g.log.info("Snapshot %s of volume %s created"
                   " successfully", snap_name, self.volname)

        # Validate Snapshot Info Before Activation
        ret = get_snap_info_by_snapname(self.mnode, snap_name)
        if ret is None:
            raise ExecutionError("Failed to Fetch Snapshot"
                                 "info for %s" % snap_name)
        g.log.info("Snapshot info Success"
                   "for %s", ret['snapVolume']['status'])
        if ret['snapVolume']['status'] != 'Stopped':
            raise ExecutionError("Unexpected: "
                                 "Snapshot %s Status is in "
                                 "Started State" % snap_name)
        g.log.info("Expected: Snapshot is in Stopped state "
                   "as it is not Activated")

        # Validate Snapshot Status Before Activation
        ret = get_snap_status_by_snapname(self.mnode, snap_name)
        if ret is None:
            raise ExecutionError("Failed to Fetch Snapshot"
                                 "status for %s" % snap_name)
        g.log.info("Snapshot Status Success for %s", snap_name)
        for brick in ret['volume']['brick']:
            if brick['pid'] != 'N/A':
                raise ExecutionError(
                    "Brick Pid %s available for %s" % brick['pid'],
                    brick['path'])
        g.log.info("Deactivated Snapshot Brick PID N/A as Expected")

        # Activate Snapshot
        g.log.info("Starting to Activate %s", snap_name)
        ret, _, _ = snap_activate(self.mnode, snap_name)
        self.assertEqual(ret, 0, "Snapshot Activation Failed"
                         " for %s" % snap_name)
        g.log.info("Snapshot %s Activated Successfully", snap_name)

        # Validate Snapshot Info After Activation
        g.log.info("Validate snapshot info")
        snap_info = get_snap_info_by_snapname(self.mnode, snap_name)
        self.assertEqual(
            snap_info['snapVolume']['status'], "Started",
            "Snapshot %s is not in Started state "
            "after activation" % snap_name)
        g.log.info("Snapshot info validated successfully")

        # Validate Snapshot Status After Activation
        g.log.info("Validate snapshot status")
        ret = get_snap_status_by_snapname(self.mnode, snap_name)
        for brick in ret['volume']['brick']:
            self.assertNotEqual(
                brick['pid'], 'N/A', "Brick PID for path %s not available "
                "for Activated Snapshot %s" % (brick['path'], snap_name))
        g.log.info("Activated Snapshot Brick PID Available as Expected")

        # Deactivate Snapshot
        g.log.info("Starting to Deactivate %s", snap_name)
        ret, _, _ = snap_deactivate(self.mnode, snap_name)
        self.assertEqual(ret, 0, "Snapshot Deactivation Failed"
                         " for %s" % snap_name)
        g.log.info("Snapshot %s Deactivation Successfully", snap_name)

        # Validate Snapshot Info After Deactivation
        ret = get_snap_info_by_snapname(self.mnode, snap_name)
        self.assertEqual(ret['snapVolume']['status'], 'Stopped',
                         "Snapshot Status is not "
                         "in Stopped State")
        g.log.info("Snapshot is in Stopped state after Deactivation")

        # Validate Snapshot Status After Deactivation
        g.log.info("Validate snapshot status")
        ret = get_snap_status_by_snapname(self.mnode, snap_name)
        for brick in ret['volume']['brick']:
            self.assertEqual(
                brick['pid'], 'N/A', "Deactivated "
                "Snapshot Brick "
                "Pid %s available "
                "for %s" % (brick['pid'], brick['path']))
        g.log.info("Deactivated Snapshot Brick PID N/A as Expected")
Example n. 13
    def test_uss_snap_restore(self):
        """
        Description:
            This test case will validate USS after Snapshot restore.
            The restored snapshot should not be listed under the '.snaps'
            directory.

        * Perform I/O on mounts
        * Enable USS on volume
        * Validate USS is enabled
        * Create a snapshot
        * Activate the snapshot
        * Perform some more I/O
        * Create another snapshot
        * Activate the second snapshot
        * Restore volume to the second snapshot
        * From mount point validate under .snaps
          - first snapshot should be listed
          - second snapshot should not be listed
        """

        # pylint: disable=too-many-statements
        # Perform I/O
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name firstfiles %s" %
               (self.script_upload_path, self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system,
                           cmd,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)

        # Wait for IO to complete and validate IO
        self.assertTrue(
            wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0]),
            "IO failed on %s" % self.mounts[0])
        g.log.info("IO is successful on all mounts")

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Enable USS
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume")
        g.log.info("Successfully enabled USS on volume")

        # Validate USS is enabled
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, "USS is disabled on volume %s" % self.volname)
        g.log.info("USS enabled on volume %s", self.volname)

        # Create a snapshot
        ret, _, _ = snap_create(self.mnode, self.volname, self.snapshots[0])
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume  %s",
                   self.snapshots[0], self.volname)

        # Check the number of snaps using snap_list; it should be 1 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            1, len(snap_list), "Number of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snapshots")

        # Activate the snapshot
        ret, _, _ = snap_activate(self.mnode, self.snapshots[0])
        self.assertEqual(
            ret, 0, ("Failed to activate snapshot %s" % self.snapshots[0]))
        g.log.info("Snapshot %s activated successfully", self.snapshots[0])

        # Perform I/O
        self.all_mounts_procs = []
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name secondfiles %s" %
               (self.script_upload_path, self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system,
                           cmd,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)

        # Wait for IO to complete and validate IO
        self.assertTrue(
            wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0]),
            "IO failed on %s" % self.mounts[0])
        g.log.info("IO is successful on all mounts")

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Create another snapshot
        ret, _, _ = snap_create(self.mnode, self.volname, self.snapshots[1])
        self.assertEqual(
            ret, 0, ("Failed to create snapshot for volume %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume  %s",
                   self.snapshots[1], self.volname)

        # Check the number of snaps using snap_list; it should be 2 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            2, len(snap_list), "Number of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snapshots")

        # Activate the second snapshot
        ret, _, _ = snap_activate(self.mnode, self.snapshots[1])
        self.assertEqual(
            ret, 0, ("Failed to activate snapshot %s" % self.snapshots[1]))
        g.log.info("Snapshot %s activated successfully", self.snapshots[1])

        # Restore volume to the second snapshot
        ret = snap_restore_complete(self.mnode, self.volname,
                                    self.snapshots[1])
        self.assertTrue(ret, ("Failed to restore snap %s on the "
                              "volume %s" % (self.snapshots[1], self.volname)))
        g.log.info("Restore of volume is successful from %s on "
                   "volume %s", self.snapshots[1], self.volname)

        # Verify all volume processes are online
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(ret, "Failed: All volume processes are not online")
        g.log.info("All volume processes are online")
        ret = is_snapd_running(self.mnode, self.volname)
        self.assertTrue(
            ret, "Failed: snapd is not running for volume %s" % self.volname)
        g.log.info("Successful: snapd is running")

        # List activated snapshots under the .snaps directory
        snap_dir_list = get_uss_list_snaps(self.mounts[0].client_system,
                                           self.mounts[0].mountpoint)
        self.assertIsNotNone(
            snap_dir_list, "Failed to list snapshots under .snaps directory")
        g.log.info("Successfully gathered list of snapshots under the .snaps"
                   " directory")

        # Check for first snapshot as it should get listed here
        self.assertIn(self.snapshots[0], snap_dir_list,
                      ("Unexpected : %s not listed under .snaps "
                       "directory" % self.snapshots[0]))
        g.log.info("Activated Snapshot %s listed Successfully",
                   self.snapshots[0])

        # Check for second snapshot as it should not get listed here
        self.assertNotIn(self.snapshots[1], snap_dir_list,
                         ("Unexpected: %s listed in .snaps "
                          "directory" % self.snapshots[1]))
        g.log.info("Restored Snapshot %s not listed", self.snapshots[1])
Example n. 14
    def test_validate_snaps_dir_over_uss(self):

        # pylint: disable=too-many-statements
        """
        Run IOs on the mount and take 2 snapshots.
        Activate 1 snapshot and check the directory listing.
        Writing to .snaps should not be allowed.
        Listing the other (deactivated) snapshot should fail.
        """

        # run IOs
        self.counter = 1
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "/usr/bin/env python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 2 "
                "--dir-length 2 "
                "--max-num-of-dirs 2 "
                "--num-of-files 2 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        self.assertTrue(validate_io_procs(self.all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")
        self.io_validation_complete = True

        # get the snapshot list.
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(len(snap_list), 0, "Unexpected: %s snapshots "
                         "present" % len(snap_list))
        g.log.info("Expected: No snapshots present")

        # Create 2 snapshots
        g.log.info("Starting to Create Snapshots")
        for snap_num in range(0, 2):
            ret, _, _ = snap_create(self.mnode, self.volname,
                                    "snap-%s" % snap_num)
            self.assertEqual(
                ret, 0, "Snapshot Creation failed"
                " for snap-%s" % snap_num)
            g.log.info("Snapshot snap-%s of volume %s created"
                       " successfully", snap_num, self.volname)

        # Activate snap-0
        g.log.info("Activating snapshot snap-0")
        ret, _, _ = snap_activate(self.mnode, "snap-0")
        self.assertEqual(ret, 0, "Failed to activate " "Snapshot snap-0")
        g.log.info("Snapshot snap-0 Activated Successfully")

        # Enable USS for volume
        g.log.info("Enable uss for volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, "Failed to enable USS for "
            "volume %s" % self.volname)
        g.log.info("Successfully enabled USS for volume %s", self.volname)

        # Validate uss enabled
        g.log.info("Validating uss enabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, "Failed to validate uss enable")
        g.log.info("Successfully validated uss enable for volume"
                   "%s", self.volname)

        # list activated snapshots directory under .snaps
        g.log.info("Listing activated snapshots under .snaps")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            validate_dir = out.split('\n')
            self.assertIn(
                'snap-0', validate_dir, "Failed to "
                "validate snap-0 under .snaps directory")
            g.log.info("Activated Snapshot Successfully listed")
            self.assertNotIn(
                'snap-1', validate_dir, "Unexpected: "
                "Successfully listed snap-1 under "
                ".snaps directory")
            g.log.info("Expected: De-activated Snapshot not listed")

        # Start I/O (write and read)
        g.log.info("Starting IO on all mounts...")
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s/.snaps/abc/" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # IO should fail
        g.log.info("IO should Fail with ROFS error.....")
        self.assertFalse(validate_io_procs(all_mounts_procs, self.mounts),
                         "Unexpected: IO successfully completed")
        g.log.info("Expected: IO failed to complete")

        # validate snap-0 present in mountpoint
        ret = view_snaps_from_mount(self.mounts, "snap-0")
        self.assertTrue(
            ret, "UnExpected: Unable to list content "
            "in activated snapshot"
            " activated snapshot")
        g.log.info("Expected: Successfully listed contents in"
                   " activated snapshot")
Example n. 15
    def test_snap_del_original_volume(self):
        # pylint: disable=too-many-statements
        """
        Steps:
        1. Create and mount distributed-replicated volume
        2. Perform I/O on mountpoints
        3. Create snapshot
        4. Activate the snapshot created in step 3
        5. Clone the snapshot created in step 3
        6. Delete the original volume
        7. Validate the clone volume

        """
        # Perform I/O
        all_mounts_procs = []
        g.log.info("Generating data for %s:"
                   "%s", self.mounts[0].client_system,
                   self.mounts[0].mountpoint)
        # Create files
        g.log.info('Creating files...')
        command = ("/usr/bin/env python %s create_files -f 100 "
                   "--fixed-file-size 1k %s" %
                   (self.script_upload_path, self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system,
                           command,
                           user=self.mounts[0].user)
        all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts[0]),
                        "IO failed on some of the clients")
        self.io_validation_complete = True

        # Creating snapshot
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(ret, 0, ("Failed to create snapshot %s for "
                                  "volume %s" % (self.snap, self.volname)))
        g.log.info("Snapshot %s created successfully for volume "
                   "%s", self.snap, self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to Activate snapshot "
                                  "%s" % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # snapshot list
        g.log.info("getting snapshot list")
        ret, out, _ = snap_list(self.mnode)
        self.assertEqual(
            ret, 0, ("Failed to list snapshot of volume %s" % self.volname))
        self.assertIn(
            self.snap, out, "Failed to validate snapshot"
            " %s in snap list" % self.snap)
        g.log.info("Snapshot list command for volume %s is "
                   "successful", self.volname)

        # Creating a Clone of snapshot:
        g.log.info("Starting to create Clone of Snapshot")
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone)
        self.assertEqual(ret, 0,
                         ("Failed to create clone volume %s "
                          "from snapshot %s" % (self.clone, self.snap)))
        g.log.info("Clone Volume %s created successfully from snapshot "
                   "%s", self.clone, self.snap)

        # After cloning the volume, wait 5 seconds before starting it
        sleep(5)

        # Start the cloned volume
        g.log.info("Starting the cloned volume")
        ret, _, _ = volume_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, ("Failed to start cloned volume "
                                  "%s" % self.clone))
        g.log.info("Volume %s started successfully", self.clone)

        for mount_obj in self.mounts:
            # Unmount Volume
            g.log.info("Starting to Unmount Volume %s", self.volname)
            ret = umount_volume(mount_obj.client_system,
                                mount_obj.mountpoint,
                                mtype=self.mount_type)
            self.assertTrue(ret,
                            ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Delete original volume
        g.log.info("deleting original volume")
        ret = cleanup_volume(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to delete volume %s" % self.volname))
        g.log.info("successfully deleted volume %s", self.volname)

        # get volume info
        g.log.info("Getting and validating cloned volume %s", self.clone)
        vol_info = get_volume_info(self.mnode, self.clone)
        self.assertIsNotNone(
            vol_info, "Failed to get volume info "
            "for cloned volume %s" % self.clone)
        self.assertEqual(
            vol_info[self.clone]['statusStr'], 'Started',
            "Unexpected: cloned volume is not started "
            "%s " % self.clone)
        g.log.info("Volume %s is in Started state", self.clone)

        # Volume status
        g.log.info("Getting volume status")
        ret, out, _ = volume_status(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to get volume status for"
                         " %s" % self.clone)
        vol = out.strip().split("\n")
        vol1 = vol[0].strip().split(":")
        self.assertEqual(
            vol1[1], " %s" % self.clone, "Failed to "
            "get volume status for volume %s" % self.clone)
        g.log.info("Volume Status is Successful for %s clone volume",
                   self.clone)

        # Volume list validate
        g.log.info("Starting to list volume")
        ret, vol_list, _ = volume_list(self.mnode)
        self.assertEqual(ret, 0, "Failed to get volume list")
        vol_list1 = vol_list.strip().split("\n")
        self.assertIn(
            "%s" % self.clone, vol_list1, "Failed to validate "
            "volume list for volume %s" % self.clone)
        g.log.info("Volume list validated Successfully for"
                   "volume %s", self.clone)
Example n. 16
    def test_snapshot_basic_commands_when_io_in_progress(self):
        """Create, List, Activate, Enable USS (User Serviceable Snapshot),
            Viewing Snap of the volume from mount, De-Activate
            when IO is in progress.
        """
        snap_name = "snap_cvt"
        # Create Snapshot
        g.log.info("Creating snapshot %s of the volume %s", snap_name,
                   self.volname)
        ret, _, _ = snap_create(self.mnode, self.volname, snap_name)
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot with name %s "
                          " of the volume %s", snap_name, self.volname))
        g.log.info("Successfully created snapshot %s of the volume %s",
                   snap_name, self.volname)

        # List Snapshot
        g.log.info("Listing the snapshot created for the volume %s",
                   self.volname)
        snap_list = get_snap_list(self.mnode)
        self.assertIsNotNone(snap_list, "Unable to get the Snapshot list")
        self.assertIn(snap_name, snap_list,
                      ("snapshot %s not listed in Snapshot list", snap_name))
        g.log.info("Successfully listed snapshot %s in gluster snapshot list",
                   snap_name)

        # Activate the snapshot
        g.log.info("Activating snapshot %s of the volume %s", snap_name,
                   self.volname)
        ret, _, _ = snap_activate(self.mnode, snap_name)
        self.assertEqual(ret, 0,
                         ("Failed to activate snapshot with name %s "
                          " of the volume %s", snap_name, self.volname))
        g.log.info("Successfully activated snapshot %s of the volume %s",
                   snap_name, self.volname)

        # Enable USS on the volume.
        uss_options = ["features.uss"]
        if self.mount_type == "cifs":
            uss_options.append("features.show-snapshot-directory")
        g.log.info("Enable uss options %s on the volume %s", uss_options,
                   self.volname)
        ret = enable_and_validate_volume_options(self.mnode,
                                                 self.volname,
                                                 uss_options,
                                                 time_delay=30)
        self.assertTrue(ret, ("Unable to enable uss options %s on volume %s",
                              uss_options, self.volname))
        g.log.info("Successfully enabled uss options %s on the volume: %s",
                   uss_options, self.volname)

        # Viewing snapshot from mount
        g.log.info("Viewing Snapshot %s from mounts:", snap_name)
        ret = view_snaps_from_mount(self.mounts, snap_name)
        self.assertTrue(ret, ("Failed to View snap %s from mounts", snap_name))
        g.log.info("Successfully viewed snap %s from mounts", snap_name)

        # De-Activate the snapshot
        g.log.info("Deactivating snapshot %s of the volume %s", snap_name,
                   self.volname)
        ret, _, _ = snap_deactivate(self.mnode, snap_name)
        self.assertEqual(ret, 0,
                         ("Failed to deactivate snapshot with name %s "
                          " of the volume %s", snap_name, self.volname))
        g.log.info("Successfully deactivated snapshot %s of the volume %s",
                   snap_name, self.volname)

        # Viewing snapshot from mount (.snaps shouldn't be listed from mount)
        for mount_obj in self.mounts:
            g.log.info("Viewing Snapshot %s from mount %s:%s", snap_name,
                       mount_obj.client_system, mount_obj.mountpoint)
            ret = view_snaps_from_mount(mount_obj, snap_name)
            self.assertFalse(ret, ("Still able to View snap %s from mount "
                                   "%s:%s", snap_name, mount_obj.client_system,
                                   mount_obj.mountpoint))
            g.log.info("%s not listed under .snaps from mount %s:%s",
                       snap_name, mount_obj.client_system,
                       mount_obj.mountpoint)
        g.log.info(
            "%s not listed under .snaps from mounts after "
            "deactivating ", snap_name)

        # Validate IO
        g.log.info("Wait for IO to complete and validate IO ...")
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.io_validation_complete = True
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")
Example n. 17
    def test_mount_snap_delete(self):
        """
        Mount the snap volume
        * Create volume, FUSE mount the volume
        * Perform I/O on mount points
        * Create a snapshot and activate it
        * FUSE mount the created snapshot
        * Perform I/O on the mounted snapshot
        * I/O should fail since the snapshot is read-only
        """
        # pylint: disable=too-many-statements
        # starting I/O
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mounts)
        all_mounts_procs = []
        self.counter = 1
        for mount_obj in self.mounts:
            cmd = (
                "/usr/bin/env python %s create_files "
                "-f 10 --base-file-name file%d %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            self.counter += 100

        # Validate I/O
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")

        # Creating snapshot
        g.log.info("Starting to create snapshots")
        ret, _, _ = snap_create(self.mnode, self.volname, "snap1")
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot snap1 created successfully "
                   "for volume  %s", self.volname)

        # Activating snapshot
        g.log.info("Activating snapshot")
        ret, _, _ = snap_activate(self.mnode, "snap1")
        self.assertEqual(ret, 0, ("Failed to Activate snapshot snap1"))
        g.log.info("snap1 activated successfully")

        # redefine mounts
        self.mount_points = []
        self.mounts_dict_list = []
        for client in self.all_clients_info:
            mount = {
                'protocol': self.mount_type,
                'server': self.mnode,
                'volname': self.volname,
                'client': self.all_clients_info[client],
                'mountpoint': (os.path.join("/mnt/snap1")),
                'options': ''
            }
            self.mounts_dict_list.append(mount)
        self.mount1 = create_mount_objs(self.mounts_dict_list)
        g.log.info("Successfully made entry in self.mount1")

        # FUSE mount snap1 snapshot
        g.log.info("Mounting snapshot snap1")
        cmd = "mkdir -p  %s" % self.mpoint
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, ("Creation of directory %s"
                                  "for mounting"
                                  "volume snap1 failed" % (self.mpoint)))
        self.mount_points.append(self.mpoint)
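        # A snapshot can be FUSE mounted directly via the server's special
        # /snaps/<snapname>/<parent-volname> volfile path, as done below.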
        cmd = "mount -t glusterfs %s:/snaps/snap1/%s %s" % (
            self.mnode, self.volname, self.mpoint)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, ("Failed to mount snap1"))
        g.log.info("snap1 is mounted Successfully")

        # starting I/O
        g.log.info("Starting IO on all mounts...")
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate I/O
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")

        # start I/O
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mount1)
        all_mounts_procs = []
        for mount_obj in self.mount1:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate I/O; writes to the read-only snapshot mount should fail
        self.assertFalse(validate_io_procs(all_mounts_procs, self.mount1),
                         "Unexpected: IO Successful on all clients")
        g.log.info("Expected: IO failed on clients")
Example n. 18
    def test_uss_snap_active_deactive(self):

        # pylint: disable=too-many-statements
        """
        Steps:
        * Create volume
        * Mount volume
        * Perform I/O on mounts
        * Create 2 snapshots snapy1 & snapy2
        * Validate snap created
        * Enable USS
        * Validate USS is enabled
        * Validate snapd is running
        * Activate snapy1 & snapy2
        * List snaps under the .snaps directory
          -- snapy1 and snapy2 should be listed under .snaps
        * Deactivate snapy2
        * List snaps under the .snaps directory
          -- snapy2 is not listed as it is deactivated
        * Activate snapy2
        * List snaps under the .snaps directory
          -- snapy1 and snapy2 should be listed under .snaps
        """

        # Perform I/O
        g.log.info("Starting IO on all mounts...")
        self.counter = 1
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 2 "
                "--dir-length 2 "
                "--max-num-of-dirs 2 "
                "--num-of-files 2 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        g.log.info("Wait for IO to complete and validate IO ...")
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        self.io_validation_complete = True
        g.log.info("I/O successful on clients")

        # Enable USS
        g.log.info("Enable USS on volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume")
        g.log.info("Successfully enabled USS on volume")

        # Validate USS is enabled
        g.log.info("Validating USS is enabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, "USS is disabled on volume " "%s" % self.volname)
        g.log.info("USS enabled on volume %s", self.volname)

        # Validate snapd running
        for server in self.servers:
            g.log.info("Validating snapd daemon on:%s", server)
            ret = is_snapd_running(server, self.volname)
            self.assertTrue(ret, "Snapd is Not running on " "%s" % server)
            g.log.info("Snapd Running on node: %s", server)

        # Create 2 snapshots
        g.log.info("Creating 2 snapshots for volume %s", self.volname)
        for i in range(1, 3):
            ret, _, _ = snap_create(self.mnode, self.volname, "snapy%s" % i)
            self.assertEqual(
                ret, 0, ("Failed to create snapshot for %s" % self.volname))
            g.log.info("Snapshot %s created successfully for volume  %s",
                       "snapy%s" % i, self.volname)

        # Check the number of snaps using snap_list; it should be 2 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            2, len(snap_list), "Number of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snaps.")

        # Activate snapshot snapy1 & snapy2
        g.log.info("Activating snapshot snapy1 & snapy2")
        for i in range(1, 3):
            ret, _, _ = snap_activate(self.mnode, "snapy%s" % i)
            self.assertEqual(ret, 0, "Failed to activate snapshot snapy%s" % i)
        g.log.info("Both snapshots activated successfully")

        # list activated snapshots directory under .snaps
        g.log.info("Listing activated snapshots under .snaps")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            validate_dir = out.split('\n')
            self.assertIn(
                "snapy1", validate_dir, "Failed to "
                "validate snapy1 under .snaps directory")
            g.log.info("Activated Snapshot snapy1 listed Successfully")
            self.assertIn(
                "snapy2", validate_dir, "Failed to "
                "validate snapy2 under .snaps directory")
            g.log.info("Activated Snapshot snapy2 listed Successfully")

        # Deactivate snapshot snapy2
        g.log.info("Deactivating snapshot snapy2")
        ret, _, _ = snap_deactivate(self.mnode, "snapy2")
        self.assertEqual(ret, 0, "Failed to deactivate snapshot snapy2")
        g.log.info("Successfully deactivated snapshot snapy2")

        # Validate snapy2 is not present in the mountpoint
        ret = view_snaps_from_mount(self.mounts, "snapy2")
        self.assertFalse(
            ret, "Unexpected: Still able to view snapy2 "
            "from mount")
        g.log.info("Successfully verified deactivated snapshot "
                   "snapy2 is not listed")

        # Activate snapshot snapy2
        ret, _, _ = snap_activate(self.mnode, "snapy2")
        self.assertEqual(ret, 0, "Failed to activate Snapshot snapy2")
        g.log.info("Snapshot snapy2 activated successfully")

        # list activated snapshots directory under .snaps
        g.log.info("Listing activated snapshots under .snaps")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            validate_dir = out.split('\n')
            self.assertIn(
                "snapy1", validate_dir, "Failed to "
                "validate snapy1 under .snaps directory")
            g.log.info("Activated Snapshot snapy1 listed Successfully")
            self.assertIn(
                "snapy2", validate_dir, "Failed to "
                "validate snapy2 under .snaps directory")
            g.log.info("Activated Snapshot snapy2 listed Successfully")
Example n. 19
    def test_snap_uss(self):
        # pylint: disable=too-many-statements
        """
        Steps:
        1. Create a volume and mount it.
        2. Perform I/O on mounts
        3. create a .snaps directory and create some files
        4. Create Multiple snapshots of volume
        5. Check info of volume
        6. Enable USS for volume
        7. Validate files created under .snaps
        8. Disable USS
        9. Again Validate the files created under .snaps directory
        """
        # write files on all mounts
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mounts)
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate IO
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")

        # Create a .snaps directory and write files into it
        g.log.info("Creating .snaps directory and writing files to it...")
        for mount_obj in self.mounts:
            self.mpoint = "%s/.snaps" % mount_obj.mountpoint
            ret = file_exists(mount_obj.client_system, self.mpoint)
            if not ret:
                ret = mkdir(mount_obj.client_system, self.mpoint)
                self.assertTrue(ret, "Failed to create .snaps directory")
                g.log.info("Successfully created .snaps directory")
                break
            else:
                # Validate USS running
                g.log.info("Validating USS enabled or disabled")
                ret = is_uss_enabled(self.mnode, self.volname)
                if not ret:
                    break
                else:
                    g.log.info("USS is enabled in volume %s", self.volname)
                    ret, _, _ = disable_uss(self.mnode, self.volname)
                    self.assertEqual(
                        ret, 0, "Failed to disable USS on "
                        " volume %s" % self.volname)
                    g.log.info("USS disabled in Volume %s", self.volname)
                    ret = mkdir(mount_obj.client_system, self.mpoint)
                    self.assertTrue(ret, "Failed to create .snaps directory")
                    g.log.info("Successfully created .snaps directory")
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name foo %s" %
                   (self.script_upload_path, self.mpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # List files under created .snaps directory
        g.log.info("Starting to list files under .snaps directory")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(ret, 0, "Failed to list files under .snaps")
            g.log.info("Successfully Created files under .snaps directory")
            before_uss_enable = out.strip().split('\n')
            # deleting the mount path from list
            del before_uss_enable[0]

        # Create Multiple snapshots for volume
        g.log.info("Creating snapshots")
        self.snaps_list = []
        for snap_count in range(1, 5):
            self.snap = "snap%s" % snap_count
            ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
            self.assertEqual(
                ret, 0, "Failed to create snapshot "
                "%s for volume %s" % (self.snap, self.volname))
            self.snaps_list.append(self.snap)
            g.log.info("Snapshot %s created successfully for volume %s",
                       self.snap, self.volname)
        g.log.info("Snapshot Creation Successful")

        # Activate the snapshots
        g.log.info("Activating snapshots")
        for snap_count in range(1, 5):
            self.snap = "snap%s" % snap_count
            ret, _, _ = snap_activate(self.mnode, self.snap)
            self.assertEqual(ret, 0,
                             ("Failed to activate snapshot %s" % self.snap))
            g.log.info("Snapshot snap%s activated successfully", self.snap)

        # snapshot list
        g.log.info("Starting to list snapshots")
        ret, out, _ = snap_list(self.mnode)
        self.assertEqual(ret, 0, "Failed to list snapshot")
        snap_count = out.strip().split("\n")
        self.assertEqual(len(snap_count), 4, "Failed to list all snaps")
        g.log.info("Snapshot list Validated successfully")

        # Enable USS
        g.log.info("Enable USS on volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on cloned volume")
        g.log.info("Successfully enabled USS on Cloned volume")

        # Validate USS running
        g.log.info("Validating USS enabled or disabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, ("USS is disabled in volume %s" % self.volname))
        g.log.info("USS enabled in Volume %s", self.volname)

        # Validate snapshots under .snaps folder
        self.validate_snaps()

        # check snapshots are listed
        g.log.info(".snaps Containing:")
        for mount_obj in self.mounts:
            ret, _, _ = uss_list_snaps(mount_obj.client_system,
                                       mount_obj.mountpoint)
            self.assertEqual(ret, 0, "Failed to list snapshot information")
            g.log.info("Successfully Listed snapshots Created")

        # Disable USS running
        g.log.info("Disable USS on volume")
        ret, _, _ = disable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to disable USS on volume")
        g.log.info("Successfully disabled USS on volume")

        # check snapshots are listed
        g.log.info(".snaps Containing:")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(ret, 0, "Failed to list snapshot information")
            g.log.info("Successfully listed snapshots Created")

        # Validate after disabling USS, all files should be same
        g.log.info("Validate files after disabling uss")
        after_uss_disable = out.strip().split('\n')
        # deleting the mount path from list
        del after_uss_disable[0]
        for files in before_uss_enable:
            self.assertIn(files, after_uss_disable,
                          "Files are Same under .snaps")
        g.log.info("Validated files under .snaps directory")