    def test_volume_status_xml(self):

        # create a two node cluster
        ret = peer_probe_servers(self.servers[0], self.servers[1])
        self.assertTrue(
            ret,
            "Peer probe failed to %s from %s" % (self.mnode, self.servers[1]))

        # create a distributed volume with single node
        number_of_bricks = 1
        servers_info_from_single_node = {}
        servers_info_from_single_node[self.servers[0]] = self.all_servers_info[
            self.servers[0]]

        bricks_list = form_bricks_list(self.mnode, self.volname,
                                       number_of_bricks, self.servers[0],
                                       servers_info_from_single_node)
        ret, _, _ = volume_create(self.servers[0], self.volname, bricks_list)
        self.assertEqual(ret, 0, "Volume creation failed")
        g.log.info("Volume %s created successfully", self.volname)

        # Get volume status
        ret, _, err = volume_status(self.servers[1], self.volname)
        self.assertNotEqual(ret, 0, ("Unexpected: volume status is success for"
                                     " %s, even though volume is not started "
                                     "yet" % self.volname))
        self.assertIn("is not started", err, ("volume status exited with"
                                              " incorrect error message"))

        # Get volume status with --xml
        vol_status = get_volume_status(self.servers[1], self.volname)
        self.assertIsNone(vol_status, ("Unexpected: volume status --xml for %s"
                                       " succeeded even though the volume is"
                                       " not started yet" % self.volname))

        # start the volume
        ret, _, _ = volume_start(self.servers[1], self.volname)
        self.assertEqual(ret, 0, "Failed to start volume %s" % self.volname)

        # Get volume status
        ret, _, _ = volume_status(self.servers[1], self.volname)
        self.assertEqual(ret, 0,
                         ("Failed to get volume status for %s" % self.volname))

        # Get volume status with --xml
        vol_status = get_volume_status(self.servers[1], self.volname)
        self.assertIsNotNone(vol_status,
                             ("Failed to get volume "
                              "status --xml for %s" % self.volname))

        # Verify there are no crashes while executing gluster volume status
        status = True
        glusterd_log = (self._get_test_specific_glusterd_log(
            self.mnode).split("\n"))
        for line in glusterd_log:
            if ' E ' in line:
                status = False
                g.log.error("Unexpected! Error found: %s", line)

        self.assertTrue(status, "Error found in glusterd logs")
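
The XML helpers above ultimately wrap `gluster volume status <vol> --xml`, whose output carries opRet/opErrstr fields; below is a minimal sketch of that round trip, assuming a local `gluster` binary on PATH (the function name here is illustrative, not part of glustolibs):

import subprocess
import xml.etree.ElementTree as ET

def volume_status_xml(volname):
    """Return the parsed status XML root, or None if the CLI reports an error."""
    proc = subprocess.run(
        ["gluster", "volume", "status", volname, "--xml"],
        capture_output=True, text=True)
    try:
        root = ET.fromstring(proc.stdout)
    except ET.ParseError:
        return None
    # opRet is "0" on success; a not-started volume yields a non-zero
    # opRet and an opErrstr such as "Volume <name> is not started".
    if root.findtext("opRet") != "0":
        return None
    return root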
Example #3
    def test_volume_absent_bricks(self):
        """
        Test Case:
        1) Create Volume
        2) Remove any one Brick directory
        3) Start Volume and compare the failure message
        4) Check the gluster volume status and compare the status message
        """
        # Fetching the brick list
        brick_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(brick_list, "Failed to get the bricks in"
                             " the volume")

        # Command for removing brick directory
        random_brick = random.choice(brick_list)
        node, brick_path = random_brick.split(':')
        cmd = 'rm -rf ' + brick_path

        # Removing brick directory of one node
        ret, _, _ = g.run(node, cmd)
        self.assertEqual(ret, 0, "Failed to remove brick dir")
        g.log.info("Brick directory removed successfully")

        # Starting volume
        ret, _, err = volume_start(self.mnode, self.volname)
        self.assertNotEqual(
            ret, 0, "Unexpected: Volume started successfully "
            "even though brick directory is removed "
            "for %s" % self.volname)
        g.log.info("Expected: Failed to start volume %s", self.volname)

        # Checking volume start failed message
        msg = "Failed to find brick directory"
        self.assertIn(
            msg, err, "Expected message is %s but volume start "
            "command failed with this "
            "message %s" % (msg, err))
        g.log.info("Volume start failed with correct error message %s", err)

        # Checking Volume status
        ret, _, err = volume_status(self.mnode, self.volname)
        self.assertNotEqual(
            ret, 0, "Success in getting volume status, volume "
            "status should fail when volume is in "
            "not started state ")
        g.log.info("Failed to get volume status which is expected")

        # Checking volume status message
        msg = "Volume %s is not started" % self.volname
        self.assertIn(msg, err, 'Incorrect error message for gluster vol '
                      'status')
        g.log.info("Correct error message for volume status")
Example #4
    def test_default_log_level_of_cli(self):
        """
        Test Case:
        1) Create and start a volume
        2) Run volume info command
        3) Run volume status command
        4) Run volume stop command
        5) Run volume start command
        6) Check the default log level of cli.log
        """
        # Check volume info operation
        ret, _, _ = volume_info(self.mnode)
        self.assertEqual(
            ret, 0, "Failed to execute volume info"
            " command on node: %s" % self.mnode)
        g.log.info(
            "Successfully executed the volume info command on"
            " node: %s", self.mnode)

        # Check volume status operation
        ret, _, _ = volume_status(self.mnode)
        self.assertEqual(
            ret, 0, "Failed to execute volume status command"
            " on node: %s" % self.mnode)
        g.log.info(
            "Successfully executed the volume status command"
            " on node: %s", self.mnode)

        # Check volume stop operation
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, "Failed to stop the volume %s on node: %s" %
            (self.volname, self.mnode))
        g.log.info("Successfully stopped the volume %s on node: %s",
                   self.volname, self.mnode)

        # Check volume start operation
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, "Failed to start the volume %s on node: %s" %
            (self.volname, self.mnode))
        g.log.info("Successfully started the volume %s on node: %s",
                   self.volname, self.mnode)

        # Check the default log level of cli.log
        cmd = 'grep -F "] D [" /var/log/glusterfs/cli.log | wc -l'
        ret, out, _ = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0, "Failed to execute the command")
        self.assertEqual(
            int(out), 0, "Unexpected: Default log level of "
            "cli.log is not INFO")
        g.log.info("Default log level of cli.log is INFO as expected")
def restart_block_hosting_volume(gluster_pod,
                                 block_hosting_vol,
                                 sleep_time=120,
                                 hostname=None):
    """restars block hosting volume service

    Args:
        hostname (str): hostname on which gluster pod exists
        gluster_pod (podcmd | str): gluster pod class object has gluster
                                    pod and ocp master node or gluster
                                    pod name
        block_hosting_vol (str): name of block hosting volume
    """
    gluster_pod = _get_gluster_pod(gluster_pod, hostname)

    gluster_volume_status = get_volume_status(gluster_pod, block_hosting_vol)
    if not gluster_volume_status:
        raise AssertionError("failed to get gluster volume status")

    g.log.info("Gluster volume %s status\n%s : " %
               (block_hosting_vol, gluster_volume_status))

    ret, out, err = volume_stop(gluster_pod, block_hosting_vol)
    if ret != 0:
        err_msg = "failed to stop gluster volume %s on pod %s error: %s" % (
            block_hosting_vol, gluster_pod, err)
        g.log.error(err_msg)
        raise AssertionError(err_msg)

    # Explicit wait to stop ios and pvc creation for 2 mins
    time.sleep(sleep_time)
    ret, out, err = volume_start(gluster_pod, block_hosting_vol, force=True)
    if ret != 0:
        err_msg = "failed to start gluster volume %s on pod %s error: %s" % (
            block_hosting_vol, gluster_pod, err)
        g.log.error(err_msg)
        raise AssertionError(err_msg)

    ret, out, err = volume_status(gluster_pod, block_hosting_vol)
    if ret != 0:
        err_msg = ("failed to get status for gluster volume %s on pod %s "
                   "error: %s" % (block_hosting_vol, gluster_pod, err))
        g.log.error(err_msg)
        raise AssertionError(err_msg)
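
A hypothetical invocation of the helper above; the pod, volume, and host names are placeholders:

restart_block_hosting_volume(
    "glusterfs-storage-abc12",       # gluster pod name (placeholder)
    "vol_0123456789abcdef",          # block hosting volume (placeholder)
    sleep_time=60,                   # settle window between stop and start
    hostname="ocp-master-1")         # OCP master node (placeholder)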
Example #6
def log_volume_info_and_status(mnode, volname):
    """Logs volume info and status
    Args:
        mnode (str): Node on which cmd has to be executed.
        volname (str): volume name
    Returns:
        bool: Returns True if getting volume info and status is successful.
            False Otherwise.
    """
    ret, _, _ = volume_info(mnode, volname)
    if ret:
        g.log.error("Failed to get volume info %s", volname)
        return False

    ret, _, _ = volume_status(mnode, volname)
    if ret:
        g.log.error("Failed to get volume status %s", volname)
        return False

    return True
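
A hypothetical usage of the helper above; "server1" and "testvol" are placeholders:

if not log_volume_info_and_status("server1", "testvol"):
    raise AssertionError("failed to log volume info and status of testvol")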
def restart_file_volume(file_vol, sleep_time=120):
    """Restart file volume (stop and start volume).

    Args:
        file_vol (str): name of a file volume
    """
    gluster_volume_status = get_volume_status(
        "auto_get_gluster_endpoint", file_vol)
    if not gluster_volume_status:
        raise AssertionError("failed to get gluster volume status")

    g.log.info("Gluster volume %s status\n%s : " % (
        file_vol, gluster_volume_status)
    )

    ret, out, err = volume_stop("auto_get_gluster_endpoint", file_vol)
    if ret != 0:
        err_msg = "Failed to stop gluster volume %s. error: %s" % (
            file_vol, err)
        g.log.error(err_msg)
        raise AssertionError(err_msg)

    # Explicit wait to stop ios and pvc creation for 2 mins
    time.sleep(sleep_time)

    ret, out, err = volume_start(
        "auto_get_gluster_endpoint", file_vol, force=True)
    if ret != 0:
        err_msg = "failed to start gluster volume %s error: %s" % (
            file_vol, err)
        g.log.error(err_msg)
        raise AssertionError(err_msg)

    ret, out, err = volume_status("auto_get_gluster_endpoint", file_vol)
    if ret != 0:
        err_msg = ("Failed to get status for gluster volume %s error: %s" % (
            file_vol, err))
        g.log.error(err_msg)
        raise AssertionError(err_msg)
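
A hypothetical call to the helper above; "testvol" is a placeholder file volume name, and the gluster endpoint is resolved internally via "auto_get_gluster_endpoint":

restart_file_volume("testvol", sleep_time=60)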
    def test_status_string(self):
        '''
        -> Create Volume
        -> Start rebalance
        -> Check task type in volume status
        -> Check task status string in volume status
        -> Check task type in volume status xml
        -> Check task status string in volume status xml
        -> Start Remove brick operation
        -> Check task type in volume status
        -> Check task status string in volume status
        -> Check task type in volume status xml
        -> Check task status string in volume status xml
        '''

        # Start rebalance
        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to start rebalance for volume %s"
                         % self.volname)
        g.log.info("Rebalance started successfully on volume %s",
                   self.volname)

        # Wait for rebalance to complete
        ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
        self.assertTrue(ret, "Rebalance failed for volume %s" % self.volname)
        g.log.info("Rebalance completed successfully on volume %s",
                   self.volname)

        # Getting volume status after rebalance completes
        ret, out, _ = volume_status(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to get volume status for volume %s"
                         % self.volname)
        g.log.info("Volume status successful on volume %s", self.volname)
        status_list = out.splitlines()

        # Verifying task type from volume status for rebalance
        self.assertIn('Rebalance', status_list[-4],
                      "Incorrect task type found in volume status for %s"
                      % self.volname)
        g.log.info("Correct task type found in volume status for %s",
                   self.volname)

        # Verifying task status string in volume status for rebalance
        self.assertIn('completed', status_list[-2],
                      "Incorrect task status found in volume status for %s"
                      % self.volname)
        g.log.info("Correct task status found in volume status for %s",
                   self.volname)

        # Getting volume status --xml after rebalance completes
        vol_status = get_volume_status(self.mnode, self.volname,
                                       options='tasks')

        # Verifying task type from volume status --xml for rebalance
        self.assertEqual('Rebalance',
                         vol_status[self.volname]['task_status'][0]['type'],
                         "Incorrect task type found in volume status xml "
                         "for %s" % self.volname)
        g.log.info("Correct task type found in volume status xml for %s",
                   self.volname)

        # Verifying task status string from volume status --xml for rebalance
        self.assertEqual(
            'completed',
            vol_status[self.volname]['task_status'][0]['statusStr'],
            "Incorrect task status found in volume status "
            "xml for %s" % self.volname)
        g.log.info("Correct task status found in volume status xml %s",
                   self.volname)

        # Getting sub vols
        subvol_dict = get_subvols(self.mnode, self.volname)
        subvol = subvol_dict['volume_subvols'][1]

        # Perform remove brick start
        ret, _, _ = remove_brick(self.mnode, self.volname, subvol,
                                 'start', replica_count=3)
        self.assertEqual(ret, 0, "Failed to start remove brick operation "
                                 "for %s" % self.volname)
        g.log.info("Remove brick operation started successfully on volume %s",
                   self.volname)

        # Getting volume status after remove brick start
        ret, out, _ = volume_status(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to get volume status for volume %s"
                         % self.volname)
        g.log.info("Volume status successful on volume %s", self.volname)
        status_list = out.splitlines()

        # Verifying task type from volume status after remove brick start
        self.assertIn('Remove brick', status_list[-8],
                      "Incorrect task type found in volume status for "
                      "%s" % self.volname)
        g.log.info("Correct task type found in volume status task for %s",
                   self.volname)

        # Verifying task status string in volume status after remove
        # brick start
        remove_status = ['completed', 'in progress']
        ret = status_list[-2].split(':')[1].strip() in remove_status
        self.assertTrue(ret, "Incorrect task status found in volume status "
                             "task for %s" % self.volname)
        g.log.info("Correct task status found in volume status task for %s",
                   self.volname)

        # Getting volume status --xml after remove brick start
        vol_status = get_volume_status(self.mnode, self.volname,
                                       options='tasks')

        # Verifying task type from volume status --xml after
        # remove brick start
        self.assertEqual('Remove brick',
                         vol_status[self.volname]['task_status'][0]['type'],
                         "Incorrect task type found in volume status xml for "
                         "%s" % self.volname)
        g.log.info("Correct task type found in volume status xml for %s",
                   self.volname)

        # Verifying task status string from volume status --xml
        # after remove brick start
        ret = (vol_status[self.volname]['task_status'][0]['statusStr'] in
               remove_status)
        self.assertTrue(ret, "Incorrect task status found in volume status "
                             "xml for %s" % self.volname)
        g.log.info("Correct task status found in volume status xml %s",
                   self.volname)
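
The 'type' and 'statusStr' keys used above map directly onto the task elements in the CLI's tasks XML; below is a minimal sketch of that parsing, assuming a local `gluster` binary (the helper name is illustrative, not part of glustolibs):

import subprocess
import xml.etree.ElementTree as ET

def get_task_statuses(volname):
    """Return [(task type, status string), ...] for a volume's tasks."""
    proc = subprocess.run(
        ["gluster", "volume", "status", volname, "tasks", "--xml"],
        capture_output=True, text=True, check=True)
    root = ET.fromstring(proc.stdout)
    return [(task.findtext("type"), task.findtext("statusStr"))
            for task in root.iter("task")]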
Example #10
    def test_rebalance_quorum(self):
        '''
        -> Create volume
        -> Stop the volume
        -> Enable server quorum
        -> Start the volume
        -> Set server quorum ratio to 95%
        -> Stop glusterd on any one node
        -> Perform rebalance operation
        -> Check gluster volume status
        -> Start glusterd
        '''
        # Stop the Volume
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to stop the volume %s" % self.volname)
        g.log.info("Volume stopped successfully %s", self.volname)

        # Enabling server quorum
        ret = set_volume_options(self.mnode, self.volname,
                                 {'cluster.server-quorum-type': 'server'})
        self.assertTrue(ret, "Failed to set quorum type for volume %s"
                        % self.volname)
        g.log.info("Able to set quorum type successfully for %s", self.volname)

        # Start the volume
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to start the volume %s"
                         % self.volname)
        g.log.info("Volume started successfully %s", self.volname)

        # Setting Quorum ratio in percentage
        ret = set_volume_options(self.mnode, 'all',
                                 {'cluster.server-quorum-ratio': '95%'})
        self.assertTrue(ret, "Failed to set server quorum ratio on %s"
                        % self.servers)
        g.log.info("Able to set server quorum ratio successfully on %s",
                   self.servers)

        # Stopping glusterd
        self.random_server = random.choice(self.servers[1:])
        ret = stop_glusterd(self.random_server)
        self.assertTrue(ret, "Failed to stop glusterd on %s"
                        % self.random_server)
        g.log.info("Glusterd stopped successfully on %s", self.random_server)

        msg = ("volume rebalance: " + self.volname + ": failed: Quorum not "
                                                     "met. Volume operation "
                                                     "not allowed")

        # Start Rebalance
        ret, _, err = rebalance_start(self.mnode, self.volname)
        self.assertNotEqual(ret, 0, "Unexpected: Rebalance should fail when "
                                    "quorum is in not met condition but "
                                    "Rebalance succeeded %s" % self.volname)
        g.log.info("Expected: Rebalance failed when quorum is in not met "
                   "condition %s", self.volname)

        # Checking Rebalance failed message
        self.assertIn(msg, err, "Error message is not correct for rebalance "
                                "operation when quorum not met")
        g.log.info("Error message is correct for rebalance operation "
                   "when quorum not met")

        # Volume Status
        ret, out, _ = volume_status(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to get volume status for %s"
                         % self.volname)
        g.log.info("Successful in getting volume status for %s", self.volname)

        # Checking volume status message
        self.assertNotIn('rebalance', out, "Unexpected: Found rebalance task "
                                           "in vol status of %s"
                         % self.volname)
        g.log.info("Expected: Not Found rebalance task in vol status of %s",
                   self.volname)

        # Starting glusterd
        ret = start_glusterd(self.random_server)
        self.assertTrue(ret, "Failed to start glusterd on %s"
                        % self.random_server)
        g.log.info("Glusted started successfully on %s", self.random_server)
Example #11
    def test_volume_absent_bricks(self):
        '''
        -> Create Volume
        -> Remove any one Brick directory
        -> Start Volume
        -> Check the gluster volume status
        '''
        num_of_bricks = 0
        replica = True

        if self.volume_type == 'distributed':
            num_of_bricks = 3
            replica = False

        elif self.volume_type == 'replicated':
            num_of_bricks = 3

        elif self.volume_type == 'distributed-replicated':
            num_of_bricks = 6

        # Forming brick list
        brick_list = form_bricks_list(self.mnode, self.volname, num_of_bricks,
                                      self.servers, self.all_servers_info)
        if replica:
            # Creating Volume
            ret, _, _ = volume_create(self.mnode,
                                      self.volname,
                                      brick_list,
                                      replica_count=3)
            self.assertEqual(ret, 0,
                             "Volume creation failed for %s" % self.volname)
            g.log.info("volume created successfully %s", self.volname)
        else:
            # Creating Volume
            ret, _, _ = volume_create(self.mnode, self.volname, brick_list)
            self.assertEqual(ret, 0,
                             "Volume creation failed for %s" % self.volname)
            g.log.info("volume created successfully %s", self.volname)

        # Command for removing brick directory
        random_brick = random.choice(brick_list)
        node, brick_path = random_brick.split(':')
        cmd = 'rm -rf ' + brick_path

        # Removing brick directory of one node
        ret, _, _ = g.run(node, cmd)
        self.assertEqual(ret, 0, "Failed to remove brick dir")
        g.log.info("Brick directory removed successfully")

        # Starting volume
        ret, _, err = volume_start(self.mnode, self.volname)
        self.assertNotEqual(
            ret, 0, "Unexpected: Volume started successfully "
            "even though brick directory is removed "
            "for %s" % self.volname)
        g.log.info("Expected: Failed to start volume %s", self.volname)

        # Checking volume start failed message
        msg = "Failed to find brick directory"
        self.assertIn(
            msg, err, "Expected message is %s but volume start "
            "command failed with this "
            "message %s" % (msg, err))
        g.log.info("Volume start failed with correct error message %s", err)

        # Checking Volume status
        ret, _, err = volume_status(self.mnode, self.volname)
        self.assertNotEqual(
            ret, 0, "Success in getting volume status, volume "
            "status should fail when volume is in "
            "not started state ")
        g.log.info("Failed to get volume status which is expected")

        # Checking volume status message
        msg = "Volume %s is not started" % self.volname
        self.assertIn(msg, err, 'Incorrect error message for gluster vol '
                      'status')
        g.log.info("Correct error message for volume status")
Example #12
    def test_snap_del_original_volume(self):
        # pylint: disable=too-many-statements
        """
        Steps:
        1. Create and mount a distributed-replicated volume
        2. Perform I/O on the mountpoints
        3. Create a snapshot
        4. Activate the snapshot created in step 3
        5. Clone the snapshot created in step 3
        6. Delete the original volume
        7. Validate the cloned volume

        """
        # Perform I/O
        all_mounts_procs = []
        g.log.info("Generating data for %s:"
                   "%s", self.mounts[0].client_system,
                   self.mounts[0].mountpoint)
        # Create files
        g.log.info('Creating files...')
        command = ("/usr/bin/env python %s create_files -f 100 "
                   "--fixed-file-size 1k %s" %
                   (self.script_upload_path, self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system,
                           command,
                           user=self.mounts[0].user)
        all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts[0]),
                        "IO failed on some of the clients")
        self.io_validation_complete = True

        # Creating snapshot
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(ret, 0, ("Failed to create snapshot %s for "
                                  "volume %s" % (self.snap, self.volname)))
        g.log.info("Snapshot %s created successfully for volume "
                   "%s", self.snap, self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to Activate snapshot "
                                  "%s" % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # snapshot list
        g.log.info("getting snapshot list")
        ret, out, _ = snap_list(self.mnode)
        self.assertEqual(
            ret, 0, ("Failed to list snapshot of volume %s" % self.volname))
        self.assertIn(
            self.snap, out, "Failed to validate snapshot"
            " %s in snap list" % self.snap)
        g.log.info("Snapshot list command for volume %s is "
                   "successful", self.volname)

        # Creating a Clone of snapshot:
        g.log.info("Starting to create Clone of Snapshot")
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone)
        self.assertEqual(ret, 0,
                         ("Failed to create clone volume %s "
                          "from snapshot %s" % (self.clone, self.snap)))
        g.log.info("Clone Volume %s created successfully from snapshot "
                   "%s", self.clone, self.snap)

        # After cloning, wait for 5 seconds before starting the volume
        sleep(5)

        # Start the cloned volume
        g.log.info("Starting cloned volume %s", self.clone)
        ret, _, _ = volume_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, ("Failed to start cloned volume "
                                  "%s" % self.clone))
        g.log.info("Volume %s started successfully", self.clone)

        for mount_obj in self.mounts:
            # Unmount Volume
            g.log.info("Starting to Unmount Volume %s", self.volname)
            ret = umount_volume(mount_obj.client_system,
                                mount_obj.mountpoint,
                                mtype=self.mount_type)
            self.assertTrue(ret,
                            ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Delete original volume
        g.log.info("deleting original volume")
        ret = cleanup_volume(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to delete volume %s" % self.volname))
        g.log.info("successfully deleted volume %s", self.volname)

        # get volume info
        g.log.info("Getting and validating cloned volume %s", self.clone)
        vol_info = get_volume_info(self.mnode, self.clone)
        self.assertIsNotNone(
            vol_info, "Failed to get volume info "
            "for cloned volume %s" % self.clone)
        self.assertEqual(
            vol_info[self.clone]['statusStr'], 'Started',
            "Unexpected: cloned volume is not started "
            "%s " % self.clone)
        g.log.info("Volume %s is in Started state", self.clone)

        # Volume status
        g.log.info("Getting volume status")
        ret, out, _ = volume_status(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to get volume status for"
                         " %s" % self.clone)
        vol = out.strip().split("\n")
        vol1 = vol[0].strip().split(":")
        self.assertEqual(
            vol1[1], " %s" % self.clone, "Failed to "
            "get volume status for volume %s" % self.clone)
        g.log.info("Volume Status is Successful for %s clone volume",
                   self.clone)

        # Volume list validate
        g.log.info("Starting to list volume")
        ret, vol_list, _ = volume_list(self.mnode)
        self.assertEqual(ret, 0, "Failed to get volume list")
        vol_list1 = vol_list.strip().split("\n")
        self.assertIn(
            "%s" % self.clone, vol_list1, "Failed to validate "
            "volume list for volume %s" % self.clone)
        g.log.info("Volume list validated Successfully for"
                   "volume %s", self.clone)