Code Example #1
    def test_heketi_volume_snapshot_create_with_one_brick_down(self):
        """
        Test heketi volume snapshot create with one brick down
        """
        h_vol_size = 1
        self.node = self.ocp_master_node[0]
        snap_name = 'snap_creation_test_with_one_brick_down'
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        h_vol_info = heketi_volume_create(h_node, h_url, h_vol_size, json=True)
        self.addCleanup(heketi_volume_delete, h_node, h_url, h_vol_info["id"])
        h_volume_name = h_vol_info["name"]
        pids_before = self._get_bricks_pids(h_volume_name)
        self.assertTrue(
            pids_before,
            "Failed to get the brick process for volume {}".format(
                h_volume_name))

        # kill only one brick process
        cmd = "kill -9 {}".format(pids_before[0][1])
        cmd_run_on_gluster_pod_or_node(self.node, cmd, pids_before[0][0])
        pids_after = self._get_bricks_pids(h_volume_name)
        self.assertTrue(
            pids_after,
            "Failed to get the brick process for volume {}".format(
                h_volume_name))
        # the killed brick should no longer report a pid
        self.assertFalse(
            pids_after[0][1],
            "Failed to kill brick process {} on brick {}".format(
                pids_before[0][1], pids_after[0][0]))

        # Get the snapshot list
        ret, out, err = snap_list('auto_get_gluster_endpoint')
        self.assertFalse(
            ret,
            "Failed to list snapshot from gluster side due to error"
            " {}".format(err))
        snap_list_before = out.split("\n")
        ret, out, err = snap_create(
            'auto_get_gluster_endpoint', h_volume_name,
            snap_name, timestamp=False)
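        # With one brick down, gluster refuses to take a snapshot (all
        # bricks must be online), so a non-zero return code is expected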
        exp_err_msg = "Snapshot command failed\n"
        self.assertTrue(
            ret, "Unexpected: snapshot create from gluster side succeeded "
            "even with one brick down, output: {}".format(out))
        self.assertEqual(
            out, exp_err_msg,
            "Expecting error msg {} and {} to match".format(
                out, exp_err_msg))

        # Check for count after snapshot creation
        ret, out, err = snap_list('auto_get_gluster_endpoint')
        self.assertFalse(
            ret,
            "Failed to list snapshot from gluster with error {}".format(err))
        snap_list_after = out.split("\n")
        self.assertEqual(
            snap_list_before, snap_list_after,
            "Expecting Snapshot count before {} and after creation {} to be "
            "same".format(snap_list_before, snap_list_after))
Code Example #2
    def test_heketi_volume_snapshot_create(self):
        """Test heketi volume snapshot create operation"""
        h_volume_size = 1
        snap_name = 'snap_test_heketi_volume_snapshot_create_1'
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        h_volume_info = heketi_volume_create(h_node,
                                             h_url,
                                             h_volume_size,
                                             json=True)
        self.addCleanup(heketi_volume_delete, h_node, h_url,
                        h_volume_info["id"])

        h_volume_name = h_volume_info["name"]
        ret, _, _ = snap_create('auto_get_gluster_endpoint',
                                h_volume_name,
                                snap_name,
                                timestamp=False)
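        # podcmd.GlustoPod() wraps snap_delete so that, at cleanup time, it
        # can run against the gluster pod behind 'auto_get_gluster_endpoint'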
        self.addCleanup(podcmd.GlustoPod()(snap_delete),
                        "auto_get_gluster_endpoint", snap_name)
        self.assertEqual(
            ret, 0, "Failed to create snapshot {} for heketi volume {}".format(
                snap_name, h_volume_name))
        ret, out, _ = snap_list('auto_get_gluster_endpoint')
        self.assertEqual(
            ret, 0,
            "Failed to list snapshot {} for heketi volume".format(snap_name))
        self.assertIn(
            snap_name, out,
            "Heketi volume snapshot {} not found in {}".format(snap_name, out))
Code Example #3
        def create_snap(value, volname, snap, clone, counter):
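            # value: iterable of snapshot indices to create; counter is the
            # expected snapshot-list length. Returns 0 once 40 snapshots
            # exist, otherwise returns counter + 10 for the next call.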
            # Creating snapshots
            g.log.info("Starting to Create snapshot")
            for snap_count in value:
                ret, _, _ = snap_create(self.mnode, volname,
                                        "snap%s" % snap_count)
                self.assertEqual(ret, 0, ("Failed to create "
                                          "snapshot for volume %s" % volname))
                g.log.info(
                    "Snapshot snap%s created successfully"
                    " for volume %s", snap_count, volname)

            # Validate snapshot list
            g.log.info("Starting to list all snapshots")
            ret, out, _ = snap_list(self.mnode)
            self.assertEqual(
                ret, 0, ("Failed to list snapshot of volume %s" % volname))
            v_list = out.strip().split('\n')
            self.assertEqual(len(v_list), counter, "Failed to validate "
                             "all snapshots")
            g.log.info(
                "Snapshot listed and validated for volume %s"
                " successfully", volname)
            if counter == 40:
                return 0

            # Creating a Clone of snapshot:
            g.log.info("Starting to Clone Snapshot")
            ret, _, _ = snap_clone(self.mnode, snap, clone)
            self.assertEqual(ret, 0, "Failed to clone %s" % clone)
            g.log.info("Clone volume %s created successfully", clone)

            # Start cloned volumes
            g.log.info("starting to Validate clone volumes are started")
            ret, _, _ = volume_start(self.mnode, clone)
            self.assertEqual(ret, 0, "Failed to start %s" % clone)
            g.log.info("%s started successfully", clone)

            # log Cloned Volume information
            g.log.info("Logging Volume info and Volume status")
            ret = log_volume_info_and_status(self.mnode, clone)
            self.assertTrue(ret, "Failed to Log Info and Status of Volume %s"
                            % clone)
            g.log.info("Successfully Logged Info and Status")
            return counter + 10
Code Example #4
    def test_create_snap_bricks(self):
        """
        1. Get brick list
        2. Check all bricks are online
        3. Select one brick randomly and bring it offline
        4. Get brick list again
        5. Check that not all bricks are online
        6. Get offline bricks list
        7. Get online bricks list
        8. Create snapshot of volume
        9. Snapshot create should fail
        """

        # get the bricks from the volume
        g.log.info("Fetching bricks for the volume : %s" % self.volname)
        bricks_list = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List : %s" % bricks_list)

        # check all bricks are online
        g.log.info("Verifying all bricks are online or not.....")
        ret = are_bricks_online(self.mnode, self.volname, bricks_list)
        self.assertTrue(ret, ("Not all bricks are online"))
        g.log.info("All bricks are online.")

        # Selecting one brick randomly to bring it offline
        g.log.info("Selecting one brick randomly to bring it offline")
        brick_to_bring_offline = random.choice(bricks_list)
        g.log.info("Brick to bring offline:%s " % brick_to_bring_offline)
        ret = bring_bricks_offline(self.volname, brick_to_bring_offline, None)
        self.assertTrue(ret, "Failed to bring the bricks offline")
        g.log.info("Randomly Selected brick: %s" % brick_to_bring_offline)

        # get brick list
        g.log.info("Fetching bricks for the volume : %s" % self.volname)
        bricks_list = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List : %s" % bricks_list)

        # check that the offlined brick is no longer reported online
        g.log.info("Verifying that not all bricks are online")
        ret = are_bricks_online(self.mnode, self.volname, bricks_list)
        self.assertFalse(ret, "Unexpectedly, all bricks are still online")
        g.log.info("Verified that not all bricks are online.")

        # get the bricks for the volume
        g.log.info("Fetching bricks for the volume : %s" % self.volname)
        bricks_list = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List : %s" % bricks_list)

        # Offline Bricks list
        offbricks = get_offline_bricks_list(self.mnode, self.volname)
        g.log.info("Bricks Offline: %s" % offbricks)

        # Online Bricks list
        onbricks = get_online_bricks_list(self.mnode, self.volname)
        g.log.info("Bricks Online: %s" % onbricks)

        # Create snapshot of volume; with a brick offline this must fail
        ret, _, _ = snap_create(self.mnode, self.volname, "snap1", False,
                                "Description with $p3c1al characters!")
        self.assertNotEqual(
            ret, 0, "Unexpected: snapshot snap1 of volume %s was created "
            "even with a brick offline" % self.volname)
        g.log.info("Snapshot creation of volume %s failed as expected",
                   self.volname)

        # Volume info
        ret = get_volume_info(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to perform gluster volume "
                              "info on volume %s" % self.volname))
        g.log.info("Gluster volume info on volume %s is successful",
                   self.volname)
        # snapshot list
        ret, _, _ = snap_list(self.mnode)
        self.assertEqual(
            ret, 0, ("Failed to list snapshots of volume %s" % self.volname))
        g.log.info("Snapshot list command for volume %s was successful",
                   self.volname)
Code Example #5
    def test_clone_delete_snap(self):
        """
        Clone from a snapshot of one volume
        * Create and mount the volume
        * Enable some volume options
        * Create 2 snapshots and activate them
        * Reset the volume
        * Create a clone of each snapshot created
        * Mount both the clones
        * Perform I/O on the mount points
        * Check volume options of the cloned volumes
        * Create a snapshot of each cloned volume
        * Cleanup snapshots and volumes
        """

        # pylint: disable=too-many-statements, too-many-locals
        # Enabling Volume options on the volume and validating
        g.log.info("Enabling volume options for volume %s ", self.volname)
        options = {" features.uss": "enable"}
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(
            ret, ("Failed to set volume options for volume %s" % self.volname))
        g.log.info("Successfully set volume options"
                   "for volume %s", self.volname)

        # Validate feature.uss enabled or not
        g.log.info("Validating feature.uss is enabled")
        option = "features.uss"
        vol_option = get_volume_options(self.mnode, self.volname, option)
        self.assertEqual(vol_option['features.uss'], 'enable', "Failed"
                         " to validate "
                         "volume options")
        g.log.info("Successfully validated volume options"
                   "for volume %s", self.volname)

        # Creating snapshot
        g.log.info("Starting to Create snapshot")
        for snap_count in range(0, 2):
            ret, _, _ = snap_create(self.mnode, self.volname,
                                    "snap%s" % snap_count)
            self.assertEqual(
                ret, 0,
                ("Failed to create snapshot for volume %s" % self.volname))
            g.log.info("Snapshot snap%s created successfully"
                       "for volume %s", snap_count, self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        for snap_count in range(0, 2):
            ret, _, _ = snap_activate(self.mnode, "snap%s" % snap_count)
            self.assertEqual(
                ret, 0, ("Failed to Activate snapshot snap%s" % snap_count))
            g.log.info("Snapshot snap%s activated successfully", snap_count)

        # Reset volume:
        g.log.info("Starting to Reset Volume")
        ret, _, _ = volume_reset(self.mnode, self.volname, force=False)
        self.assertEqual(ret, 0, ("Failed to reset volume %s" % self.volname))
        g.log.info("Reset Volume on volume %s is Successful", self.volname)

        # Validate features.uss was reset to its default (off)
        g.log.info("Validating features.uss is disabled after reset")
        option = "features.uss"
        vol_option = get_volume_options(self.mnode, self.volname, option)
        self.assertEqual(vol_option['features.uss'], 'off', "Failed"
                         " to validate "
                         "volume options")
        g.log.info("Successfully validated volume options "
                   "for volume %s", self.volname)

        # Verify volume's all process are online
        g.log.info("Starting to Verify volume's all process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(ret, ("Volume %s : All process are"
                              "not online" % self.volname))
        g.log.info("Volume %s : All process are online", self.volname)

        # Creating and starting a Clone of snapshot
        g.log.info("Starting to Clone Snapshot")
        for clone_count in range(0, 2):
            ret, _, _ = snap_clone(self.mnode, "snap%s" % clone_count,
                                   "clone%s" % clone_count)
            self.assertEqual(ret, 0,
                             ("Failed to clone clone%s volume" % clone_count))
            g.log.info("clone%s volume created successfully", clone_count)

        # Start Cloned volume
        g.log.info("starting to Validate clone volumes are started")
        for clone_count in range(0, 2):
            ret, _, _ = volume_start(self.mnode, "clone%s" % clone_count)
            self.assertEqual(ret, 0, ("Failed to start clone%s" % clone_count))
            g.log.info("clone%s started successfully", clone_count)
        g.log.info("All the clone volumes are started Successfully")

        # Validate Volume start of cloned volume
        g.log.info("Starting to Validate Volume start")
        for clone_count in range(0, 2):
            vol_info = get_volume_info(self.mnode, "clone%s" % clone_count)
            if vol_info["clone%s" % clone_count]['statusStr'] != 'Started':
                raise ExecutionError("Failed to get volume info for clone%s" %
                                     clone_count)
            g.log.info("Volume clone%s is in Started state", clone_count)

        # Validate feature.uss enabled or not
        g.log.info("Validating feature.uss is enabled")
        option = "features.uss"
        for clone_count in range(0, 2):
            vol_option = get_volume_options(self.mnode,
                                            "clone%s" % clone_count, option)
            self.assertEqual(vol_option['features.uss'], 'enable', "Failed"
                             " to validate "
                             "volume options")
            g.log.info(
                "Successfully validated volume options "
                "for volume clone%s", clone_count)

        # Mount both the cloned volumes
        g.log.info("Mounting Cloned Volumes")
        for mount_obj in range(0, 2):
            self.mpoint = "/mnt/clone%s" % mount_obj
            cmd = "mkdir -p  %s" % self.mpoint
            ret, _, _ = g.run(self.clients[0], cmd)
            self.assertEqual(ret, 0, ("Creation of directory %s"
                                      "for mounting"
                                      "volume %s failed: Directory already"
                                      "present" %
                                      (self.mpoint, "clone%s" % mount_obj)))
            g.log.info(
                "Creation of directory %s for mounting volume %s "
                "success", self.mpoint, ("clone%s" % mount_obj))
            ret, _, _ = mount_volume("clone%s" % mount_obj, self.mount_type,
                                     self.mpoint, self.mnode, self.clients[0])
            self.assertEqual(ret, 0, ("clone%s is not mounted" % mount_obj))
            g.log.info("clone%s is mounted Successfully", mount_obj)

        # Perform I/O on mount
        # Start I/O on all mounts
        g.log.info("Starting to Perform I/O on Mountpoint")
        all_mounts_procs = []
        for mount_obj in range(0, 2):
            cmd = ("cd /mnt/clone%s/; for i in {1..10};"
                   "do touch file$i; done; cd;") % mount_obj
            proc = g.run(self.clients[0], cmd)
            all_mounts_procs.append(proc)
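            # Note: g.run executes synchronously and returns (rc, out, err),
            # so these "procs" are completed results, not async handles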
        g.log.info("I/O on mountpoint is successful")

        # create snapshot
        g.log.info("Starting to Create snapshot of clone volume")
        ret0, _, _ = snap_create(self.mnode, "clone0", "snap2")
        self.assertEqual(ret0, 0, "Failed to create the snapshot "
                         "snap2 from clone0")
        g.log.info("Snapshot snap2 created successfully from clone0")
        ret1, _, _ = snap_create(self.mnode, "clone1", "snap3")
        self.assertEqual(ret1, 0, "Failed to create the snapshot snap3 "
                         "from clone1")
        g.log.info("Snapshot snap3 created successfully from clone1")

        # Listing all Snapshots present
        g.log.info("Starting to list all snapshots")
        ret, _, _ = snap_list(self.mnode)
        self.assertEqual(ret, 0, ("Failed to list snapshots present"))
        g.log.info("Snapshots successfully listed")
Code Example #6
    def test_snap_self_heal(self):
        """
        Steps:

        1. create a volume
        2. mount volume
        3. create snapshot of that volume
        4. Activate snapshot
        5. Clone snapshot and Mount
        6. Perform I/O
        7. Bring down a few bricks from the volume without
           affecting the volume or cluster.
        8. Perform I/O
        9. Bring the offline bricks back online
        10. Validate heal is complete with arequal

        """
        # pylint: disable=too-many-statements, too-many-locals
        # Creating snapshot:
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(
            ret, 0, ("Failed to create snapshot for volume %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume %s", self.snap,
                   self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0,
                         ("Failed to Activate snapshot %s" % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # snapshot list
        ret, _, _ = snap_list(self.mnode)
        self.assertEqual(ret, 0, ("Failed to list all the snapshot"))
        g.log.info("Snapshot list command was successful")

        # Creating a Clone volume from snapshot:
        g.log.info("Starting to Clone volume from Snapshot")
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone)
        self.assertEqual(ret, 0, ("Failed to clone %s from snapshot %s" %
                                  (self.clone, self.snap)))
        g.log.info("%s created successfully", self.clone)

        # Start the cloned volume
        g.log.info("Starting the cloned volume")
        ret, _, _ = volume_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to start clone %s" % self.clone)
        g.log.info("clone volume %s started successfully", self.clone)

        # Mounting a clone volume
        g.log.info("Mounting a clone volume")
        ret, _, _ = mount_volume(self.clone, self.mount_type, self.mount1,
                                 self.mnode, self.clients[0])
        self.assertEqual(ret, 0,
                         "Failed to mount clone Volume %s" % self.clone)
        g.log.info("Clone volume %s mounted Successfully", self.clone)

        # Checking cloned volume mounted or not
        ret = is_mounted(self.clone, self.mount1, self.mnode, self.clients[0],
                         self.mount_type)
        self.assertTrue(
            ret,
            "Failed to mount clone volume on mount point: %s" % self.mount1)
        g.log.info("clone Volume %s mounted on %s", self.clone, self.mount1)

        # write files on all mounts
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mount1)
        all_mounts_procs = []
        cmd = ("python %s create_files "
               "-f 10 --base-file-name file %s" %
               (self.script_upload_path, self.mount1))
        proc = g.run(self.clients[0], cmd)
        all_mounts_procs.append(proc)
        g.log.info("Successful in creating I/O on mounts")

        # get the bricks from the volume
        g.log.info("Fetching bricks for the volume : %s", self.clone)
        bricks_list = get_all_bricks(self.mnode, self.clone)
        g.log.info("Brick List : %s", bricks_list)

        # Select bricks of the cloned volume to bring offline
        g.log.info("Starting to bring bricks offline")
        bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
            self.mnode, self.clone))
        # materialize the filter object so the list can be iterated
        # more than once below
        bricks_to_bring_offline = list(filter(
            None, (bricks_to_bring_offline_dict['hot_tier_bricks'] +
                   bricks_to_bring_offline_dict['cold_tier_bricks'] +
                   bricks_to_bring_offline_dict['volume_bricks'])))
        g.log.info("Bricks to bring offline: %s ", bricks_to_bring_offline)
        ret = bring_bricks_offline(self.clone, bricks_to_bring_offline)
        self.assertTrue(ret, "Failed to bring the bricks offline")
        g.log.info("Successful in bringing bricks: %s offline",
                   bricks_to_bring_offline)

        # Offline Bricks list
        offline_bricks = get_offline_bricks_list(self.mnode, self.clone)
        self.assertIsNotNone(
            offline_bricks, "Failed to get offline brick list "
            "for volume %s" % self.clone)
        for bricks in offline_bricks:
            self.assertIn(bricks, bricks_to_bring_offline,
                          "Failed to validate "
                          "Bricks offline")
        g.log.info("Bricks Offline: %s", offline_bricks)

        # Online Bricks list
        online_bricks = get_online_bricks_list(self.mnode, self.clone)
        self.assertIsNotNone(
            online_bricks, "Failed to get online bricks"
            " for volume %s" % self.clone)
        g.log.info("Bricks Online: %s", online_bricks)

        # write files mountpoint
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mount1)
        all_mounts_procs = []
        cmd = ("python %s create_files "
               "-f 10 --base-file-name file %s" %
               (self.script_upload_path, self.mount1))
        proc = g.run(self.clients[0], cmd)
        all_mounts_procs.append(proc)
        g.log.info("Successful in creating I/O on mounts")

        # Bring all bricks online
        g.log.info("bring all bricks online")
        ret = bring_bricks_online(self.mnode, self.clone,
                                  bricks_to_bring_offline)
        self.assertTrue(ret, "Failed to bring bricks online")
        g.log.info("Successful in bringing all bricks online")

        # Validate Bricks are online
        g.log.info("Validating all bricks are online")
        ret = are_bricks_online(self.mnode, self.clone, bricks_list)
        self.assertTrue(ret, "Failed to bring all the bricks online")
        g.log.info("bricks online: %s", bricks_list)

        # Wait for volume processes to be online
        g.log.info("Wait for volume processes to be online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.clone)
        self.assertTrue(ret, ("Failed to wait for volume %s processes to "
                              "be online" % self.clone))
        g.log.info(
            "Successful in waiting for volume %s processes to be "
            "online", self.clone)

        # Verify volume's all process are online
        g.log.info("Verifying volume's all process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.clone)
        self.assertTrue(
            ret, ("Volume %s : All process are not online" % self.clone))
        g.log.info("Volume %s : All process are online", self.clone)

        # wait for the heal process on the cloned volume to complete
        g.log.info("waiting for heal process to complete")
        ret = monitor_heal_completion(self.mnode, self.clone)
        self.assertTrue(ret, "Failed to complete the heal process")
        g.log.info("Successfully completed heal process")

        # Check areequal
        # get the subvolumes
        g.log.info("Starting to get sub-volumes for volume %s", self.clone)
        subvols = get_subvols(self.mnode, self.clone)
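        # get_subvols returns a dict; 'volume_subvols' holds one list of
        # bricks per subvolume of the cloned volume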
        num_subvols = len(subvols['volume_subvols'])
        g.log.info("Number of subvolumes in volume %s:", num_subvols)

        # Get arequals and compare
        g.log.info("Starting to compare arequal checksums")
        for i in range(0, num_subvols):
            # Get arequal for the first brick of the subvolume
            subvol_brick_list = subvols['volume_subvols'][i]
            node, brick_path = subvol_brick_list[0].split(':')
            command = ('arequal-checksum -p %s '
                       '-i .glusterfs -i .landfill -i .trashcan' % brick_path)
            ret, arequal, _ = g.run(node, command)
            first_brick_total = arequal.splitlines()[-1].split(':')[-1]

            # Get arequal for every brick and compare with the first brick
            for brick in subvol_brick_list:
                node, brick_path = brick.split(':')
                command = ('arequal-checksum -p %s '
                           '-i .glusterfs -i .landfill -i .trashcan' %
                           brick_path)
                ret, brick_arequal, _ = g.run(node, command)
                self.assertFalse(ret,
                                 'Failed to get arequal on brick %s' % brick)
                g.log.info('Getting arequal for %s is successful', brick)
                brick_total = brick_arequal.splitlines()[-1].split(':')[-1]
                self.assertEqual(
                    first_brick_total, brick_total,
                    'Arequals for subvol and %s are not equal' % brick)
                g.log.info('Arequals for subvol and %s are equal', brick)
        g.log.info('All arequals are equal for distributed-replicated')
Code Example #7
    def test_snap_del_original_volume(self):
        # pylint: disable=too-many-statements
        """
        Steps:
        1. Create and mount distributed-replicated volume
        2. Perform I/O on mountpoints
        3. Create snapshot
        4. Activate the snapshot created in step 3
        5. Clone the snapshot created in step 3
        6. delete original volume
        7. Validate clone volume

        """
        # Perform I/O
        all_mounts_procs = []
        g.log.info("Generating data for %s:"
                   "%s", self.mounts[0].client_system,
                   self.mounts[0].mountpoint)
        # Create files
        g.log.info('Creating files...')
        command = ("/usr/bin/env python %s create_files -f 100 "
                   "--fixed-file-size 1k %s" %
                   (self.script_upload_path, self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system,
                           command,
                           user=self.mounts[0].user)
        all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts[0]),
                        "IO failed on some of the clients")
        self.io_validation_complete = True

        # Creating snapshot
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(ret, 0, ("Failed to create snapshot %s for "
                                  "volume %s" % (self.snap, self.volname)))
        g.log.info("Snapshot %s created successfully for volume "
                   "%s", self.snap, self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to Activate snapshot "
                                  "%s" % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # snapshot list
        g.log.info("getting snapshot list")
        ret, out, _ = snap_list(self.mnode)
        self.assertEqual(
            ret, 0, ("Failed to list snapshot of volume %s" % self.volname))
        self.assertIn(
            self.snap, out, "Failed to validate snapshot"
            " %s in snap list" % self.snap)
        g.log.info("Snapshot list command for volume %s is "
                   "successful", self.volname)

        # Creating a Clone of snapshot:
        g.log.info("Starting to create Clone of Snapshot")
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone)
        self.assertEqual(ret, 0,
                         ("Failed to create clone volume %s "
                          "from snapshot %s" % (self.clone, self.snap)))
        g.log.info("Clone Volume %s created successfully from snapshot "
                   "%s", self.clone, self.snap)

        # After cloning a volume wait for 5 second to start the volume
        sleep(5)

        # Validate clone volumes are started:
        g.log.info("starting to Validate clone volumes are started")
        ret, _, _ = volume_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, ("Failed to start cloned volume "
                                  "%s" % self.clone))
        g.log.info("Volume %s started successfully", self.clone)

        for mount_obj in self.mounts:
            # Unmount Volume
            g.log.info("Starting to Unmount Volume %s", self.volname)
            ret = umount_volume(mount_obj.client_system,
                                mount_obj.mountpoint,
                                mtype=self.mount_type)
            self.assertTrue(ret,
                            ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Delete original volume
        g.log.info("deleting original volume")
        ret = cleanup_volume(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to delete volume %s" % self.volname))
        g.log.info("successfully deleted volume %s", self.volname)

        # get volume info
        g.log.info("Getting and validating cloned volume %s", self.clone)
        vol_info = get_volume_info(self.mnode, self.clone)
        self.assertIsNotNone(
            vol_info, "Failed to get volume info "
            "for cloned volume %s" % self.clone)
        self.assertEqual(
            vol_info[self.clone]['statusStr'], 'Started',
            "Unexpected: cloned volume is not started "
            "%s " % self.clone)
        g.log.info("Volume %s is in Started state", self.clone)

        # Volume status
        g.log.info("Getting volume status")
        ret, out, _ = volume_status(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to get volume status for"
                         " %s" % self.clone)
        vol = out.strip().split("\n")
        vol1 = vol[0].strip().split(":")
        self.assertEqual(
            vol1[1], " %s" % self.clone, "Failed to "
            "get volume status for volume %s" % self.clone)
        g.log.info("Volume Status is Successful for %s clone volume",
                   self.clone)

        # Volume list validate
        g.log.info("Starting to list volume")
        ret, vol_list, _ = volume_list(self.mnode)
        self.assertEqual(ret, 0, "Failed to get volume list")
        vol_list1 = vol_list.strip().split("\n")
        self.assertIn(
            "%s" % self.clone, vol_list1, "Failed to validate "
            "volume list for volume %s" % self.clone)
        g.log.info("Volume list validated Successfully for"
                   "volume %s", self.clone)
Code Example #8
    def test_snap_uss_while_io(self):
        # pylint: disable=too-many-statements
        """
        Steps:
        1. Create volume
        2. enable uss on created volume
        3. validate uss running
        4. validate snapd running on all nodes
        5. perform io on mounts
        6. create 10 snapshots with description
        7. validate with snapshot list
        8. validate io is completed
        9. Activate snapshots to list all snaps
           under .snaps
        10. validate snapshots under .snaps directory
        """
        # Enable USS
        g.log.info("Enable USS for volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume"
                         "%s" % self.volname)
        g.log.info("Successfully enabled USS on volume %s", self.volname)

        # Validate USS running
        g.log.info("Validating USS enabled or disabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(
            ret, "Failed to validate USS for volume "
            "%s" % self.volname)
        g.log.info("Successfully validated USS for Volume" "%s", self.volname)

        # Validate snapd running
        for server in self.servers:
            g.log.info("Validating snapd daemon on:%s", server)
            ret = is_snapd_running(server, self.volname)
            self.assertTrue(ret, "Snapd is Not running on " "%s" % server)
            g.log.info("Snapd Running on node: %s", server)

        # Perform I/O
        g.log.info("Starting to Perform I/O")
        all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Generating data for %s:"
                       "%s", mount_obj.client_system, mount_obj.mountpoint)
            # Create files
            g.log.info('Creating files...')
            command = (
                "python %s create_files -f 100 --fixed-file-size 1M %s" %
                (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               command,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
        self.io_validation_complete = False
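        # I/O keeps running in the background; it is validated with
        # validate_io_procs after the snapshots are created below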

        # Creating snapshot with description
        g.log.info("Starting to Create snapshot")
        for count in range(0, self.snap_count):
            self.snap = "snap%s" % count
            ret, _, _ = snap_create(self.mnode,
                                    self.volname,
                                    self.snap,
                                    description='$p3C!@l C#@R@cT#R$')
            self.assertEqual(
                ret, 0,
                ("Failed to create snapshot for volume %s" % self.volname))
            g.log.info("Snapshot %s created successfully"
                       " for volume %s", self.snap, self.volname)

        # Validate snapshot list
        g.log.info("Starting to list all snapshots")
        ret, out, _ = snap_list(self.mnode)
        self.assertEqual(
            ret, 0, ("Failed to list snapshot of volume %s" % self.volname))
        s_list = out.strip().split('\n')
        self.assertEqual(len(s_list), self.snap_count, "Failed to validate "
                         "all snapshots")
        g.log.info(
            "Snapshots listed and validated for volume %s"
            " successfully", self.volname)

        # Activating snapshot
        g.log.info("Activating snapshot")
        for count in range(0, self.snap_count):
            self.snap = "snap%s" % count
            ret, _, _ = snap_activate(self.mnode, self.snap)
            self.assertEqual(ret, 0, "Failed to Activate snapshot "
                             "%s" % self.snap)
            g.log.info("snapshot %s activated successfully", self.snap)

        # Validate IO is completed
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")
        self.io_validation_complete = True

        # validate snapshots are listed under .snaps directory
        g.log.info("Validating snaps under .snaps")
        ret = view_snaps_from_mount(self.mounts, s_list)
        self.assertTrue(ret, "Failed to list snaps under .snaps" "directory")
        g.log.info("Snapshots Validated successfully")
Code Example #9
    def test_snap_delete(self):
        # pylint: disable=too-many-statements
        """
        Delete a snapshot of a volume

        * Create 10 snapshots
        * Delete the 10 previously created snapshots
        * Enable auto-delete snapshot
        * Set max-hard-limit and max-soft-limit
        * Verify the limits by creating another 20 snapshots
        * Oldest of the newly created snapshots will be deleted
        * Retaining the latest 8 (soft-limit) snapshots
        * Cleanup snapshots and volumes
        """

        # creating 10 snapshots
        g.log.info("Creating 10 snapshots")
        for snap_count in range(0, 10):
            ret, _, _ = snap_create(self.mnode, self.volname,
                                    "snap%s" % snap_count, False,
                                    "Description with $p3c1al characters!")
            self.assertEqual(ret, 0, ("Failed to create snapshot"
                                      "snap%s" % snap_count))
            g.log.info("Snapshot snap%s of volume %s created"
                       "successfully.", snap_count, self.volname)

        # snapshot list
        g.log.info("Starting to list all snapshots")
        ret, _, _ = snap_list(self.mnode)
        self.assertEqual(ret, 0, ("Failed to list snapshot"
                                  "of volume %s" % self.volname))
        g.log.info("Snapshot list command for volume %s"
                   "was successful", self.volname)

        # deleting all 10 snapshots from the volume
        g.log.info("Starting to Delete 10 created snapshots")
        ret, _, _ = snap_delete_by_volumename(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to delete"
                                  "snapshot of volume %s" % self.volname))
        g.log.info("Snapshots of volume %s deleted Successfully", self.volname)

        # enabling auto-delete
        g.log.info("Enabling auto-delete")
        cmd = "gluster snapshot config auto-delete enable"
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0, ("Failed to enable auto-delete"
                                  "snapshot config"
                                  "option on volume % s" % self.volname))
        g.log.info("Snapshot auto-delete Successfully enabled")

        # setting max-hard-limit
        g.log.info("Setting max-hard-limit")
        option = {'snap-max-hard-limit': '10'}
        ret, _, _ = set_snap_config(self.mnode, option, self.volname)
        self.assertEqual(ret, 0, ("Failed to set snap-max-hardlimit"
                                  "config option for"
                                  "volume %s" % self.volname))
        g.log.info(
            "snap-max-hardlimit config option Successfully set for"
            "volume %s", self.volname)

        # Validating max-hard-limit
        g.log.info("Validating max-hard-limit")
        hardlimit = get_snap_config(self.mnode)
        self.assertIsNotNone(hardlimit, "Failed to get snap config")
        get_hardlimit = hardlimit['volumeConfig'][0]['hardLimit']
        self.assertEqual(get_hardlimit, '10', ("Failed to Validate "
                                               "max-hard-limit"))
        g.log.info("Successfully validated max-hard-limit")

        # setting max-soft-limit
        g.log.info("Setting max-soft-limit")
        option = {'snap-max-soft-limit': '80'}
        ret, _, _ = set_snap_config(self.mnode, option)
        self.assertEqual(ret, 0, ("Failed to set snap-max-soft-limit"
                                  "config option"))
        g.log.info("snap-max-soft-limit config option Successfully set")

        # Validating max-soft-limit
        g.log.info("Validating max-soft-limit")
        softlimit = get_snap_config(self.mnode)
        self.assertIsNotNone(softlimit, "Failed to get snap config")
        get_softlimit = softlimit['volumeConfig'][0]['softLimit']
        self.assertEqual(get_softlimit, '8', ("Failed to Validate "
                                              "max-soft-limit"))
        g.log.info("Successfully validated max-soft-limit")

        # Create 20 more snapshots. As the snapshot count crosses the
        # soft-limit, the oldest of the newly created snapshots should be
        # auto-deleted so that only the latest 8 (soft-limit) snapshots
        # remain.
        g.log.info("Starting to create 20 more snapshots")
        for snap_count in range(10, 30):
            ret, _, _ = snap_create(
                self.mnode, self.volname, "snap%s" % snap_count, False,
                "This is the Description with $p3c1al"
                "characters!")
            self.assertEqual(ret, 0, ("Failed to create snapshot snap%s"
                                      "for volume"
                                      "%s" % (snap_count, self.volname)))
            g.log.info("Snapshot snap%s of volume"
                       "%s created successfully.", snap_count, self.volname)

        # snapshot list to list total number of snaps after auto-delete
        g.log.info("validate total no.of snapshots after auto-delete enable")
        cmd = "gluster snapshot list | wc -l"
        ret, out, _ = g.run(self.mnode, cmd)
        tot = out.strip().split('\n')
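        # snap-max-hard-limit is 10 and snap-max-soft-limit is 80%, so with
        # auto-delete enabled only the latest 8 snapshots should remain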
        self.assertEqual(
            ret, 0, ("Failed to list snapshot of volume %s" % self.volname))
        g.log.info(
            "Total number of snapshots created after auto-delete "
            "enabled is %s", tot[0])
        self.assertEqual(tot[0], '8', ("Failed to validate snapshots with "
                                       "expected number of snapshots"))
        g.log.info("Successfully validated snapshots with expected "
                   "number of snapshots")
Code Example #10
    def test_snap_uss(self):
        # pylint: disable=too-many-statements
        """
        Steps:
        1. Create a volume and mount it.
        2. Perform I/O on mounts
        3. create a .snaps directory and create some files
        4. Create Multiple snapshots of volume
        5. Check info of volume
        6. Enable USS for volume
        7. Validate files created under .snaps
        8. Disable USS
        9. Again Validate the files created under .snaps directory
        """
        # write files on all mounts
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mounts)
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate IO
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")

        # Create a .snaps directory (disabling USS first if it is enabled)
        # and start I/O on it
        g.log.info("Starting IO on all mounts...")
        for mount_obj in self.mounts:
            self.mpoint = "%s/.snaps" % mount_obj.mountpoint
            ret = file_exists(mount_obj.client_system, self.mpoint)
            if not ret:
                ret = mkdir(mount_obj.client_system, self.mpoint)
                self.assertTrue(ret, "Failed to create .snaps directory")
                g.log.info("Successfully created .snaps directory")
                break
            else:
                # Validate USS running
                g.log.info("Validating USS enabled or disabled")
                ret = is_uss_enabled(self.mnode, self.volname)
                if not ret:
                    break
                else:
                    g.log.info("USS is enabled in volume %s", self.volname)
                    ret, _, _ = disable_uss(self.mnode, self.volname)
                    self.assertEqual(
                        ret, 0, "Failed to disable USS on "
                        "volume %s" % self.volname)
                    g.log.info("USS disabled in Volume %s", self.volname)
                    ret = mkdir(mount_obj.client_system, self.mpoint)
                    self.assertTrue(ret, "Failed to create .snaps directory")
                    g.log.info("Successfully created .snaps directory")
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name foo %s" %
                   (self.script_upload_path, self.mpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # List files under created .snaps directory
        g.log.info("Starting to list files under .snaps directory")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(ret, 0, "Failed to list files under .snaps")
            g.log.info("Successfully Created files under .snaps directory")
            before_uss_enable = out.strip().split('\n')
            # deleting the mount path from list
            del before_uss_enable[0]

        # Create Multiple snapshots for volume
        g.log.info("Creating snapshots")
        self.snaps_list = []
        for snap_count in range(1, 5):
            self.snap = "snap%s" % snap_count
            ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
            self.assertEqual(
                ret, 0, "Failed to create snapshot "
                "%s for volume %s" % (self.snap, self.volname))
            self.snaps_list.append(self.snap)
            g.log.info("Snapshot %s created successfully for volume %s",
                       self.snap, self.volname)
        g.log.info("Snapshot Creation Successful")

        # Activate the snapshots
        g.log.info("Activating snapshots")
        for snap_count in range(1, 5):
            self.snap = "snap%s" % snap_count
            ret, _, _ = snap_activate(self.mnode, self.snap)
            self.assertEqual(ret, 0,
                             ("Failed to activate snapshot %s" % self.snap))
            g.log.info("Snapshot snap%s activated successfully", self.snap)

        # snapshot list
        g.log.info("Starting to list snapshots")
        ret, out, _ = snap_list(self.mnode)
        self.assertEqual(ret, 0, "Failed to list snapshot")
        snaps = out.strip().split("\n")
        self.assertEqual(len(snaps), 4, "Failed to list all snaps")
        g.log.info("Snapshot list Validated successfully")

        # Enable USS
        g.log.info("Enable USS on volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on cloned volume")
        g.log.info("Successfully enabled USS on Cloned volume")

        # Validate USS running
        g.log.info("Validating USS enabled or disabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, ("USS is disabled in volume %s" % self.volname))
        g.log.info("USS enabled in Volume %s", self.volname)

        # Validate snapshots under .snaps folder
        self.validate_snaps()

        # check snapshots are listed
        g.log.info(".snaps Containing:")
        for mount_obj in self.mounts:
            ret, _, _ = uss_list_snaps(mount_obj.client_system,
                                       mount_obj.mountpoint)
            self.assertEqual(ret, 0, "Failed to list snapshot information")
            g.log.info("Successfully Listed snapshots Created")

        # Disable USS running
        g.log.info("Disable USS on volume")
        ret, _, _ = disable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to disable USS on volume")
        g.log.info("Successfully disabled USS on volume")

        # check snapshots are listed
        g.log.info(".snaps Containing:")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(ret, 0, "Failed to list snapshot information")
            g.log.info("Successfully listed snapshots Created")

        # Validate after disabling USS, all files should be same
        g.log.info("Validate files after disabling uss")
        after_uss_disable = out.strip().split('\n')
        # deleting the mount path from list
        del after_uss_disable[0]
        for files in before_uss_enable:
            self.assertIn(files, after_uss_disable,
                          "Files are not the same under .snaps")
        g.log.info("Validated files under .snaps directory")