Example #1
    def test_heketi_volume_snapshot_create_with_one_brick_down(self):
        """
        Test heketi volume snapshot create with one brick down
        """
        h_vol_size = 1
        self.node = self.ocp_master_node[0]
        snap_name = 'snap_creation_test_with_one_brick_down'
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        h_vol_info = heketi_volume_create(h_node, h_url, h_vol_size, json=True)
        self.addCleanup(heketi_volume_delete, h_node, h_url, h_vol_info["id"])
        h_volume_name = h_vol_info["name"]
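        # _get_bricks_pids is assumed to be a helper on this test class that
        # returns a list of (gluster node, brick pid) pairs for the volume.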
        pids_before = self._get_bricks_pids(h_volume_name)
        self.assertTrue(
            pids_before,
            "Failed to get the brick process for volume {}".format(
                h_volume_name))

        # kill only one brick process
        cmd = "kill -9 {}".format(pids_before[0][1])
        cmd_run_on_gluster_pod_or_node(self.node, cmd, pids_before[0][0])
        pids_after = self._get_bricks_pids(h_volume_name)
        self.assertTrue(
            pids_after, "Failed to get the brick process for volume {}".format(
                h_volume_name))
        self.assertNotEqual(
            pids_before[0][1], pids_after[0][1],
            "Failed to kill brick process {} on brick {}".format(
                pids_before[0][1], pids_before[0][0]))

        # Get the snapshot list
        ret, out, err = snap_list('auto_get_gluster_endpoint')
        self.assertFalse(
            ret, "Failed to list snapshot from gluster side due to error"
            " {}".format(err))
        snap_list_before = out.split("\n")
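        # Gluster requires all bricks of a volume to be online when taking a
        # snapshot, so the create below is expected to fail with a brick down.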
        ret, out, err = snap_create('auto_get_gluster_endpoint',
                                    h_volume_name,
                                    snap_name,
                                    timestamp=False)
        exp_err_msg = "Snapshot command failed\n"
        self.assertTrue(
            ret, "Unexpected: snapshot create command succeeded from gluster "
            "side even though one brick of volume {} is down".format(
                h_volume_name))
        self.assertEqual(
            out, exp_err_msg,
            "Expecting error msg {} and {} to match".format(out, exp_err_msg))

        # Check for count after snapshot creation
        ret, out, err = snap_list('auto_get_gluster_endpoint')
        self.assertFalse(
            ret,
            "Failed to list snapshot from gluster with error {}".format(err))
        snap_list_after = out.split("\n")
        self.assertEqual(
            snap_list_before, snap_list_after,
            "Expected snapshot list before ({}) and after ({}) the failed "
            "creation attempt to be the same".format(
                snap_list_before, snap_list_after))
Example #2
    def test_heketi_volume_snapshot_delete(self):
        """Test heketi volume snapshot delete operation"""
        h_volume_size = 1
        snap_name = 'snap_test_heketi_volume_snapshot_create_1'
        h_node, h_url = self.heketi_client_node, self.heketi_server_url

        h_volume_info = heketi_volume_create(h_node,
                                             h_url,
                                             h_volume_size,
                                             json=True)
        self.addCleanup(heketi_volume_delete, h_node, h_url,
                        h_volume_info["id"])

        # Get the snapshot list before snap creation
        snap_list_before = get_snap_list('auto_get_gluster_endpoint')
        self.assertIsNotNone(
            snap_list_before,
            "Failed to get the snapshot list {}".format(snap_list_before))

        # Create a snapshot
        h_volume_name = h_volume_info["name"]
        ret, _, err = snap_create('auto_get_gluster_endpoint',
                                  h_volume_name,
                                  snap_name,
                                  timestamp=False)
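        # podcmd.GlustoPod()(snap_delete) is assumed to wrap snap_delete so
        # that the cleanup command runs inside a gluster pod.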
        self.addCleanup(podcmd.GlustoPod()(snap_delete),
                        "auto_get_gluster_endpoint", snap_name)
        self.assertFalse(
            ret, "Failed to create snapshot {} for heketi volume {} with"
            " error {}".format(snap_name, h_volume_name, err))

        snap_list = get_snap_list('auto_get_gluster_endpoint')
        self.assertIsNotNone(
            snap_list, "Failed to get the snapshot list {}".format(snap_list))
        self.assertIn(
            snap_name, snap_list,
            "Heketi volume snapshot {} not found in {}".format(
                snap_name, snap_list))

        # Delete the snapshot
        ret, _, err = snap_delete('auto_get_gluster_endpoint', snap_name)
        self.assertFalse(
            ret, "Failed to delete snapshot {} for heketi volume with err {}".
            format(snap_name, err))

        # Check for count after snapshot deletion
        snap_list_after = get_snap_list('auto_get_gluster_endpoint')
        self.assertIsNotNone(
            snap_list_after,
            "Failed to get the snapshot list {}".format(snap_list_after))
        self.assertEqual(
            snap_list_before, snap_list_after,
            "Expected snapshot list before ({}) and after ({}) deletion to "
            "be the same".format(snap_list_before, snap_list_after))
    def test_peer_probe_snapd_running(self):
        '''
        -> Create Volume
        -> Create snap for that volume
        -> Enable uss
        -> Check snapd running or not
        -> Probe a new node while snapd is running
        '''

        # Perform peer detach; the detached node is treated as the extra
        # server to be probed later
        extra_node = self.servers[-1]
        ret, _, _ = peer_detach(self.mnode, extra_node)
        self.assertEqual(ret, 0, "Peer detach failed for %s" % extra_node)
        g.log.info("Peer detach success for %s", extra_node)

        # Remove the detached node from 'self.servers', because
        # 'self.setup_volume' checks the peer status of every node in
        # 'self.servers' before creating the volume
        self.servers.remove(extra_node)

        # Creating volume
        ret = self.setup_volume()
        self.assertTrue(ret, "Failed Create volume %s" % self.volname)
        g.log.info("Volume created successfully %s", self.volname)

        # Adding node back into self.servers list
        self.servers.append(extra_node)

        # creating Snap
        ret, _, _ = snap_create(self.mnode, self.volname, 'snap1')
        self.assertEqual(ret, 0,
                         "Snap creation failed for volume %s" % self.volname)
        g.log.info("Snap created successfully for volume %s", self.volname)

        # Enabling Snapd(USS)
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0,
                         "Failed to enable USS for volume %s" % self.volname)
        g.log.info("USS Enabled successfully on volume %s", self.volname)

        # Checking snapd running or not
        ret = is_snapd_running(self.mnode, self.volname)
        self.assertTrue(ret, "Snapd not running for volume %s" % self.volname)
        g.log.info("snapd running for volume %s", self.volname)

        # Probing new node
        ret = peer_probe_servers(self.mnode, extra_node)
        self.assertTrue(ret,
                        "Peer Probe failed for new server %s" % extra_node)
        g.log.info("Peer Probe success for new server %s", extra_node)
    def test_snap_info_glusterd_restart(self):
        """
        Verify snapshot info before and after glusterd restart

        * Create multiple snapshots
        * Check snapshot info
          - Without using snapname or volname
          - Using snapname
          - Using volname
        * Restart glusterd on all servers
        * Repeat the snapshot info step for all the three scenarios
          mentioned above
        """

        # pylint: disable=too-many-statements
        # Create snapshots with description
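        # The description deliberately contains special characters to
        # exercise CLI quoting of the snapshot description field.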
        for snap in self.snapshots:
            ret, _, _ = snap_create(self.mnode,
                                    self.volname,
                                    snap,
                                    description='$p3C!@l C#@R@cT#R$')
            self.assertEqual(
                ret, 0,
                ("Failed to create snapshot for volume %s" % self.volname))
            g.log.info("Snapshot %s created successfully for volume %s", snap,
                       self.volname)

        # Perform the snapshot info tests before glusterd restart
        self.snapshot_info()

        # Restart Glusterd on all servers
        for server in self.servers:
            ret = restart_glusterd(server)
            self.assertTrue(ret,
                            ("Failed to restart glusterd on node %s" % server))
            g.log.info("Successfully restarted glusterd on node %s", server)

        # Wait for glusterd to be online and validate glusterd running on all
        # server nodes
        self.assertTrue(
            wait_for_glusterd_to_start(self.servers),
            "Unexpected: glusterd not up on one or more of the nodes")
        g.log.info("Glusterd is up and running on all nodes")

        # Check if peers are connected
        self.assertTrue(wait_for_peers_to_connect(self.mnode, self.servers),
                        "Unexpected: Peers are not in connected state")
        g.log.info("Successful: All peers are in connected state")

        # perform the snapshot info tests after glusterd restart
        self.snapshot_info()
Example #5
    def test_snap_info(self):
        """
        1. Create volumes
        2. create multiple snapshots
        3. Check snapshot info for snapshots created
           using snap name, using volume name and
           without using snap name and volume name
        """
        # pylint: disable=too-many-statements
        # Creating snapshot with description
        g.log.info("Starting to Create snapshot")
        for count in range(0, 2):
            self.snap = "snap%s" % count
            ret, _, _ = snap_create(self.mnode, self.volname,
                                    self.snap,
                                    description='$p3C!@l C#@R@cT#R$')
            self.assertEqual(ret, 0, ("Failed to create snapshot for volume %s"
                                      % self.volname))
        g.log.info("Snapshot %s created successfully"
                   " for volume %s", self.snap, self.volname)

        # Check snapshot info using snap name
        g.log.info("Checking snapshot info using snap name")
        snap_info_chk = get_snap_info_by_snapname(self.mnode,
                                                  self.snap1)
        self.assertIsNotNone(snap_info_chk, "Failed to get snap info")
        self.assertEqual(snap_info_chk['name'], "%s" % self.snap1, "Failed "
                         "to show snapshot info for %s"
                         % self.snap1)
        g.log.info("Successfully checked snapshot info for %s", self.snap1)

        # Check snapshot info using volname
        g.log.info("Checking snapshot info using volname")
        snap_vol_info = get_snap_info_by_volname(self.mnode, self.volname)
        self.assertIsNotNone(snap_vol_info, "Failed to get snap info")
        self.assertEqual(snap_vol_info['originVolume']['name'], "%s"
                         % self.volname,
                         "Failed to show snapshot info for %s"
                         % self.volname)
        g.log.info("Successfully checked snapshot info for %s", self.volname)

        # Validate snapshot information
        g.log.info("Validating snapshot information")
        info_snaps = get_snap_info(self.mnode)
        self.assertIsNotNone(info_snaps, "Failed to get snap info")
        for snap in range(0, 2):
            self.assertEqual(info_snaps[snap]['name'], "snap%s" % snap,
                             "Failed to validate snap information")
        g.log.info("Successfully Validated snap Information")
Example #6
        def create_snap(value, volname, snap, clone, counter):
            # Creating snapshots
            g.log.info("Starting to Create snapshot")
            for snap_count in value:
                ret, _, _ = snap_create(self.mnode, volname,
                                        "snap%s" % snap_count)
                self.assertEqual(ret, 0, ("Failed to create "
                                          "snapshot for volume %s" % volname))
                g.log.info(
                    "Snapshot snap%s created successfully"
                    " for volume %s", snap_count, volname)

            # Validate snapshot list
            g.log.info("Starting to list all snapshots")
            ret, out, _ = snap_list(self.mnode)
            self.assertEqual(
                ret, 0, ("Failed to list snapshot of volume %s" % volname))
            v_list = out.strip().split('\n')
            self.assertEqual(len(v_list), counter, "Failed to validate "
                             "all snapshots")
            g.log.info(
                "Snapshot listed and validated for volume %s"
                " successfully", volname)
            if counter == 40:
                return 0

            # Creating a Clone of snapshot:
            g.log.info("Starting to Clone Snapshot")
            ret, _, _ = snap_clone(self.mnode, snap, clone)
            self.assertEqual(ret, 0, "Failed to clone %s" % clone)
            g.log.info("Clone volume %s created successfully", clone)

            # Start cloned volumes
            g.log.info("starting to Validate clone volumes are started")
            ret, _, _ = volume_start(self.mnode, clone)
            self.assertEqual(ret, 0, "Failed to start %s" % clone)
            g.log.info("%s started successfully", clone)

            # log Cloned Volume information
            g.log.info("Logging Volume info and Volume status")
            ret = log_volume_info_and_status(self.mnode, clone)
            self.assertTrue("Failed to Log Info and Status of Volume %s" %
                            clone)
            g.log.info("Successfully Logged Info and Status")
            return counter + 10
    def test_snap_info(self):
        """
        1. Create volumes
        2. create multiple snapshots
        3. Check snapshot info for snapshots created
           using snap name, using volume name and
           without using snap name and volume name
        4. restart glusterd
        5. follow step 3
        """

        # pylint: disable=too-many-statements
        # Creating snapshot with description
        g.log.info("Starting to Create snapshot")
        for count in range(0, 2):
            self.snap = "snap%s" % count
            ret, _, _ = snap_create(self.mnode,
                                    self.volname,
                                    self.snap,
                                    description='$p3C!@l C#@R@cT#R$')
            self.assertEqual(
                ret, 0,
                ("Failed to create snapshot for volume %s" % self.volname))
        g.log.info("Snapshot %s created successfully"
                   " for volume %s", self.snap, self.volname)
        self.snapshot_info()

        # Restart Glusterd on all nodes
        g.log.info("Restarting Glusterd on all nodes")
        ret = restart_glusterd(self.servers)
        self.assertTrue(ret, "Failed to restart glusterd")
        g.log.info("Successfully restarted glusterd on all nodes")

        # Check Glusterd status
        g.log.info("Check glusterd running or not")
        ret = is_glusterd_running(self.servers)
        self.assertEqual(
            ret, 0, "glusterd is not running on one or more nodes")
        g.log.info("glusterd is running on all nodes")

        self.snapshot_info()
Example #8
    def test_volume_create_snapshot_enabled(self):
        """Validate volume creation with snapshot enabled"""
        factor = 1.5
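        # snapshot_factor is understood to size the backing thin pool at
        # (volume size * factor) to leave headroom for snapshot data.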
        vol_create_info = heketi_ops.heketi_volume_create(
            self.heketi_client_node,
            self.heketi_server_url,
            1,
            snapshot_factor=factor,
            json=True)
        self.addCleanup(heketi_ops.heketi_volume_delete,
                        self.heketi_client_node, self.heketi_server_url,
                        vol_create_info["id"])
        snap_factor_count = vol_create_info["snapshot"]["factor"]
        self.assertEqual(
            snap_factor_count, factor,
            "snapshot factor %s is not the same as %s" % (
                snap_factor_count, factor))

        vol_name, snap_name = vol_create_info["name"], "snap1"
        try:
            ret, out, err = snap_ops.snap_create('auto_get_gluster_endpoint',
                                                 vol_name,
                                                 snap_name,
                                                 timestamp=False)
            self.assertEqual(ret, 0,
                             "Failed to create snapshot %s" % snap_name)

            # Get gluster volume info
            gluster_vol = volume_ops.get_volume_info(
                'auto_get_gluster_endpoint', volname=vol_name)
            self.assertTrue(gluster_vol,
                            "Failed to get volume '%s' info" % vol_name)
            self.assertEqual(
                gluster_vol[vol_name]['snapshotCount'], "1",
                "Failed to get snapshot count for volume %s" % vol_name)
        finally:
            ret, out, err = snap_ops.snap_delete('auto_get_gluster_endpoint',
                                                 snap_name)
            self.assertEqual(ret, 0,
                             "Failed to delete snapshot %s" % snap_name)
Example #9
    def test_entry_transaction_crash_consistency_rename(self):
        """
        Test entry transaction crash consistency : rename

        Description:
        - Create IO of 50 files
        - Rename 20 files
        - Calculate arequal before creating snapshot
        - Create snapshot
        - Rename 20 files more
        - Stop the volume
        - Restore snapshot
        - Start the volume
        - Get arequal after restoring snapshot
        - Compare arequals
        """

        # Creating files on client side
        count = 1
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "--base-file-name %d -f 25 %s"
                   % (self.script_upload_path,
                      count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
            count = count + 10

        # Wait for IO to complete
        ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed to complete on some of the clients")
        self.io_validation_complete = True
        g.log.info("IO is successful on all mounts")

        # Rename files
        self.all_mounts_procs, self.io_validation_complete = [], False
        cmd = ("/usr/bin/env python %s mv -s FirstRename %s"
               % (self.script_upload_path,
                  self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system, cmd,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)

        # Wait for IO to complete
        ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0])
        self.assertTrue(ret, "IO failed to complete on some of the clients")
        self.io_validation_complete = True
        g.log.info("IO is successful on all mounts")

        # Get arequal before creating snapshot
        ret, result_before_snapshot = collect_mounts_arequal(self.mounts[0])
        self.assertTrue(ret, "Collecting arequal-checksum failed")

        # Create snapshot
        snapshot_name = ('entry_transaction_crash_consistency_rename-%s-%s'
                         % (self.volname, self.mount_type))
        ret, _, err = snap_create(self.mnode, self.volname, snapshot_name)
        self.assertEqual(ret, 0, err)
        g.log.info("Snapshot %s created successfully", snapshot_name)

        # Rename files
        self.all_mounts_procs, self.io_validation_complete = [], False
        cmd = ("/usr/bin/env python %s mv -s SecondRename %s"
               % (self.script_upload_path,
                  self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system, cmd,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)

        # Wait for IO to complete
        ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0])
        self.assertTrue(ret, "IO failed to complete on some of the clients")
        self.io_validation_complete = True
        g.log.info("IO is successful on all mounts")

        # Restore snapshot
        ret = snap_restore_complete(self.mnode, self.volname,
                                    snapshot_name)
        self.assertTrue(ret, 'Failed to restore snapshot %s'
                        % snapshot_name)
        g.log.info("Snapshot %s restored successfully", snapshot_name)

        # Check if heal is completed
        ret = is_heal_complete(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal is not complete')
        g.log.info('Heal is completed successfully')

        # Wait for volume graph to get loaded.
        sleep(10)

        # Get arequal after restoring snapshot
        ret, result_after_restoring = collect_mounts_arequal(self.mounts[0])
        self.assertTrue(ret, "Collecting arequal-checksum failed")

        # Checking arequal before creating snapshot
        # and after restoring snapshot
        self.assertEqual(result_before_snapshot, result_after_restoring,
                         'Checksums are not equal')
        g.log.info('Checksums are equal')
    def test_snap_rebalance(self):
        # pylint: disable=too-many-statements, too-many-locals
        """

        Snapshot rebalance contains tests which verify snapshot clone,
        creating snapshots and performing I/O on mountpoints

        Steps:

        1. Create snapshot of a volume
        2. Activate snapshot
        3. Clone snapshot and activate it
        4. Mount cloned volume
        5. Perform I/O on mount point
        6. Calculate arequal for bricks and mountpoints
        7. Add more bricks to the cloned volume
        8. Initiate rebalance
        9. Validate arequal of bricks and mountpoints
        """

        # Creating snapshot:
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(
            ret, 0, ("Failed to create snapshot for volume %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume %s", self.snap,
                   self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0,
                         ("Failed to Activate snapshot %s" % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # Creating a Clone of snapshot:
        g.log.info("creating Clone Snapshot")
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone)
        self.assertEqual(ret, 0, ("Failed to clone volume %s" % self.clone))
        g.log.info("clone volume %s created successfully", self.clone)

        # Starting clone volume
        g.log.info("starting clone volume")
        ret, _, _ = volume_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to start %s" % self.clone)
        g.log.info("clone volume %s started successfully", self.clone)

        # Mounting a clone volume
        g.log.info("Mounting created clone volume")
        ret, _, _ = mount_volume(self.clone, self.mount_type, self.mount1,
                                 self.mnode, self.clients[0])
        self.assertEqual(ret, 0,
                         "clone Volume mount failed for %s" % self.clone)
        g.log.info("cloned volume %s mounted Successfully", self.clone)

        # Validate clone volume mounted or not
        g.log.info("Validate clone volume mounted or not")
        ret = is_mounted(self.clone, self.mount1, self.mnode, self.clients[0],
                         self.mount_type)
        self.assertTrue(
            ret, "Cloned Volume not mounted on mount point: %s" % self.mount1)
        g.log.info("Cloned Volume %s mounted on %s", self.clone, self.mount1)

        # write files to mountpoint
        g.log.info("Starting IO on %s mountpoint...", self.mount1)
        all_mounts_procs = []
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name file %s" %
               (self.script_upload_path, self.mount1))
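        # Note: g.run executes the command synchronously, so file creation
        # finishes before the arequal checks below.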
        proc = g.run(self.clients[0], cmd)
        all_mounts_procs.append(proc)

        self.check_arequal()

        # Expanding the cloned volume
        g.log.info("Starting to expand volume")
        ret = expand_volume(self.mnode, self.clone, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, "Failed to expand volume %s" % self.clone)
        g.log.info("Expand volume successful")

        ret, _, _ = rebalance_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to start rebalance")
        g.log.info("Successfully started rebalance on the "
                   "volume %s", self.clone)

        # Log Rebalance status
        g.log.info("Log Rebalance status")
        _, _, _ = rebalance_status(self.mnode, self.clone)

        # Wait for rebalance to complete
        g.log.info("Waiting for rebalance to complete")
        ret = wait_for_rebalance_to_complete(self.mnode, self.clone)
        self.assertTrue(ret, ("Rebalance is not yet complete "
                              "on the volume %s", self.clone))
        g.log.info("Rebalance is successfully complete on "
                   "the volume %s", self.clone)

        # Check Rebalance status after rebalance is complete
        g.log.info("Checking Rebalance status")
        ret, _, _ = rebalance_status(self.mnode, self.clone)
        self.assertEqual(ret, 0, ("Failed to get rebalance status for "
                                  "the volume %s", self.clone))
        g.log.info("Successfully got rebalance status of the "
                   "volume %s", self.clone)

        self.check_arequal()
Example #11
    def test_validate_snaps_restore(self):
        # pylint: disable=too-many-statements
        # Start IO on all mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Setting some volume option related to snapshot
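        # 'volumeConfig' carries per-volume snapshot limits, while
        # 'systemConfig' carries cluster-wide snapshot settings, matching the
        # layout of 'gluster snapshot config' output.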
        option_before_restore = {
            'volumeConfig': [{
                'softLimit': '100',
                'effectiveHardLimit': '200',
                'hardLimit': '256'
            }],
            'systemConfig': {
                'softLimit': '90%',
                'activateOnCreate': 'disable',
                'hardLimit': '256',
                'autoDelete': 'disable'
            }
        }
        ret = set_snap_config(self.mnode, option_before_restore)
        self.assertTrue(ret,
                        ("Failed to set vol option on %s" % self.volname))
        g.log.info("Volume options for %s are set successfully", self.volname)

        # Get brick list before taking snap_restore
        bricks_before_snap_restore = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List before snap restore "
                   "volume: %s", bricks_before_snap_restore)

        # Creating snapshot
        ret, _, _ = snap_create(self.mnode, self.volname, "snap1")
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot snap1 created successfully for volume  %s",
                   self.volname)

        # Again start IO on all mounts.
        all_mounts_procs = []
        count = 1000
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Reset volume to make sure volume options will reset
        ret, _, _ = volume_reset(self.mnode, self.volname, force=False)
        self.assertEqual(ret, 0, ("Failed to reset %s" % self.volname))
        g.log.info("Reset Volume %s is Successful", self.volname)

        # Removing one brick
        g.log.info("Starting volume shrink")
        ret = shrink_volume(self.mnode, self.volname, force=True)
        self.assertTrue(ret, ("Failed to shrink the volume on "
                              "volume %s", self.volname))
        g.log.info("Shrinking volume is successful on "
                   "volume %s", self.volname)

        # Restore snapshot
        ret = snap_restore_complete(self.mnode, self.volname, "snap1")
        self.assertTrue(ret, ("Failed to restore snap snap1 on the "
                              "volume %s", self.volname))
        g.log.info(
            "Restore of volume is successful from snap1 on "
            "volume  %s", self.volname)

        # Validate volume is up and running
        g.log.info("Verifying volume is up and process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(
            ret, ("Volume %s : All process are not online", self.volname))
        g.log.info("Volume %s : All process are online", self.volname)

        # Get volume options post restore
        option_after_restore = get_snap_config(self.mnode)
        # Compare volume options
        self.assertNotEqual(option_before_restore, option_after_restore,
                            "Unexpected: snapshot config options are "
                            "unchanged after snap restore")

        # Get brick list post restore
        bricks_after_snap_restore = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List after snap restore "
                   "volume: %s", bricks_after_snap_restore)
        # Compare brick_list
        self.assertNotEqual(bricks_before_snap_restore,
                            bricks_after_snap_restore,
                            "Unexpected: brick list is unchanged after "
                            "snap restore")

        # Creating snapshot
        ret, _, _ = snap_create(self.mnode, self.volname, "snap2")
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot snap2 created successfully for volume  %s",
                   self.volname)

        # Again start IO on all mounts after restore
        all_mounts_procs = []
        count = 1000
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")
Example #12
    def test_snap_auto_delete(self):
        """
        Verifying snapshot auto-delete config option

        * Enable auto-delete snapshot
        * Set snap-max-hard limit and snap-max-soft-limit
        * Validate snap-max-hard-limit and snap-max-soft-limit
        * Verify the limits by creating another 20 snapshots
        * Oldest of newly created snapshots will be deleted
        * Retaining the latest 8 (softlimit) snapshots
        * Cleanup snapshots and volumes
        """

        # pylint: disable=too-many-statements
        # Enable auto-delete snapshot config option
        ret, _, _ = set_snap_config(self.mnode, self.autodel_enable)
        self.assertEqual(ret, 0, ("Failed to enable auto-delete snapshot "
                                  "config option on volume %s", self.volname))
        g.log.info("Successfully enabled snapshot auto-delete")

        # Set snap-max-hard-limit snapshot config option for volume
        max_hard_limit = {'snap-max-hard-limit': '10'}
        ret, _, _ = set_snap_config(self.mnode, max_hard_limit, self.volname)
        self.assertEqual(ret, 0, ("Failed to set snap-max-hard-limit"
                                  "config option for volume %s", self.volname))
        g.log.info(
            "Successfully set snap-max-hard-limit config option for"
            "volume %s", self.volname)

        # Validate snap-max-hard-limit snapshot config option
        hard_limit_val = get_snap_config(self.mnode)
        self.assertEqual(hard_limit_val['volumeConfig'][0]['hardLimit'], '10',
                         ("Failed to Validate snap-max-hard-limit"))
        g.log.info("Successfully validated snap-max-hard-limit")

        # Set snap-max-soft-limit snapshot config option
        max_soft_limit = {'snap-max-soft-limit': '80'}
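        # With snap-max-hard-limit set to 10, a soft limit of 80% works out
        # to 8 snapshots, which is what the validation below expects.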
        ret, _, _ = set_snap_config(self.mnode, max_soft_limit)
        self.assertEqual(ret, 0, ("Failed to set snap-max-soft-limit"
                                  "config option"))
        g.log.info("Successfully set snap-max-soft-limit config option")

        # Validate snap-max-soft-limit snapshot config option
        soft_limit_val = get_snap_config(self.mnode)
        self.assertEqual(soft_limit_val['volumeConfig'][0]['softLimit'], '8',
                         ("Failed to Validate max-soft-limit"))
        g.log.info("Successfully validated snap-max-soft-limit")

        # Create 20 snapshots. As the count of snapshots crosses the
        # soft-limit, the oldest of the newly created snapshots are deleted
        # so that only the latest 8 snapshots remain.
        for snapname in self.snapshots:
            ret, _, _ = snap_create(self.mnode,
                                    self.volname,
                                    snapname,
                                    description="This is the Description wit#"
                                    " ($p3c1al) ch@r@cters!")
            self.assertEqual(ret, 0, ("Failed to create snapshot %s for "
                                      "volume %s", snapname, self.volname))
            g.log.info("Snapshot snap%s of volume %s created successfully",
                       snapname, self.volname)

        # Perform snapshot list to get total number of snaps after auto-delete
        # Validate the existence of the snapshots using the snapname
        snaplist = get_snap_list(self.mnode)
        self.assertEqual(len(snaplist), 8,
                         ("Failed: The snapshot count is not as expected"))
        for snapname in self.snapshots[-8:]:
            self.assertIn(
                snapname, snaplist, "Failed to validate snapshot "
                "existence for the snapshot %s" % snapname)
        g.log.info("Successful in validating the Snapshot count and existence "
                   "by snapname")
Example #13
    def test_ec_uss_snapshot(self):
        """
        - Start resource consumption tool
        - Create directory dir1
        - Create 5 directory and 5 files in dir of mountpoint
        - Rename all files inside dir1 at mountpoint
        - Create softlink and hardlink of files in dir1 of mountpoint
        - Delete op for deleting all file in one of the dirs inside dir1
        - Create tiny, small, medium and large file
        - Create IO's
        - Enable USS
        - Create a Snapshot
        - Activate Snapshot
        - List snapshot and the contents inside snapshot
        - Delete Snapshot
        - Create Snapshot with same name
        - Activate Snapshot
        - List snapshot and the contents inside snapshot
        - Validating IO's and waiting for it to complete
        - Close connection and check file exist for memory log
        """
        # pylint: disable=too-many-branches,too-many-statements,too-many-locals
        # Starting resource consumption using top
        log_file_mem_monitor = '/var/log/glusterfs/mem_usage.log'
        cmd = ("for i in {1..20};do top -n 1 -b|egrep "
               "'RES|gluster' & free -h 2>&1 >> %s ;"
               "sleep 10;done" % (log_file_mem_monitor))
        g.log.info(cmd)
        cmd_list_procs = []
        for server in self.servers:
            proc = g.run_async(server, cmd)
            cmd_list_procs.append(proc)

        # Creating dir1
        ret = mkdir(self.mounts[0].client_system,
                    "%s/dir1" % self.mounts[0].mountpoint)
        self.assertTrue(ret, "Failed to create dir1")
        g.log.info("Directory dir1 on %s created successfully", self.mounts[0])

        # Create 5 dir and 5 files in each dir at mountpoint on dir1
        start, end = 1, 5
        for mount_obj in self.mounts:
            # Number of dir and files to be created.
            dir_range = ("%s..%s" % (str(start), str(end)))
            file_range = ("%s..%s" % (str(start), str(end)))
            # Create dir 1-5 at mountpoint.
            ret = mkdir(mount_obj.client_system,
                        "%s/dir1/dir{%s}" % (mount_obj.mountpoint, dir_range))
            self.assertTrue(ret, "Failed to create directory")
            g.log.info("Directory created successfully")

            # Create files inside each dir.
            cmd = ('touch %s/dir1/dir{%s}/file{%s};' %
                   (mount_obj.mountpoint, dir_range, file_range))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "File creation failed")
            g.log.info("File created successfull")

            # Increment counter so that at next client dir and files are made
            # with diff offset. Like at next client dir will be named
            # dir6, dir7...dir10. Same with files.
            start += 5
            end += 5

        # Rename all files inside dir1 at mountpoint on dir1
        cmd = ('cd %s/dir1/dir1/; '
               'for FILENAME in *;'
               'do mv $FILENAME Unix_$FILENAME;'
               'done;' % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to rename file on " "client")
        g.log.info("Successfully renamed file on client")

        # Truncate at any dir in mountpoint inside dir1
        # start is an offset to be added to dirname to act on
        # diff files at diff clients.
        start = 1
        for mount_obj in self.mounts:
            cmd = ('cd %s/dir1/dir%s/; '
                   'for FILENAME in *;'
                   'do echo > $FILENAME;'
                   'done;' % (mount_obj.mountpoint, str(start)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Truncate failed")
            g.log.info("Truncate of files successfull")

        # Create softlink and hardlink of files in mountpoint. Start is an
        # offset to be added to dirname to act on diff files at diff clients.
        start = 1
        for mount_obj in self.mounts:
            cmd = ('cd %s/dir1/dir%s; '
                   'for FILENAME in *; '
                   'do ln -s $FILENAME softlink_$FILENAME;'
                   'done;' % (mount_obj.mountpoint, str(start)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Creating Softlinks have failed")
            g.log.info("Softlink of files have been changed successfully")

            cmd = ('cd %s/dir1/dir%s; '
                   'for FILENAME in *; '
                   'do ln $FILENAME hardlink_$FILENAME;'
                   'done;' % (mount_obj.mountpoint, str(start + 1)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Creating Hardlinks have failed")
            g.log.info("Hardlink of files have been changed successfully")
            start += 5

        # Create tiny, small, medium and large file
        # at mountpoint. Offset to differ filenames
        # at diff clients.
        offset = 1
        for mount_obj in self.mounts:
            cmd = ('cd %s; fallocate -l 100 tiny_file%s.txt'
                   % (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for tiny files failed")
            g.log.info("Fallocate for tiny files successful")

            cmd = ('cd %s; fallocate -l 20M small_file%s.txt'
                   % (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for small files failed")
            g.log.info("Fallocate for small files successful")

            cmd = ('cd %s; fallocate -l 200M medium_file%s.txt'
                   % (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for medium files failed")
            g.log.info("Fallocate for medium files successful")

            cmd = ('cd %s; fallocate -l 1G large_file%s.txt'
                   % (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for large files failed")
            g.log.info("Fallocate for large files successful")
            offset += 1

        # Creating files on client side for dir1
        # Write IO
        all_mounts_procs, count = [], 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s/dir1" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count += 10

        # Enable USS
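        # USS (User Serviceable Snapshots) exposes activated snapshots to
        # clients through the hidden .snaps directory on the mount.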
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume")
        g.log.info("Successfully enabled USS on volume")

        # Create Snapshot
        ret, _, _ = snap_create(self.mnode,
                                self.volname,
                                "ec_snap",
                                timestamp=False)
        self.assertEqual(ret, 0, "Failed to create snapshot ec_snap")
        g.log.info("Snapshot ec_snap of volume %s created"
                   "successfully.", self.volname)

        # Activate snapshot
        ret, _, _ = snap_activate(self.mnode, "ec_snap")
        self.assertEqual(ret, 0, "Failed to activate snapshot ec_snap")
        g.log.info("Snapshot activated successfully")

        # List contents inside snaphot and wait before listing
        sleep(5)
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            self.assertIn(
                "ec_snap", out.split("\n"), "Failed to "
                "validate ec_snap under .snaps directory")
            g.log.info("Activated Snapshot listed Successfully")

        # Delete Snapshot ec_snap
        ret, _, _ = snap_delete(self.mnode, "ec_snap")
        self.assertEqual(ret, 0, "Failed to delete snapshot")
        g.log.info("Snapshot deleted Successfully")

        # Creating snapshot with the same name
        ret, _, _ = snap_create(self.mnode,
                                self.volname,
                                "ec_snap",
                                timestamp=False)
        self.assertEqual(ret, 0, "Failed to create snapshot ec_snap")
        g.log.info("Snapshot ec_snap of volume %s created"
                   "successfully.", self.volname)

        # Activate snapshot ec_snap
        ret, _, _ = snap_activate(self.mnode, "ec_snap")
        self.assertEqual(ret, 0, "Failed to activate snapshot ec_snap")
        g.log.info("Snapshot activated successfully")

        # List contents inside ec_snap and wait before listing
        sleep(5)
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            self.assertIn(
                "ec_snap", out.split('\n'), "Failed to "
                "validate ec_snap under .snaps directory")
            g.log.info("Activated Snapshot listed Successfully")

        # Validating IO's and waiting to complete
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Close connection and check file exist for memory log
        ret = file_exists(self.mnode, '/var/log/glusterfs/mem_usage.log')
        self.assertTrue(ret, "Unexpected:Memory log file does " "not exist")
        g.log.info("Memory log file exists")
        for proc in cmd_list_procs:
            ret, _, _ = proc.async_communicate()
            self.assertEqual(ret, 0, "Memory logging failed")
            g.log.info("Memory logging is successful")
Example #14
    def test_snap_del_original_volume(self):
        # pylint: disable=too-many-statements
        """
        Steps:
        1. Create and mount distributed-replicated volume
        2. Perform I/O on mountpoints
        3. Create snapshot
        4. activate snapshot created in step3
        5. clone created snapshot in step3
        6. delete original volume
        7. Validate clone volume

        """
        # Perform I/O
        all_mounts_procs = []
        g.log.info("Generating data for %s:"
                   "%s", self.mounts[0].client_system,
                   self.mounts[0].mountpoint)
        # Create files
        g.log.info('Creating files...')
        command = ("/usr/bin/env python %s create_files -f 100 "
                   "--fixed-file-size 1k %s" %
                   (self.script_upload_path, self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system,
                           command,
                           user=self.mounts[0].user)
        all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts[0]),
                        "IO failed on some of the clients")
        self.io_validation_complete = True

        # Creating snapshot
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(ret, 0, ("Failed to create snapshot %s for "
                                  "volume %s" % (self.snap, self.volname)))
        g.log.info("Snapshot %s created successfully for volume "
                   "%s", self.snap, self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to Activate snapshot "
                                  "%s" % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # snapshot list
        g.log.info("getting snapshot list")
        ret, out, _ = snap_list(self.mnode)
        self.assertEqual(
            ret, 0, ("Failed to list snapshot of volume %s" % self.volname))
        self.assertIn(
            self.snap, out, "Failed to validate snapshot"
            " %s in snap list" % self.snap)
        g.log.info("Snapshot list command for volume %s is "
                   "successful", self.volname)

        # Creating a Clone of snapshot:
        g.log.info("Starting to create Clone of Snapshot")
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone)
        self.assertEqual(ret, 0,
                         ("Failed to create clone volume %s "
                          "from snapshot %s" % (self.clone, self.snap)))
        g.log.info("Clone Volume %s created successfully from snapshot "
                   "%s", self.clone, self.snap)

        # After cloning a volume, wait for 5 seconds before starting it
        sleep(5)

        # Start the cloned volume
        g.log.info("Starting the cloned volume")
        ret, _, _ = volume_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, ("Failed to start cloned volume "
                                  "%s" % self.clone))
        g.log.info("Volume %s started successfully", self.clone)

        for mount_obj in self.mounts:
            # Unmount Volume
            g.log.info("Starting to Unmount Volume %s", self.volname)
            ret = umount_volume(mount_obj.client_system,
                                mount_obj.mountpoint,
                                mtype=self.mount_type)
            self.assertTrue(ret,
                            ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Delete original volume
        g.log.info("deleting original volume")
        ret = cleanup_volume(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to delete volume %s" % self.volname))
        g.log.info("successfully deleted volume %s", self.volname)

        # get volume info
        g.log.info("Getting and validating cloned volume %s", self.clone)
        vol_info = get_volume_info(self.mnode, self.clone)
        self.assertIsNotNone(
            vol_info, "Failed to get volume info "
            "for cloned volume %s" % self.clone)
        self.assertEqual(
            vol_info[self.clone]['statusStr'], 'Started',
            "Unexpected: cloned volume is not started "
            "%s " % self.clone)
        g.log.info("Volume %s is in Started state", self.clone)

        # Volume status
        g.log.info("Getting volume status")
        ret, out, _ = volume_status(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to get volume status for"
                         " %s" % self.clone)
        vol = out.strip().split("\n")
        vol1 = vol[0].strip().split(":")
        self.assertEqual(
            vol1[1], " %s" % self.clone, "Failed to "
            "get volume status for volume %s" % self.clone)
        g.log.info("Volume Status is Successful for %s clone volume",
                   self.clone)

        # Volume list validate
        g.log.info("Starting to list volume")
        ret, vol_list, _ = volume_list(self.mnode)
        self.assertEqual(ret, 0, "Failed to get volume list")
        vol_list1 = vol_list.strip().split("\n")
        self.assertIn(
            "%s" % self.clone, vol_list1, "Failed to validate "
            "volume list for volume %s" % self.clone)
        g.log.info("Volume list validated Successfully for"
                   "volume %s", self.clone)
    def test_snap_delete_and_list_glusterd_down(self):
        # pylint: disable=too-many-statements
        """
        Steps:

        1. create a volume
        2. mount volume
        3. create 3 snapshot of that volume
        4. delete snapshot snap1
        5. list all snapshots created
        6. restart glusterd
        7. list all snapshots created
           except snap1
        """

        # Creating snapshot:
        g.log.info("Starting to Create snapshot")
        for snap_count in range(0, 3):
            self.snap = "snap%s" % snap_count
            ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
            self.assertEqual(ret, 0, ("Failed to create snapshot for "
                                      "volume %s" % self.volname))
            g.log.info("Snapshot %s created successfully "
                       "for volume %s", self.snap, self.volname)

        # delete snap1 snapshot
        g.log.info("Starting to Delete snapshot snap1")
        ret, _, _ = snap_delete(self.mnode, "snap1")
        self.assertEqual(ret, 0, "Failed to delete" "snapshot snap1")
        g.log.info("Snapshots snap1 deleted Successfully")

        # snapshot list
        g.log.info("Starting to list all snapshots")
        out = get_snap_list(self.mnode)
        self.assertIsNotNone(out, "Failed to list all snapshots")
        self.assertEqual(len(out), 2, "Failed to validate snap list")
        g.log.info("Successfully validated snap list")

        # restart Glusterd
        g.log.info("Restarting Glusterd on all nodes")
        ret = restart_glusterd(self.servers)
        self.assertTrue(
            ret, "Failed to restart glusterd on nodes "
            "%s" % self.servers)
        g.log.info("Successfully restarted glusterd on nodes"
                   " %s", self.servers)

        # check glusterd running
        g.log.info("Checking glusterd is running or not")
        count = 0
        while count < 80:
            ret = is_glusterd_running(self.servers)
            if ret == 0:
                break
            time.sleep(2)
            count += 1

        self.assertEqual(
            ret, 0, "Failed to validate glusterd "
            "running on nodes %s" % self.servers)
        g.log.info("glusterd is running on " "nodes %s", self.servers)

        # snapshot list
        g.log.info("Starting to list all snapshots")
        for server in self.servers:
            out = get_snap_list(server)
            self.assertIsNotNone(out, "Failed to list snapshots on node "
                                 "%s" % server)
            self.assertEqual(
                len(out), 2, "Failed to validate snap list "
                "on node %s" % server)
            g.log.info("Successfully validated snap list on node %s", server)
    def test_uss_snap_active_deactive(self):

        # pylint: disable=too-many-statements
        """
        Steps:
        * Create volume
        * Mount volume
        * Perform I/O on mounts
        * Enable USS
        * Validate USS is enabled
        * Validate snapd is running
        * Create 2 snapshots snapy1 & snapy2
        * Validate snapshots are created
        * Activate snapy1 & snapy2
        * List snaps under .snap directory
          -- snapy1 and snapy2 should be listed under .snaps
        * Deactivate snapy2
        * List snaps under .snap directory
          -- snapy2 is not listed as it is deactivated
        * Activate snapy2
        * List snaps under .snap directory
          -- snapy1 and snapy2 should be listed under .snaps
        """

        # Perform I/O
        g.log.info("Starting IO on all mounts...")
        self.counter = 1
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 2 "
                "--dir-length 2 "
                "--max-num-of-dirs 2 "
                "--num-of-files 2 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        g.log.info("Wait for IO to complete and validate IO ...")
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        self.io_validation_complete = True
        g.log.info("I/O successful on clients")

        # Enable USS
        g.log.info("Enable USS on volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume")
        g.log.info("Successfully enabled USS on volume")

        # Validate USS is enabled
        g.log.info("Validating USS is enabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, "USS is disabled on volume " "%s" % self.volname)
        g.log.info("USS enabled on volume %s", self.volname)

        # Validate snapd running
        for server in self.servers:
            g.log.info("Validating snapd daemon on:%s", server)
            ret = is_snapd_running(server, self.volname)
            self.assertTrue(ret, "Snapd is Not running on " "%s" % server)
            g.log.info("Snapd Running on node: %s", server)

        # Create 2 snapshot
        g.log.info("Creating 2 snapshots for volume %s", self.volname)
        for i in range(1, 3):
            ret, _, _ = snap_create(self.mnode, self.volname, "snapy%s" % i)
            self.assertEqual(
                ret, 0, ("Failed to create snapshot for %s" % self.volname))
            g.log.info("Snapshot %s created successfully for volume  %s",
                       "snapy%s" % i, self.volname)

        # Check for no of snaps using snap_list it should be 2 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            2, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snaps.")

        # Activate snapshot snapy1 & snapy2
        g.log.info("Activating snapshot snapy1 & snapy2")
        for i in range(1, 3):
            ret, _, _ = snap_activate(self.mnode, "snapy%s" % i)
            self.assertEqual(ret, 0, "Failed to activate snapshot snapy%s" % i)
        g.log.info("Both snapshots activated successfully")

        # list activated snapshots directory under .snaps
        g.log.info("Listing activated snapshots under .snaps")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            validate_dir = out.split('\n')
            self.assertIn(
                "snapy1", validate_dir, "Failed to "
                "validate snapy1 under .snaps directory")
            g.log.info("Activated Snapshot snapy1 listed Successfully")
            self.assertIn(
                "snapy2", validate_dir, "Failed to "
                "validate snapy2 under .snaps directory")
            g.log.info("Activated Snapshot snapy2 listed Successfully")

        # Deactivate snapshot snapy2
        g.log.info("Deactivating snapshot snapy2")
        ret, _, _ = snap_deactivate(self.mnode, "snapy2")
        self.assertEqual(ret, 0, "Failed to deactivate snapshot snapy2")
        g.log.info("Successfully deactivated snapshot snapy2")

        # validate snapy2 should not present in mountpoint
        ret = view_snaps_from_mount(self.mounts, "snapy2")
        self.assertFalse(
            ret, " UnExpected : Still able to View snapy2"
            " from mount ")
        g.log.info("Successfully verified deactivated snapshot "
                   "snapy2 is not listed")

        # Activate snapshot snapy2
        ret, _, _ = snap_activate(self.mnode, "snapy2")
        self.assertEqual(ret, 0, "Failed to activate Snapshot snapy2")
        g.log.info("Snapshot snapy2 activated successfully")

        # list activated snapshots directory under .snaps
        g.log.info("Listing activated snapshots under .snaps")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            validate_dir = out.split('\n')
            self.assertIn(
                "snapy1", validate_dir, "Failed to "
                "validate snapy1 under .snaps directory")
            g.log.info("Activated Snapshot snapy1 listed Successfully")
            self.assertIn(
                "snapy2", validate_dir, "Failed to "
                "validate snapy2 under .snaps directory")
            g.log.info("Re-activated Snapshot snapy2 listed Successfully")
    def test_snapshot_basic_commands_when_io_in_progress(self):
        """Create, List, Activate, Enable USS (User Serviceable Snapshot),
            Viewing Snap of the volume from mount, De-Activate
            when IO is in progress.
        """
        snap_name = "snap_cvt"
        # Create Snapshot
        g.log.info("Creating snapshot %s of the volume %s", snap_name,
                   self.volname)
        ret, _, _ = snap_create(self.mnode, self.volname, snap_name)
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot with name %s "
                          " of the volume %s", snap_name, self.volname))
        g.log.info("Successfully created snapshot %s of the volume %s",
                   snap_name, self.volname)

        # List Snapshot
        g.log.info("Listing the snapshot created for the volume %s",
                   self.volname)
        snap_list = get_snap_list(self.mnode)
        self.assertIsNotNone(snap_list, "Unable to get the Snapshot list")
        self.assertIn(snap_name, snap_list,
                      ("snapshot %s not listed in Snapshot list", snap_name))
        g.log.info("Successfully listed snapshot %s in gluster snapshot list",
                   snap_name)

        # Activate the snapshot
        g.log.info("Activating snapshot %s of the volume %s", snap_name,
                   self.volname)
        ret, _, _ = snap_activate(self.mnode, snap_name)
        self.assertEqual(ret, 0,
                         ("Failed to activate snapshot with name %s "
                          " of the volume %s", snap_name, self.volname))
        g.log.info("Successfully activated snapshot %s of the volume %s",
                   snap_name, self.volname)

        # Enable USS on the volume.
        uss_options = ["features.uss"]
        if self.mount_type == "cifs":
            uss_options.append("features.show-snapshot-directory")
        g.log.info("Enable uss options %s on the volume %s", uss_options,
                   self.volname)
        ret = enable_and_validate_volume_options(self.mnode,
                                                 self.volname,
                                                 uss_options,
                                                 time_delay=30)
        self.assertTrue(ret, ("Unable to enable uss options %s on volume %s",
                              uss_options, self.volname))
        g.log.info("Successfully enabled uss options %s on the volume: %s",
                   uss_options, self.volname)

        # Viewing snapshot from mount
        g.log.info("Viewing Snapshot %s from mounts:", snap_name)
        ret = view_snaps_from_mount(self.mounts, snap_name)
        self.assertTrue(ret, ("Failed to View snap %s from mounts", snap_name))
        g.log.info("Successfully viewed snap %s from mounts", snap_name)

        # De-Activate the snapshot
        g.log.info("Deactivating snapshot %s of the volume %s", snap_name,
                   self.volname)
        ret, _, _ = snap_deactivate(self.mnode, snap_name)
        self.assertEqual(ret, 0,
                         ("Failed to deactivate snapshot with name %s "
                          " of the volume %s", snap_name, self.volname))
        g.log.info("Successfully deactivated snapshot %s of the volume %s",
                   snap_name, self.volname)

        # Viewing snapshot from mount (.snaps shouldn't be listed from mount)
        for mount_obj in self.mounts:
            g.log.info("Viewing Snapshot %s from mount %s:%s", snap_name,
                       mount_obj.client_system, mount_obj.mountpoint)
            ret = view_snaps_from_mount(mount_obj, snap_name)
            self.assertFalse(ret, ("Still able to View snap %s from mount "
                                   "%s:%s", snap_name, mount_obj.client_system,
                                   mount_obj.mountpoint))
            g.log.info("%s not listed under .snaps from mount %s:%s",
                       snap_name, mount_obj.client_system,
                       mount_obj.mountpoint)
        g.log.info(
            "%s not listed under .snaps from mounts after "
            "deactivating ", snap_name)

        # Validate IO
        g.log.info("Wait for IO to complete and validate IO ...")
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.io_validation_complete = True
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")
    def test_snap_self_heal(self):
        """
        Steps:

        1. create a volume
        2. mount volume
        3. create snapshot of that volume
        4. Activate snapshot
        5. Clone snapshot and Mount
        6. Perform I/O
        7. Bring Down Few bricks from volume without
           affecting the volume or cluster.
        8. Perform I/O
        9. Bring back down bricks to online
        10. Validate heal is complete with areequal

        """
        # pylint: disable=too-many-statements, too-many-locals
        # Creating snapshot:
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(
            ret, 0, ("Failed to create snapshot for volume %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume %s", self.snap,
                   self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0,
                         ("Failed to Activate snapshot %s" % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # snapshot list
        ret, _, _ = snap_list(self.mnode)
        self.assertEqual(ret, 0, ("Failed to list all the snapshot"))
        g.log.info("Snapshot list command was successful")

        # Creating a Clone volume from snapshot:
        g.log.info("Starting to Clone volume from Snapshot")
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone)
        self.assertEqual(ret, 0, ("Failed to clone %s from snapshot %s" %
                                  (self.clone, self.snap)))
        g.log.info("%s created successfully", self.clone)

        # start the cloned volume
        g.log.info("Starting the cloned volume")
        ret, _, _ = volume_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to start clone %s" % self.clone)
        g.log.info("clone volume %s started successfully", self.clone)

        # Mounting a clone volume
        g.log.info("Mounting a clone volume")
        ret, _, _ = mount_volume(self.clone, self.mount_type, self.mount1,
                                 self.mnode, self.clients[0])
        self.assertEqual(ret, 0,
                         "Failed to mount clone Volume %s" % self.clone)
        g.log.info("Clone volume %s mounted Successfully", self.clone)

        # Checking cloned volume mounted or not
        ret = is_mounted(self.clone, self.mount1, self.mnode, self.clients[0],
                         self.mount_type)
        self.assertTrue(
            ret,
            "Failed to mount clone volume on mount point: %s" % self.mount1)
        g.log.info("clone Volume %s mounted on %s", self.clone, self.mount1)

        # write files on all mounts
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mount1)
        all_mounts_procs = []
        cmd = ("python %s create_files "
               "-f 10 --base-file-name file %s" %
               (self.script_upload_path, self.mount1))
        proc = g.run(self.clients[0], cmd)
        all_mounts_procs.append(proc)
        g.log.info("Successful in creating I/O on mounts")

        # get the bricks from the volume
        g.log.info("Fetching bricks for the volume : %s", self.clone)
        bricks_list = get_all_bricks(self.mnode, self.clone)
        g.log.info("Brick List : %s", bricks_list)

        # Select bricks to bring offline
        g.log.info("Starting to bring bricks to offline")
        bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
            self.mnode, self.volname))
        bricks_to_bring_offline = filter(
            None, (bricks_to_bring_offline_dict['hot_tier_bricks'] +
                   bricks_to_bring_offline_dict['cold_tier_bricks'] +
                   bricks_to_bring_offline_dict['volume_bricks']))
        g.log.info("Brick to bring offline: %s ", bricks_to_bring_offline)
        ret = bring_bricks_offline(self.clone, bricks_to_bring_offline)
        self.assertTrue(ret, "Failed to bring the bricks offline")
        g.log.info("Successful in bringing bricks: %s offline",
                   bricks_to_bring_offline)

        # Offline Bricks list
        offline_bricks = get_offline_bricks_list(self.mnode, self.clone)
        self.assertIsNotNone(
            offline_bricks, "Failed to get offline bricklist"
            "for volume %s" % self.clone)
        for bricks in offline_bricks:
            self.assertIn(bricks, bricks_to_bring_offline,
                          "Failed to validate "
                          "Bricks offline")
        g.log.info("Bricks Offline: %s", offline_bricks)

        # Online Bricks list
        online_bricks = get_online_bricks_list(self.mnode, self.clone)
        self.assertIsNotNone(
            online_bricks, "Failed to get online bricks"
            " for volume %s" % self.clone)
        g.log.info("Bricks Online: %s", online_bricks)

        # write files mountpoint
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mount1)
        all_mounts_procs = []
        cmd = ("python %s create_files "
               "-f 10 --base-file-name file %s" %
               (self.script_upload_path, self.mount1))
        proc = g.run(self.clients[0], cmd)
        all_mounts_procs.append(proc)
        g.log.info("Successful in creating I/O on mounts")

        # Bring all bricks online
        g.log.info("bring all bricks online")
        ret = bring_bricks_online(self.mnode, self.clone,
                                  bricks_to_bring_offline)
        self.assertTrue(ret, "Failed to bring bricks online")
        g.log.info("Successful in bringing all bricks online")

        # Validate Bricks are online
        g.log.info("Validating all bricks are online")
        ret = are_bricks_online(self.mnode, self.clone, bricks_list)
        self.assertTrue(ret, "Failed to bring all the bricks online")
        g.log.info("bricks online: %s", bricks_list)

        # Wait for volume processes to be online
        g.log.info("Wait for volume processes to be online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.clone)
        self.assertTrue(ret, ("Failed to wait for volume %s processes to "
                              "be online" % self.clone))
        g.log.info(
            "Successful in waiting for volume %s processes to be "
            "online", self.clone)

        # Verify volume's all process are online
        g.log.info("Verifying volume's all process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.clone)
        self.assertTrue(
            ret, ("Volume %s : All process are not online" % self.clone))
        g.log.info("Volume %s : All process are online", self.clone)

        # wait for the heal process to complete
        g.log.info("waiting for heal process to complete")
        ret = monitor_heal_completion(self.mnode, self.clone)
        self.assertTrue(ret, "Failed to complete the heal process")
        g.log.info("Successfully completed heal process")

        # Check arequal
        # get the subvolumes
        g.log.info("Starting to get sub-volumes for volume %s", self.clone)
        subvols = get_subvols(self.mnode, self.clone)
        num_subvols = len(subvols['volume_subvols'])
        g.log.info("Number of subvolumes in volume %s: %s", self.clone,
                   num_subvols)

        # Get arequals and compare
        g.log.info("Starting to compare arequals")
        for i in range(0, num_subvols):
            # Get arequal for the first brick of the subvolume
            subvol_brick_list = subvols['volume_subvols'][i]
            node, brick_path = subvol_brick_list[0].split(':')
            command = ('arequal-checksum -p %s '
                       '-i .glusterfs -i .landfill -i .trashcan' % brick_path)
            ret, arequal, _ = g.run(node, command)
            self.assertFalse(
                ret,
                'Failed to get arequal on brick %s' % subvol_brick_list[0])
            first_brick_total = arequal.splitlines()[-1].split(':')[-1]

            # Get arequal for every other brick and compare with the first
            for brick in subvol_brick_list:
                node, brick_path = brick.split(':')
                command = ('arequal-checksum -p %s '
                           '-i .glusterfs -i .landfill -i .trashcan'
                           % brick_path)
                ret, brick_arequal, _ = g.run(node, command)
                self.assertFalse(ret,
                                 'Failed to get arequal on brick %s' % brick)
                g.log.info('Getting arequal for %s is successful', brick)
                brick_total = brick_arequal.splitlines()[-1].split(':')[-1]
                self.assertEqual(
                    first_brick_total, brick_total,
                    'Arequals for subvol and %s are not equal' % brick)
                g.log.info('Arequals for subvol and %s are equal', brick)
        g.log.info('All arequals are equal for distributed-replicated')
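
# The arequal handling above repeats the same two steps per brick: run
# arequal-checksum and keep the trailing field of the last output line.  A
# small sketch of that parsing as a helper follows; the helper name is
# illustrative and, as in the test above, arequal-checksum is assumed to be
# installed on the brick nodes.
from glusto.core import Glusto as g


def get_brick_arequal_total(node, brick_path):
    """Return the total arequal checksum of one brick, or None on failure."""
    cmd = ('arequal-checksum -p %s -i .glusterfs -i .landfill -i .trashcan'
           % brick_path)
    ret, out, _ = g.run(node, cmd)
    if ret != 0:
        return None
    return out.splitlines()[-1].split(':')[-1]
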
Example #19
    def test_mount_snap_delete(self):
        """
        Mount the snap volume
        * Create volume, FUSE mount the volume
        * perform I/O on mount points
        * Creating snapshot and activate snapshot
        * FUSE mount the snapshot created
        * Perform I/O on mounted snapshot
        * I/O should fail
        """
        # pylint: disable=too-many-statements
        # starting I/O
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mounts)
        all_mounts_procs = []
        self.counter = 1
        for mount_obj in self.mounts:
            cmd = (
                "/usr/bin/env python %s create_files "
                "-f 10 --base-file-name file%d %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            self.counter += 100

        # Validate I/O
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")

        # Creating snapshot
        g.log.info("Starting to create snapshots")
        ret, _, _ = snap_create(self.mnode, self.volname, "snap1")
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot snap1 created successfully "
                   "for volume  %s", self.volname)

        # Activating snapshot
        g.log.info("Activating snapshot")
        ret, _, _ = snap_activate(self.mnode, "snap1")
        self.assertEqual(ret, 0, ("Failed to Activate snapshot snap1"))
        g.log.info("snap1 activated successfully")

        # redefine mounts
        self.mount_points = []
        self.mounts_dict_list = []
        for client in self.all_clients_info:
            mount = {
                'protocol': self.mount_type,
                'server': self.mnode,
                'volname': self.volname,
                'client': self.all_clients_info[client],
                'mountpoint': (os.path.join("/mnt/snap1")),
                'options': ''
            }
            self.mounts_dict_list.append(mount)
        self.mount1 = create_mount_objs(self.mounts_dict_list)
        g.log.info("Successfully made entry in self.mount1")

        # FUSE mount snap1 snapshot
        g.log.info("Mounting snapshot snap1")
        cmd = "mkdir -p  %s" % self.mpoint
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, ("Creation of directory %s"
                                  "for mounting"
                                  "volume snap1 failed" % (self.mpoint)))
        self.mount_points.append(self.mpoint)
        cmd = "mount -t glusterfs %s:/snaps/snap1/%s %s" % (
            self.mnode, self.volname, self.mpoint)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, ("Failed to mount snap1"))
        g.log.info("snap1 is mounted Successfully")

        # starting I/O
        g.log.info("Starting IO on all mounts...")
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate I/O
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")

        # start I/O
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mount1)
        all_mounts_procs = []
        for mount_obj in self.mount1:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # validate io should fail
        self.assertFalse(validate_io_procs(all_mounts_procs, self.mount1),
                         "Unexpected: IO Successful on all clients")
        g.log.info("Expected: IO failed on clients")
Example #20
    def test_restore_online_vol(self):

        # pylint: disable=too-many-statements
        """
        Steps:
        1. Create volume
        2. Mount volume
        3. Perform I/O on mounts
        4. Create 1 snapshots snapy1
        5. Validate snap created
        6. Perform some more I/O
        7. Create 1 more snapshot snapy2
        8. Restore volume to snapy1
          -- Restore should fail with message
             "volume needs to be stopped before restore"
        """

        # Performing step 3 to 7 in loop here
        for i in range(1, 3):
            # Perform I/O
            g.log.info("Starting IO on all mounts...")
            self.counter = 1
            self.all_mounts_procs = []
            for mount_obj in self.mounts:
                g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                           mount_obj.mountpoint)
                cmd = ("python %s create_deep_dirs_with_files "
                       "--dirname-start-num %d "
                       "--dir-depth 2 "
                       "--dir-length 2 "
                       "--max-num-of-dirs 2 "
                       "--num-of-files 2 %s" %
                       (self.script_upload_path, self.counter,
                        mount_obj.mountpoint))

                proc = g.run_async(mount_obj.client_system,
                                   cmd,
                                   user=mount_obj.user)
                self.all_mounts_procs.append(proc)
            self.io_validation_complete = False

            # Validate IO
            self.assertTrue(
                validate_io_procs(self.all_mounts_procs, self.mounts),
                "IO failed on some of the clients")
            self.io_validation_complete = True

            # Get stat of all the files/dirs created.
            g.log.info("Get stat of all the files/dirs created.")
            ret = get_mounts_stat(self.mounts)
            self.assertTrue(ret, "Stat failed on some of the clients")
            g.log.info("Successfully got stat of all files/dirs created")

            # Create snapshot
            g.log.info("Creating snapshot for volume %s", self.volname)
            ret, _, _ = snap_create(self.mnode, self.volname, "snapy%s" % i)
            self.assertEqual(
                ret, 0, ("Failed to create snapshot for %s" % self.volname))
            g.log.info("Snapshot created successfully for volume  %s",
                       self.volname)

            # Check for no of snaps using snap_list
            snap_list = get_snap_list(self.mnode)
            self.assertEqual(
                i, len(snap_list), "No of snaps not consistent "
                "for volume %s" % self.volname)
            g.log.info("Successfully validated number of snaps.")

            # Increase counter for next iteration
            self.counter = 1000

        # Restore volume to snapshot snapy2, it should fail
        i = 2
        g.log.info("Starting to restore volume to snapy%s", i)
        ret, _, err = snap_restore(self.mnode, "snapy%s" % i)
        errmsg = ("snapshot restore: failed: Volume (%s) has been started. "
                  "Volume needs to be stopped before restoring a snapshot.\n" %
                  self.volname)
        log_msg = ("Expected : %s, but Returned : %s", errmsg, err)
        self.assertEqual(err, errmsg, log_msg)
        g.log.info("Expected : Failed to restore volume to snapy%s", i)
Example #21
    def test_snap_uss_while_io(self):
        # pylint: disable=too-many-statements
        """
        Steps:
        1. Create volume
        2. enable uss on created volume
        3. validate uss running
        4. validate snapd running on all nodes
        5. perform io on mounts
        6. create 10 snapshots with description
        7. validate with snapshot list
        8. validate io is completed
        9. Activate snapshots to list all snaps
           under .snaps
        10. validate snapshots under .snaps directory
        """
        # Enable USS
        g.log.info("Enable USS for volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume"
                         "%s" % self.volname)
        g.log.info("Successfully enabled USS on volume %s", self.volname)

        # Validate USS running
        g.log.info("Validating USS enabled or disabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(
            ret, "Failed to validate USS for volume "
            "%s" % self.volname)
        g.log.info("Successfully validated USS for Volume" "%s", self.volname)

        # Validate snapd running
        for server in self.servers:
            g.log.info("Validating snapd daemon on:%s", server)
            ret = is_snapd_running(server, self.volname)
            self.assertTrue(ret, "Snapd is Not running on " "%s" % server)
            g.log.info("Snapd Running on node: %s", server)

        # Perform I/O
        g.log.info("Starting to Perform I/O")
        all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Generating data for %s:"
                       "%s", mount_obj.client_system, mount_obj.mountpoint)
            # Create files
            g.log.info('Creating files...')
            command = (
                "python %s create_files -f 100 --fixed-file-size 1M %s" %
                (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               command,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Creating snapshot with description
        g.log.info("Starting to Create snapshot")
        for count in range(0, self.snap_count):
            self.snap = "snap%s" % count
            ret, _, _ = snap_create(self.mnode,
                                    self.volname,
                                    self.snap,
                                    description='$p3C!@l C#@R@cT#R$')
            self.assertEqual(
                ret, 0,
                ("Failed to create snapshot for volume %s" % self.volname))
            g.log.info("Snapshot %s created successfully"
                       " for volume %s", self.snap, self.volname)

        # Validate snapshot list
        g.log.info("Starting to list all snapshots")
        ret, out, _ = snap_list(self.mnode)
        self.assertEqual(
            ret, 0, ("Failed to list snapshot of volume %s" % self.volname))
        s_list = out.strip().split('\n')
        self.assertEqual(len(s_list), self.snap_count, "Failed to validate "
                         "all snapshots")
        g.log.info(
            "Snapshot listed and  Validated for volume %s"
            " successfully", self.volname)

        # Activating snapshot
        g.log.info("Activating snapshot")
        for count in range(0, self.snap_count):
            self.snap = "snap%s" % count
            ret, _, _ = snap_activate(self.mnode, self.snap)
            self.assertEqual(ret, 0, "Failed to Activate snapshot "
                             "%s" % self.snap)
            g.log.info("snapshot %s activated successfully", self.snap)

        # Validate IO is completed
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")
        self.io_validation_complete = True

        # validate snapshots are listed under .snaps directory
        g.log.info("Validating snaps under .snaps")
        ret = view_snaps_from_mount(self.mounts, s_list)
        self.assertTrue(ret, "Failed to list snaps under .snaps" "directory")
        g.log.info("Snapshots Validated successfully")
Example #22
    def test_create_snap_bricks(self):
        """
        1. get brick list
        2. check all bricks are online
        3. Selecting one brick randomly to bring it offline
        4. get brick list
        5. check all bricks are online
        6. Offline Bricks list
        7. Online Bricks list
        8. Create snapshot of volume
        9. snapshot create should fail
        """

        bricks_list = []
        # get the bricks from the volume
        g.log.info("Fetching bricks for the volume : %s" % self.volname)
        bricks_list = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List : %s" % bricks_list)

        # check all bricks are online
        g.log.info("Verifying all bricks are online or not.....")
        ret = are_bricks_online(self.mnode, self.volname, bricks_list)
        self.assertTrue(ret, ("Not all bricks are online"))
        g.log.info("All bricks are online.")

        # Selecting one brick randomly to bring it offline
        g.log.info("Selecting one brick randomly to bring it offline")
        brick_to_bring_offline = random.choice(bricks_list)
        g.log.info("Brick to bring offline:%s " % brick_to_bring_offline)
        ret = bring_bricks_offline(self.volname, brick_to_bring_offline, None)
        self.assertTrue(ret, "Failed to bring the bricks offline")
        g.log.info("Randomly Selected brick: %s" % brick_to_bring_offline)

        # get brick list
        g.log.info("Fetching bricks for the volume : %s" % self.volname)
        bricks_list = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List : %s" % bricks_list)

        # check that not all bricks are online, since one was brought offline
        g.log.info("Verifying that not all bricks are online")
        ret = are_bricks_online(self.mnode, self.volname, bricks_list)
        self.assertFalse(ret, "Unexpected: All bricks are online even after "
                         "bringing one brick offline")
        g.log.info("Expected: Not all bricks are online")

        # get the bricks for the volume
        g.log.info("Fetching bricks for the volume : %s" % self.volname)
        bricks_list = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List : %s" % bricks_list)

        # Offline Bricks list
        offbricks = get_offline_bricks_list(self.mnode, self.volname)
        g.log.info("Bricks Offline: %s" % offbricks)

        # Online Bricks list
        onbricks = get_online_bricks_list(self.mnode, self.volname)
        g.log.info("Bricks Online: %s" % onbricks)

        # Create snapshot of volume; it should fail as one brick is offline
        ret, _, _ = snap_create(self.mnode, self.volname, "snap1", False,
                                "Description with $p3c1al characters!")
        self.assertNotEqual(
            ret, 0, "Unexpected: snapshot snap1 of volume %s created "
            "successfully with a brick offline" % self.volname)
        g.log.info("Expected: Failed to create snapshot snap1 of volume %s "
                   "while a brick is offline", self.volname)

        # Volume info
        ret = get_volume_info(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to perform gluster volume "
                              "info on volume %s" % self.volname))
        g.log.info("Gluster volume info on volume %s is successful",
                   self.volname)

        # snapshot list
        ret, _, _ = snap_list(self.mnode)
        self.assertEqual(
            ret, 0, "Failed to list snapshots of volume %s" % self.volname)
        g.log.info("Snapshot list command for volume %s was successful",
                   self.volname)
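
# This example deliberately attempts the snapshot while a brick is offline.
# A small guard that refuses to even try the snapshot unless every brick is
# online is sketched below, reusing the brick helpers from the example;
# ensure_bricks_online_before_snap is an illustrative name only and the
# import path is assumed to follow the glustolibs layout.
from glustolibs.gluster.brick_libs import are_bricks_online, get_all_bricks


def ensure_bricks_online_before_snap(mnode, volname):
    """Return True only if every brick of the volume is currently online."""
    bricks = get_all_bricks(mnode, volname)
    return bool(bricks) and are_bricks_online(mnode, volname, bricks)
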
Example #23
    def test_validate_snaps_dir_over_uss(self):

        # pylint: disable=too-many-statements
        """
        Run IO on the mounts and take 2 snapshots.
        Activate 1 snapshot and check the directory listing.
        Writing to .snaps should not be allowed.
        Listing the deactivated snapshot should fail.
        """

        # run IOs
        self.counter = 1
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "/usr/bin/env python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 2 "
                "--dir-length 2 "
                "--max-num-of-dirs 2 "
                "--num-of-files 2 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        self.assertTrue(validate_io_procs(self.all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")
        self.io_validation_complete = True

        # get the snapshot list.
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(len(snap_list), 0, "Unexpected: %s snapshots"
                         "present" % len(snap_list))
        g.log.info("Expected: No snapshots present")

        # Create 2 snapshot
        g.log.info("Starting to Create Snapshots")
        for snap_num in range(0, 2):
            ret, _, _ = snap_create(self.mnode, self.volname,
                                    "snap-%s" % snap_num)
            self.assertEqual(
                ret, 0, "Snapshot Creation failed"
                " for snap-%s" % snap_num)
            g.log.info("Snapshot snap-%s of volume %s created"
                       " successfully", snap_num, self.volname)

        # Activate snap-0
        g.log.info("Activating snapshot snap-0")
        ret, _, _ = snap_activate(self.mnode, "snap-0")
        self.assertEqual(ret, 0, "Failed to activate " "Snapshot snap-0")
        g.log.info("Snapshot snap-0 Activated Successfully")

        # Enable USS for volume
        g.log.info("Enable uss for volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, "Failed to enable USS for "
            " volume %s" % self.volname)
        g.log.info("Successfully enabled USS " "for volume %s", self.volname)

        # Validate uss enabled
        g.log.info("Validating uss enabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, "Failed to validate uss enable")
        g.log.info("Successfully validated uss enable for volume"
                   "%s", self.volname)

        # list activated snapshots directory under .snaps
        g.log.info("Listing activated snapshots under .snaps")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            validate_dir = out.split('\n')
            self.assertIn(
                'snap-0', validate_dir, "Failed to "
                "validate snap-0 under .snaps directory")
            g.log.info("Activated Snapshot Successfully listed")
            self.assertNotIn(
                'snap-1', validate_dir, "Unexpected: "
                "Successfully listed snap-1 under "
                ".snaps directory")
            g.log.info("Expected: De-activated Snapshot not listed")

        # start I/0 ( write and read )
        g.log.info("Starting IO on all mounts...")
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s/.snaps/abc/" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # IO should fail
        g.log.info("IO should Fail with ROFS error.....")
        self.assertFalse(validate_io_procs(all_mounts_procs, self.mounts),
                         "Unexpected: IO successfully completed")
        g.log.info("Expected: IO failed to complete")

        # validate snap-0 present in mountpoint
        ret = view_snaps_from_mount(self.mounts, "snap-0")
        self.assertTrue(
            ret, "UnExpected: Unable to list content "
            "in activated snapshot"
            " activated snapshot")
        g.log.info("Expected: Successfully listed contents in"
                   " activated snapshot")
Example #24
    def test_clone_delete_snap(self):
        """
        clone from snap of one volume
        * Create and Mount the volume
        * Enable some volume options
        * Creating 2 snapshots and activate
        * reset the volume
        * create a clone of snapshots created
        * Mount both the clones
        * Perform I/O on mount point
        * Check volume options of cloned volumes
        * Create snapshot of the cloned snapshot volume
        * cleanup snapshots and volumes
        """

        # pylint: disable=too-many-statements, too-many-locals
        # Enabling Volume options on the volume and validating
        g.log.info("Enabling volume options for volume %s ", self.volname)
        options = {" features.uss": "enable"}
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(
            ret, ("Failed to set volume options for volume %s" % self.volname))
        g.log.info("Successfully set volume options"
                   "for volume %s", self.volname)

        # Validate feature.uss enabled or not
        g.log.info("Validating feature.uss is enabled")
        option = "features.uss"
        vol_option = get_volume_options(self.mnode, self.volname, option)
        self.assertEqual(vol_option['features.uss'], 'enable', "Failed"
                         " to validate "
                         "volume options")
        g.log.info("Successfully validated volume options"
                   "for volume %s", self.volname)

        # Creating snapshot
        g.log.info("Starting to Create snapshot")
        for snap_count in range(0, 2):
            ret, _, _ = snap_create(self.mnode, self.volname,
                                    "snap%s" % snap_count)
            self.assertEqual(
                ret, 0,
                ("Failed to create snapshot for volume %s" % self.volname))
            g.log.info("Snapshot snap%s created successfully"
                       "for volume %s", snap_count, self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        for snap_count in range(0, 2):
            ret, _, _ = snap_activate(self.mnode, "snap%s" % snap_count)
            self.assertEqual(
                ret, 0, ("Failed to Activate snapshot snap%s" % snap_count))
            g.log.info("Snapshot snap%s activated successfully", snap_count)

        # Reset volume:
        g.log.info("Starting to Reset Volume")
        ret, _, _ = volume_reset(self.mnode, self.volname, force=False)
        self.assertEqual(ret, 0, ("Failed to reset volume %s" % self.volname))
        g.log.info("Reset Volume on volume %s is Successful", self.volname)

        # Validate feature.uss enabled or not
        g.log.info("Validating feature.uss is enabled")
        option = "features.uss"
        vol_option = get_volume_options(self.mnode, self.volname, option)
        self.assertEqual(vol_option['features.uss'], 'off', "Failed"
                         " to validate "
                         "volume options")
        g.log.info("Successfully validated volume options"
                   "for volume %s", self.volname)

        # Verify volume's all process are online
        g.log.info("Starting to Verify volume's all process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(ret, ("Volume %s : All process are"
                              "not online" % self.volname))
        g.log.info("Volume %s : All process are online", self.volname)

        # Creating and starting a Clone of snapshot
        g.log.info("Starting to Clone Snapshot")
        for clone_count in range(0, 2):
            ret, _, _ = snap_clone(self.mnode, "snap%s" % clone_count,
                                   "clone%s" % clone_count)
            self.assertEqual(ret, 0,
                             ("Failed to clone clone%s volume" % clone_count))
            g.log.info("clone%s volume created successfully", clone_count)

        # Start Cloned volume
        g.log.info("starting to Validate clone volumes are started")
        for clone_count in range(0, 2):
            ret, _, _ = volume_start(self.mnode, "clone%s" % clone_count)
            self.assertEqual(ret, 0, ("Failed to start clone%s" % clone_count))
            g.log.info("clone%s started successfully", clone_count)
        g.log.info("All the clone volumes are started Successfully")

        # Validate Volume start of cloned volume
        g.log.info("Starting to Validate Volume start")
        for clone_count in range(0, 2):
            vol_info = get_volume_info(self.mnode, "clone%s" % clone_count)
            if vol_info["clone%s" % clone_count]['statusStr'] != 'Started':
                raise ExecutionError("Failed to get volume info for clone%s" %
                                     clone_count)
            g.log.info("Volume clone%s is in Started state", clone_count)

        # Validate feature.uss enabled or not
        g.log.info("Validating feature.uss is enabled")
        option = "features.uss"
        for clone_count in range(0, 2):
            vol_option = get_volume_options(self.mnode,
                                            "clone%s" % clone_count, option)
            self.assertEqual(vol_option['features.uss'], 'enable', "Failed"
                             " to validate"
                             "volume options")
            g.log.info(
                "Successfully validated volume options"
                "for volume clone%s", clone_count)

        # Mount both the cloned volumes
        g.log.info("Mounting Cloned Volumes")
        for mount_obj in range(0, 2):
            self.mpoint = "/mnt/clone%s" % mount_obj
            cmd = "mkdir -p  %s" % self.mpoint
            ret, _, _ = g.run(self.clients[0], cmd)
            self.assertEqual(ret, 0, ("Creation of directory %s"
                                      "for mounting"
                                      "volume %s failed: Directory already"
                                      "present" %
                                      (self.mpoint, "clone%s" % mount_obj)))
            g.log.info(
                "Creation of directory %s for mounting volume %s "
                "success", self.mpoint, ("clone%s" % mount_obj))
            ret, _, _ = mount_volume("clone%s" % mount_obj, self.mount_type,
                                     self.mpoint, self.mnode, self.clients[0])
            self.assertEqual(ret, 0, ("clone%s is not mounted" % mount_obj))
            g.log.info("clone%s is mounted Successfully", mount_obj)

        # Perform I/O on mount
        # Start I/O on all mounts
        g.log.info("Starting to Perform I/O on Mountpoint")
        all_mounts_procs = []
        for mount_obj in range(0, 2):
            cmd = ("cd /mnt/clone%s/; for i in {1..10};"
                   "do touch file$i; done; cd;") % mount_obj
            proc = g.run(self.clients[0], cmd)
            all_mounts_procs.append(proc)
        g.log.info("I/O on mountpoint is successful")

        # create snapshot
        g.log.info("Starting to Create snapshot of clone volume")
        ret0, _, _ = snap_create(self.mnode, "clone0", "snap2")
        self.assertEqual(ret0, 0, "Failed to create the snapshot"
                         "snap2 from clone0")
        g.log.info("Snapshots snap2 created successfully from clone0")
        ret1, _, _ = snap_create(self.mnode, "clone1", "snap3")
        self.assertEqual(ret1, 0, "Failed to create the snapshot snap3"
                         "from clone1")
        g.log.info("Snapshots snap3 created successfully from clone1")

        # Listing all Snapshots present
        g.log.info("Starting to list all snapshots")
        ret, _, _ = snap_list(self.mnode)
        self.assertEqual(ret, 0, ("Failed to list snapshots present"))
        g.log.info("Snapshots successfully listed")
    def test_snap_glusterd_down(self):
        # pylint: disable=too-many-statements
        """
        Steps:

        1. create a volume
        2. mount volume
        3. create snapshot of that volume
        4. validate using snapshot info
        5. Activate snapshot
        6. List all snapshots present
        7. validate using snapshot info
        8. Stop glusterd on one node
        9. Check glusterd status
       10. deactivate created snapshot
       11. Start glusterd on that node
       12. Check glusterd status
       13. validate using snapshot info
       14. Check all peers are connected

        """
        # Creating snapshot:
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(ret, 0, ("Failed to create snapshot %s for volume %s"
                                  % (self.snap, self.volname)))
        g.log.info("Snapshot %s created successfully "
                   "for volume %s", self.snap, self.volname)

        # Check snapshot info
        g.log.info("Checking snapshot info")
        snap_info = get_snap_info_by_snapname(self.mnode, self.snap)
        self.assertIsNotNone(snap_info, "Failed to get snap information"
                             "for snapshot %s" % self.snap)
        status = snap_info['snapVolume']['status']
        self.assertNotEqual(status, 'Started', "snapshot %s "
                            "not started" % self.snap)
        g.log.info("Successfully checked snapshot info")

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to Activate snapshot %s"
                                  % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # snapshot list
        g.log.info("Starting to validate list of snapshots")
        snap_list1 = get_snap_list(self.mnode)
        self.assertIsNotNone(snap_list1, "Failed to list all the snapshot")
        self.assertEqual(len(snap_list1), 1, "Failed to validate snap list")
        g.log.info("Snapshot list successfully validated")

        # Check snapshot info
        g.log.info("Checking snapshot info")
        snap_info = get_snap_info_by_snapname(self.mnode, self.snap)
        status = snap_info['snapVolume']['status']
        self.assertEqual(status, 'Started', "Failed to"
                         "start snapshot info")
        g.log.info("Successfully checked snapshot info")

        # Stop Glusterd on one node
        g.log.info("Stopping Glusterd on one node")
        ret = stop_glusterd(self.servers[1])
        self.assertTrue(ret, "Failed to stop glusterd on node %s"
                        % self.servers[1])

        # Check Glusterd status
        g.log.info("Check glusterd running or not")
        count = 0
        while count < 80:
            ret = is_glusterd_running(self.servers[1])
            if ret == 1:
                break
            time.sleep(2)
            count += 2
        self.assertEqual(ret, 1, "Unexpected: glusterd running on node %s" %
                         self.servers[1])
        g.log.info("Expected: Glusterd not running on node %s",
                   self.servers[1])

        # de-activating snapshot
        g.log.info("Starting to de-activate Snapshot")
        ret, _, _ = snap_deactivate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to deactivate snapshot %s"
                                  % self.snap))
        g.log.info("Snapshot %s deactivated successfully", self.snap)

        # validate snapshot info
        g.log.info("Checking snapshot info")
        snap_info = get_snap_info_by_snapname(self.mnode, self.snap)
        status = snap_info['snapVolume']['status']
        self.assertNotEqual(status, 'Started', "snapshot %s "
                            "not started" % self.snap)
        g.log.info("Successfully validated snapshot info")

        # Start Glusterd on node
        g.log.info("Starting Glusterd on node %s", self.servers[1])
        ret = start_glusterd(self.servers[1])
        self.assertTrue(ret, "Failed to start glusterd on %s node"
                        % self.servers[1])
        g.log.info("Successfully started glusterd on "
                   "%s node", self.servers[1])

        # Check Glusterd status
        g.log.info("Check glusterd running or not")
        count = 0
        while count < 80:
            ret = is_glusterd_running(self.servers[1])
            if not ret:
                break
            time.sleep(2)
            count += 2
        self.assertEqual(ret, 0, "glusterd not running on node %s "
                         % self.servers[1])
        g.log.info("glusterd is running on %s node",
                   self.servers[1])

        # validate snapshot info
        g.log.info("Checking snapshot info")
        snap_info = get_snap_info_by_snapname(self.mnode, self.snap)
        self.assertIsNotNone(snap_info, "Failed to get snap info for"
                             " snapshot %s" % self.snap)
        status = snap_info['snapVolume']['status']
        self.assertNotEqual(status, 'Started', "snapshot"
                            " %s failed to validate with snap info"
                            % self.snap)
        g.log.info("Successfully validated snapshot info")

        # Check all the peers are in connected state
        g.log.info("Validating all the peers are in connected state")
        for servers in self.servers:
            count = 0
            while count < 80:
                ret = is_peer_connected(self.mnode, servers)
                if ret:
                    break
                time.sleep(2)
                count += 2
            self.assertTrue(ret, "All the nodes are not in cluster")
        g.log.info("Successfully validated all the peers")
Example #26
    def test_create_snapshot_and_verify_content(self):
        """
        - Create an arbiter volume
        - Create IO
        - Calculate arequal of the mount point
        - Take a snapshot of the volume
        - Create new data on mount point
        - Restore the snapshot
        - Calculate arequal of the mount point
        - Compare arequals
        """
        # Creating files on client side
        g.log.info("Generating data for %s:%s", self.mounts[0].client_system,
                   self.mounts[0].mountpoint)
        # Create dirs with file
        g.log.info('Creating dirs with file...')
        command = ("python %s create_deep_dirs_with_files "
                   "-d 2 "
                   "-l 2 "
                   "-n 2 "
                   "-f 20 "
                   "%s" % (self.script_upload_path, self.mounts[0].mountpoint))

        ret, _, err = g.run(self.mounts[0].client_system,
                            command,
                            user=self.mounts[0].user)

        self.assertFalse(ret, err)
        g.log.info("IO is successful")

        # Get arequal before snapshot
        g.log.info('Getting arequal before snapshot...')
        ret, arequal_before_snapshot = collect_mounts_arequal(self.mounts[0])
        self.assertTrue(ret, 'Failed to get arequal')
        g.log.info('Getting arequal before snapshot is successful')

        # Create snapshot
        snapshot_name = 'testsnap'
        g.log.info("Creating snapshot %s ...", snapshot_name)
        ret, _, err = snap_create(self.mnode, self.volname, snapshot_name)
        self.assertEqual(ret, 0, err)
        g.log.info("Snapshot %s created successfully", snapshot_name)

        # Add files on client side
        g.log.info("Generating data for %s:%s", self.mounts[0].client_system,
                   self.mounts[0].mountpoint)
        # Create dirs with file
        g.log.info('Adding dirs with file...')
        command = ("python %s create_deep_dirs_with_files "
                   "-d 2 "
                   "-l 2 "
                   "-n 2 "
                   "-f 20 "
                   "%s" % (self.script_upload_path,
                           self.mounts[0].mountpoint + '/new_files'))

        ret, _, err = g.run(self.mounts[0].client_system,
                            command,
                            user=self.mounts[0].user)

        self.assertFalse(ret, err)
        g.log.info("IO is successful")

        # Stop the volume
        g.log.info("Stopping %s ...", self.volname)
        ret, _, err = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, err)
        g.log.info("Volume %s stopped successfully", self.volname)

        # Revert snapshot
        g.log.info("Reverting snapshot %s ...", snapshot_name)
        ret, _, err = snap_restore(self.mnode, snapshot_name)
        self.assertEqual(ret, 0, err)
        g.log.info("Snapshot %s restored successfully", snapshot_name)

        # Start the volume
        g.log.info("Starting %s ...", self.volname)
        ret, _, err = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, err)
        g.log.info("Volume %s started successfully", self.volname)

        # Wait for volume processes to be online
        g.log.info("Wait for volume processes to be online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to wait for volume %s processes to "
                              "be online", self.volname))
        g.log.info(
            "Successful in waiting for volume %s processes to be "
            "online", self.volname)

        # Verify volume's all process are online
        g.log.info("Verifying volume's all process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(
            ret, ("Volume %s : All process are not online" % self.volname))
        g.log.info("Volume %s : All process are online", self.volname)

        # Get arequal after restoring snapshot
        g.log.info('Getting arequal after restoring snapshot...')
        ret, arequal_after_restoring = collect_mounts_arequal(self.mounts[0])
        self.assertTrue(ret, "Collecting arequal-checksum failed")

        # Checking arequals before creating and after restoring snapshot
        self.assertEqual(
            arequal_before_snapshot, arequal_after_restoring,
            'Arequal before creating snapshot '
            'and after restoring snapshot are not equal')
        g.log.info('Arequal before creating snapshot '
                   'and after restoring snapshot are equal')
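
Example #26 restores a snapshot with the usual sequence: stop the volume, restore, start it again, then wait for its processes. A condensed sketch of that sequence as a helper, reusing only the library calls shown above (the function name itself is hypothetical):

def restore_snapshot(mnode, volname, snapname):
    """Stop `volname`, restore `snapname`, start the volume again and
    wait for all of its processes to come back online.
    Returns True on success, False on the first failing step."""
    ret, _, _ = volume_stop(mnode, volname)
    if ret:
        return False
    ret, _, _ = snap_restore(mnode, snapname)
    if ret:
        return False
    ret, _, _ = volume_start(mnode, volname)
    if ret:
        return False
    return wait_for_volume_process_to_be_online(mnode, volname)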
Example #27
    def test_activate_deactivate(self):
        # pylint: disable=too-many-branches, too-many-statements
        """
        Verifying Snapshot activation/deactivation functionality.

        * Create Snapshot
        * Validate snapshot info before activation
        * Validate snapshot status before activation
        * Activate snapshot
        * Validate snapshot info after activation
        * Validate snapshot status after activation
        * Deactivate snapshot
        * Validate snapshot info after deactivation
        * Validate snapshot status after deactivation
        """

        # Create Snapshot
        snap_name = 'snap_%s' % self.volname
        g.log.info("Starting to Create Snapshot %s", snap_name)
        ret, _, _ = snap_create(self.mnode, self.volname, snap_name)
        self.assertEqual(ret, 0,
                         ("Snapshot Creation failed for %s", snap_name))
        g.log.info("Successfully created Snapshot %s for volume %s", snap_name,
                   self.volname)

        # Validate Snapshot Info Before Activation
        g.log.info("Validating 'snapshot info' in 'stopped' state before "
                   "activating the snapshot")
        ret = get_snap_info_by_snapname(self.mnode, snap_name)
        self.assertIsNotNone(
            ret, ("Failed to Fetch Snapshot info for %s", snap_name))
        g.log.info("Snapshot info Success for %s", ret['snapVolume']['status'])
        self.assertEqual(
            ret['snapVolume']['status'], 'Stopped',
            ("Unexpected: Snapshot %s Status is in Started state", snap_name))
        g.log.info("Expected: Snapshot is in Stopped state as it is "
                   "not Activated")

        # Validate Snapshot Status Before Activation
        g.log.info("Validating 'snapshot status' in 'stopped' state before "
                   "activating the snapshot")
        ret = get_snap_status_by_snapname(self.mnode, snap_name)
        self.assertIsNotNone(
            ret, ("Failed to Fetch Snapshot status for %s", snap_name))
        g.log.info("Snapshot Status Success for %s", snap_name)
        for brick in ret['volume']['brick']:
            self.assertEqual(brick['pid'], 'N/A',
                             ("Unexpected: Brick Pid '%s' is available for %s",
                              brick['pid'], brick['path']))
        g.log.info("Expected: Deactivated Snapshot Brick PID is 'N/A'")

        # Activate Snapshot
        g.log.info("Starting to Activate %s", snap_name)
        ret, _, _ = snap_activate(self.mnode, snap_name)
        self.assertEqual(ret, 0,
                         ("Snapshot Activation Failed for %s", snap_name))
        g.log.info("Snapshot %s Activated Successfully", snap_name)

        # Validate Snapshot Info After Activation
        g.log.info("Validating 'snapshot info' in 'started' state after"
                   " activating the snapshot")
        snap_info = get_snap_info_by_snapname(self.mnode, snap_name)
        self.assertEqual(snap_info['snapVolume']['status'], "Started",
                         ("Failed to Fetch Snapshot info after activate "
                          "for %s", snap_name))
        g.log.info("Success: Snapshot info in 'started' state")

        # Validate Snapshot Status After Activation
        g.log.info("Validating 'snapshot status' in started state after "
                   "activating the snapshot")
        ret = get_snap_status_by_snapname(self.mnode, snap_name)
        for brick in ret['volume']['brick']:
            self.assertNotEqual(brick['pid'], 'N/A',
                                ("Brick Pid is 'N/A' for brick %s of "
                                 "Activated Snapshot %s"
                                 % (brick['path'], snap_name)))
        g.log.info("Successfully validated that Brick PIDs are available "
                   "for the Activated Snapshot")

        # Deactivate Snapshot
        g.log.info("Starting to Deactivate %s", snap_name)
        ret, _, _ = snap_deactivate(self.mnode, snap_name)
        self.assertEqual(ret, 0,
                         ("Snapshot Deactivation Failed for %s", snap_name))
        g.log.info("Successfully Deactivated Snapshot %s", snap_name)

        # Validate Snapshot Info After Deactivation
        g.log.info("Validating 'snapshot info' in stopped state after "
                   "deactivating the snapshot")
        ret = get_snap_info_by_snapname(self.mnode, snap_name)
        self.assertEqual(ret['snapVolume']['status'], 'Stopped',
                         ("Snapshot Status is not in 'Stopped' State"))
        g.log.info("Expected: Snapshot is in Stopped state after Deactivation")

        # Validate Snapshot Status After Deactivation
        g.log.info("Validating 'snapshot status' in 'stopped' state after "
                   "deactivating the snapshot")
        ret = get_snap_status_by_snapname(self.mnode, snap_name)
        for brick in ret['volume']['brick']:
            self.assertEqual(brick['pid'], 'N/A',
                             ("Deactivated Snapshot Brick Pid %s available "
                              "for %s", brick['pid'], brick['path']))
        g.log.info("Expected: Deactivated Snapshot Brick PID is 'N/A'")
Example #28
    def test_auto_delete_snap(self):
        """
        * enabling auto-delete snapshot
        * Setting max-hard limit and max-soft-limit
        * Validating max-hard-limit and max-soft-limit
        * Verify the limits by creating another 20 snapshots
        * Oldest of newly created snapshots will be deleted
        * Retaining the latest 8(softlimit) snapshots
        * cleanup snapshots and volumes
        """
        # Setup volume
        ret = self.setup_volume()
        if not ret:
            raise ExecutionError("Failed to setup volume %s" % self.volname)
        g.log.info("Volume %s has been setup successfully" % self.volname)

        # Enable auto-delete
        cmd = "gluster snapshot config auto-delete enable"
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0, "Failed to enable the auto-delete snapshot "
                         "config option")
        g.log.info("Snapshot auto-delete Successfully enabled")

        # Set snap-max-hard-limit
        option = {'snap-max-hard-limit': '10'}
        ret = set_snap_config(self.mnode, option, self.volname)
        self.assertTrue(ret, ("Failed to set snap-max-hard-limit config "
                              "option for volume %s" % self.volname))
        g.log.info("snap-max-hard-limit config option Successfully set for "
                   "volume %s", self.volname)

        # Validating max-hard-limit
        hardlimit = get_snap_config(self.mnode)
        get_hardlimit = hardlimit['volumeConfig'][0]['hardLimit']
        self.assertEqual(get_hardlimit, '10',
                         "Failed to validate max-hard-limit: expected '10', "
                         "got '%s'" % get_hardlimit)
        g.log.info("Successfully validated max-hard-limit")

        # Set snap-max-soft-limit
        option = {'snap-max-soft-limit': '80'}
        ret = set_snap_config(self.mnode, option)
        self.assertTrue(ret, ("Failed to set snap-max-soft-limit config "
                              "option"))
        g.log.info("snap-max-soft-limit config option Successfully set")

        # Validating max-soft-limit
        softlimit = get_snap_config(self.mnode)
        get_softlimit = softlimit['volumeConfig'][0]['softLimit']
        self.assertEqual(get_softlimit, '8',
                         "Failed to validate max-soft-limit: expected '8', "
                         "got '%s'" % get_softlimit)
        g.log.info("Successfully validated max-soft-limit")

        # Create 20 more snapshots. As the snapshot count crosses the
        # soft-limit, the oldest of the newly created snapshots should be
        # auto-deleted so that only the latest 8 (the soft limit) remain.
        for snap_count in range(10, 30):
            ret, _, _ = snap_create(
                self.mnode, self.volname, "snap%s" % snap_count, False,
                "This is the Description with $p3c1al characters!")
            self.assertEqual(ret, 0, ("Failed to create snapshot snap%s for "
                                      "volume %s"
                                      % (snap_count, self.volname)))
            g.log.info("Snapshot snap%s of volume %s created successfully",
                       snap_count, self.volname)

        # Snapshot list to count the total number of snapshots remaining
        # after auto-delete
        cmd = "gluster snapshot list | wc -l"
        ret, out, _ = g.run(self.mnode, cmd)
        self.assertEqual(
            ret, 0, ("Failed to list snapshots of volume %s" % self.volname))
        g.log.info("Total number of snapshots remaining after auto-delete "
                   "is enabled: %s", out)
        self.assertEqual(
            int(out.strip()), 8,
            "Failed to validate the expected number of snapshots: expected "
            "8, found %s" % out.strip())
        g.log.info("Snapshot Validation Successful")
        g.log.info("Snapshot list command for volume %s was successful",
                   self.volname)
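
The retained count asserted above follows from the two config values: snap-max-soft-limit is a percentage of snap-max-hard-limit, so a hard limit of 10 with a soft limit of 80 keeps floor(10 * 80 / 100) = 8 snapshots once auto-delete kicks in. A tiny sketch of that arithmetic, useful for deriving the expected count instead of hard-coding 8 (the helper is illustrative):

def expected_retained_snaps(hard_limit, soft_limit_percent):
    """Snapshots retained by auto-delete once the count crosses the
    soft limit: floor of hard_limit * soft_limit_percent / 100."""
    return (hard_limit * soft_limit_percent) // 100

# For the values set in this test:
#   expected_retained_snaps(10, 80) == 8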
    def test_snap_list_glusterd_restart(self):
        """
        Verify snapshot list before and after glusterd restart

        * Create 3 snapshots of the volume
        * Delete one snapshot
        * List all snapshots created
        * Restart glusterd on all nodes
        * List all snapshots
          All snapshots must be listed except the one that was deleted
        """

        # pylint: disable=too-many-statements
        # Create snapshots
        for snap in self.snapshots:
            ret, _, _ = snap_create(self.mnode, self.volname, snap)
            self.assertEqual(ret, 0, ("Failed to create snapshot %s for "
                                      "volume %s" % (snap, self.volname)))
            g.log.info("Snapshot %s created successfully "
                       "for volume %s", snap, self.volname)

        # List the snapshots and validate with snapname
        snap_list = get_snap_list(self.mnode)
        self.assertIsNotNone(snap_list, "Failed to list all snapshots")
        self.assertEqual(len(snap_list), 3, "Failed to validate snap list")
        g.log.info("Successfully validated snap list")
        for snap in self.snapshots:
            self.assertIn(
                snap, snap_list, "Failed to validate the snapshot "
                "%s in the snapshot list" % snap)
        g.log.info("Successfully validated the presence of snapshots using "
                   "snapname")

        # Delete one snapshot
        ret, _, _ = snap_delete(self.mnode, self.snapshots[0])
        self.assertEqual(ret, 0,
                         ("Failed to delete snapshot %s" % self.snapshots[0]))
        g.log.info("Snapshots %s deleted Successfully", self.snapshots[0])

        # List the snapshots and validate with snapname
        snap_list = get_snap_list(self.mnode)
        self.assertIsNotNone(snap_list, "Failed to list all snapshots")
        self.assertEqual(len(snap_list), 2, "Failed to validate snap list")
        g.log.info("Successfully validated snap list")
        for snap in self.snapshots[1:]:
            self.assertIn(
                snap, snap_list, "Failed to validate the snapshot "
                "%s in the snapshot list" % snap)
        g.log.info("Successfully validated the presence of snapshots using "
                   "snapname")

        # Restart glusterd on all the servers
        ret = restart_glusterd(self.servers)
        self.assertTrue(
            ret, ("Failed to restart glusterd on nodes %s" % self.servers))
        g.log.info("Successfully restarted glusterd on nodes %s", self.servers)

        # Wait for glusterd to be online and validate glusterd running on all
        # server nodes
        self.assertTrue(
            wait_for_glusterd_to_start(self.servers),
            "Unexpected: glusterd not up on one or more of the nodes")
        g.log.info("Glusterd is up and running on all nodes")

        # Check if peers are connected
        self.assertTrue(is_peer_connected(self.mnode, self.servers),
                        "Unexpected: Peers are not in connected state")
        g.log.info("Successful: All peers are in connected state")

        # List the snapshots after glusterd restart
        # All snapshots must be listed except the one deleted
        for server in self.servers:
            snap_list = get_snap_list(server)
            self.assertIsNotNone(
                snap_list,
                "Failed to get the list of snapshots in node %s" % server)
            self.assertEqual(
                len(snap_list), 2,
                "Unexpected: Number of snapshots not consistent in the node %s"
                % server)
            g.log.info("Successfully validated snap list for node %s", server)
            for snap in self.snapshots[1:]:
                self.assertIn(
                    snap, snap_list, "Failed to validate the snapshot "
                    "%s in the snapshot list" % snap)
            g.log.info(
                "Successfully validated the presence of snapshots "
                "using snapname for node %s", server)
    def test_snap_invalid_case(self):

        # Creating snapshot:
        g.log.info("Starting to Create snapshot")
        for count in range(1, 3):
            self.snap = "snap%s" % count
            ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
            self.assertEqual(
                ret, 0,
                "Failed to create snapshot %s for volume %s"
                % (self.snap, self.volname))
        g.log.info("Snapshot %s created successfully"
                   " for volume %s", self.snap, self.volname)

        # Check snapshot info for non-existing snapshot
        g.log.info("Checking snapshot info")
        ret, _, _ = snap_info(self.mnode, self.snap5)
        self.assertEqual(
            ret, 1, "Unexpected: Successful in getting information for "
            "non-existing %s snapshot" % self.snap5)
        g.log.info(
            "Expected result: failed to get information"
            " for non-existing %s snapshot", self.snap5)

        # Check snapshot status for non-existing snapshot
        g.log.info("Checking snapshot status")
        ret, _, _ = snap_status(self.mnode, self.snap5)
        self.assertEqual(
            ret, 1, "Unexpected: Successful in getting "
            "status for non-existing "
            "%s snapshot" % self.snap5)
        g.log.info(
            "Expected result: failed to get status"
            " for non-existing %s snapshot", self.snap5)

        # Check snapshot info for non-existing volume
        g.log.info("Checking snapshot info")
        ret, _, _ = snap_info(self.mnode, self.volname1)
        self.assertEqual(
            ret, 1, "Unexpected: Successful in getting information for "
            "non-existing %s volume" % self.volname1)
        g.log.info(
            "Expected result: failed to get information"
            " for non-existing %s volume", self.volname1)

        # Check snapshot status for non-existing volume
        g.log.info("Checking snapshot status")
        ret, _, _ = snap_status(self.mnode, self.volname1)
        self.assertEqual(
            ret, 1, "Unexpected: Successful in getting status for "
            "non-existing %s volume" % self.volname1)
        g.log.info(
            "Expected result: failed to get status"
            " for non-existing %s volume", self.volname1)

        # Invalid command
        g.log.info("Passing invalid status command")
        cmd = "gluster snapshot snap1 status"
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertEqual(
            ret, 1, "Unexpected: Snapshot status"
            " command Successful even with Invalid"
            " command")
        g.log.info("Expected result: snapshot status command failed")

        # Invalid command
        g.log.info("Passing invalid info command")
        cmd = "gluster snapshot snap1 info"
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertEqual(
            ret, 1, "Unexpected: Snapshot info "
            "command Successful even with Invalid "
            " command")
        g.log.info("Expected result: snapshot information command Failed")