Code example #1
    def test_peer_probe_snapd_running(self):
        '''
        -> Create Volume
        -> Create snap for that volume
        -> Enable uss
        -> Check snapd running or not
        -> Probe a new node while snapd is running
        '''

        # Detach a node; the detached node is treated as the extra
        # server
        extra_node = self.servers[-1]
        ret, _, _ = peer_detach(self.mnode, extra_node)
        self.assertEqual(ret, 0, "Peer detach failed for %s" % extra_node)
        g.log.info("Peer detach success for %s", extra_node)

        # Remove the detached node from 'self.servers', because
        # 'self.setup_volume' checks the peer status of every node in
        # 'self.servers' before creating the volume
        self.servers.remove(extra_node)

        # Creating volume
        ret = self.setup_volume()
        self.assertTrue(ret, "Failed Create volume %s" % self.volname)
        g.log.info("Volume created successfully %s", self.volname)

        # Adding node back into self.servers list
        self.servers.append(extra_node)

        # creating Snap
        ret, _, _ = snap_create(self.mnode, self.volname, 'snap1')
        self.assertEqual(ret, 0,
                         "Snap creation failed for volume %s" % self.volname)
        g.log.info("Snap created successfully for volume %s", self.volname)

        # Enabling Snapd(USS)
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0,
                         "Failed to enable USS for volume %s" % self.volname)
        g.log.info("USS Enabled successfully on volume %s", self.volname)

        # Check whether snapd is running
        ret = is_snapd_running(self.mnode, self.volname)
        self.assertTrue(ret, "Snapd not running for volume %s" % self.volname)
        g.log.info("snapd running for volume %s", self.volname)

        # Probing new node
        ret = peer_probe_servers(self.mnode, extra_node)
        self.assertTrue(ret,
                        "Peer Probe failed for new server %s" % extra_node)
        g.log.info("Peer Probe success for new server %s", extra_node)
Code example #2
    def validate_snapd(self, check_condition=True):
        """ Validate snapd running """
        for server in self.server_list:
            ret = is_snapd_running(server, self.clone_vol1)
            if check_condition:
                self.assertTrue(
                    ret, "Unexpected: Snapd is Not running for "
                    "volume %s on node %s" % (self.clone_vol1, server))
                g.log.info(
                    "Snapd Running for volume %s "
                    "on node: %s", self.clone_vol1, server)
            else:
                self.assertFalse(
                    ret, "Unexpected: Snapd is running for "
                    "volume %s on node %s" % (self.clone_vol1, server))
                g.log.info("Expected: Snapd is not Running for volume"
                           " %s on node: %s", self.clone_vol1, server)
Code example #3
    def test_uss_snap_active_deactive(self):

        # pylint: disable=too-many-statements
        """
        Steps:
        * Create volume
        * Mount volume
        * Perform I/O on mounts
        * Create 2 snapshots snapy1 & snapy2
        * Validate snap created
        * Enable USS
        * Validate USS is enabled
        * Validate snapd is running
        * Activate snapy1 & snapy2
        * List snaps under .snap directory
          -- snapy1 and snapy2 should be listed under .snaps
        * Deactivate snapy2
        * List snaps under .snap directory
          -- snapy2 is not listed as it is deactivated
        * Activate snapy2
        * List snaps under .snap directory
          -- snapy1 and snapy2 should be listed under .snaps
        """

        # Perform I/O
        g.log.info("Starting IO on all mounts...")
        self.counter = 1
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 2 "
                "--dir-length 2 "
                "--max-num-of-dirs 2 "
                "--num-of-files 2 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        g.log.info("Wait for IO to complete and validate IO ...")
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        self.io_validation_complete = True
        g.log.info("I/O successful on clients")

        # Enable USS
        g.log.info("Enable USS on volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume")
        g.log.info("Successfully enabled USS on volume")

        # Validate USS is enabled
        g.log.info("Validating USS is enabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, "USS is disabled on volume " "%s" % self.volname)
        g.log.info("USS enabled on volume %s", self.volname)

        # Validate snapd running
        for server in self.servers:
            g.log.info("Validating snapd daemon on:%s", server)
            ret = is_snapd_running(server, self.volname)
            self.assertTrue(ret, "Snapd is Not running on " "%s" % server)
            g.log.info("Snapd Running on node: %s", server)

        # Create 2 snapshots
        g.log.info("Creating 2 snapshots for volume %s", self.volname)
        for i in range(1, 3):
            ret, _, _ = snap_create(self.mnode, self.volname, "snapy%s" % i)
            self.assertEqual(
                ret, 0, ("Failed to create snapshot for %s" % self.volname))
            g.log.info("Snapshot %s created successfully for volume  %s",
                       "snapy%s" % i, self.volname)

        # Check the number of snaps using snap_list; it should be 2 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            2, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snaps.")

        # Activate snapshot snapy1 & snapy2
        g.log.info("Activating snapshot snapy1 & snapy2")
        for i in range(1, 3):
            ret, _, _ = snap_activate(self.mnode, "snapy%s" % i)
            self.assertEqual(ret, 0, "Failed to activate snapshot snapy%s" % i)
        g.log.info("Both snapshots activated successfully")

        # List activated snapshots under the .snaps directory
        g.log.info("Listing activated snapshots under .snaps")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            validate_dir = out.split('\n')
            self.assertIn(
                "snapy1", validate_dir, "Failed to "
                "validate snapy1 under .snaps directory")
            g.log.info("Activated Snapshot snapy1 listed Successfully")
            self.assertIn(
                "snapy2", validate_dir, "Failed to "
                "validate snapy2 under .snaps directory")
            g.log.info("Activated Snapshot snapy2 listed Successfully")

        # Deactivate snapshot snapy2
        g.log.info("Deactivating snapshot snapy2")
        ret, _, _ = snap_deactivate(self.mnode, "snapy2")
        self.assertEqual(ret, 0, "Failed to deactivate snapshot snapy2")
        g.log.info("Successfully deactivated snapshot snapy2")

        # Validate snapy2 is not present in the mount point
        ret = view_snaps_from_mount(self.mounts, "snapy2")
        self.assertFalse(
            ret, "Unexpected: Still able to view snapy2"
            " from mount")
        g.log.info("Successfully verified deactivated snapshot "
                   "snapy2 is not listed")

        # Activate snapshot snapy2
        ret, _, _ = snap_activate(self.mnode, "snapy2")
        self.assertEqual(ret, 0, "Failed to activate Snapshot snapy2")
        g.log.info("Snapshot snapy2 activated successfully")

        # List activated snapshots under the .snaps directory
        g.log.info("Listing activated snapshots under .snaps")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            validate_dir = out.split('\n')
            self.assertIn(
                "snapy1", validate_dir, "Failed to "
                "validate snapy1 under .snaps directory")
            g.log.info("Activated Snapshot listed Successfully")
            self.assertIn(
                "snapy2", validate_dir, "Failed to "
                "validate snapy2 under .snaps directory")
            g.log.info("Re-activated Snapshot snapy2 listed Successfully")
Code example #4
    def test_bitd_scrubd_snapd_after_volume_reset(self):
        # pylint: disable=too-many-statements
        '''
        -> Create volume
        -> Enable BitD, Scrub and Uss on volume
        -> Verify the BitD, Scrub and Uss daemons are running on every node
        -> Reset the volume
        -> Verify the daemons (BitD, Scrub & Uss) are running or not
        -> Enable Uss on the same volume
        -> Reset the volume with force
        -> Verify all the daemons (BitD, Scrub & Uss) are running or not
        '''

        # enable bitrot and scrub on volume
        g.log.info("Enabling bitrot")
        ret, _, _ = enable_bitrot(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable bitrot on volume: %s" %
                         self.volname)
        g.log.info("Bitd and scrub daemons enabled successfully on volume :%s",
                   self.volname)

        # enable uss on volume
        g.log.info("Enabling snaphot(uss)")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable uss on volume: %s" %
                         self.volname)
        g.log.info("uss enabled successfully on  volume :%s", self.volname)

        # Check whether bitd, snapd and scrub daemons are running
        g.log.info("Checking whether snapshot, scrub and bitrot "
                   "daemons are running")
        node_list = []
        list_of_bricks = get_all_bricks(self.mnode, self.volname)
        for brick in list_of_bricks:
            node, _ = brick.split(r':')
            node_list.append(node)
        for mnode in node_list:
            ret = is_bitd_running(mnode, self.volname)
            self.assertTrue(ret, "Bitrot Daemon not running on %s server"
                            % mnode)
            ret = is_scrub_process_running(mnode, self.volname)
            self.assertTrue(ret, "Scrub Daemon not running on %s server"
                            % mnode)
            ret = is_snapd_running(mnode, self.volname)
            self.assertTrue(ret, "Snap Daemon not running on %s server"
                            % mnode)
        g.log.info("bitd, scrub and snapd running successflly on volume :%s",
                   self.volname)

        # command for volume reset
        g.log.info("started resetting volume")
        cmd = "gluster volume reset " + self.volname
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0, "volume reset failed for : %s" % self.volname)
        g.log.info("Volume reset successfully :%s", self.volname)

        # After volume reset snap daemon will not be running,
        # bitd and scrub daemons will be in running state.
        g.log.info("checking snapshot, scrub and bitrot daemons\
        running or not after volume reset")
        for mnode in node_list:
            ret = is_bitd_running(mnode, self.volname)
            self.assertTrue(ret, "Bitrot Daemon\
            not running on %s server:" % mnode)
            ret = is_scrub_process_running(mnode, self.volname)
            self.assertTrue(ret, "Scrub Daemon\
            not running on %s server:" % mnode)
            ret = is_snapd_running(mnode, self.volname)
            self.assertFalse(ret, "Snap Daemon should not be running on %s "
                             "server after volume reset:" % mnode)
        g.log.info("bitd and scrub daemons are running after volume reset "
                   "snapd is not running as expected on volume :%s",
                   self.volname)

        # enable uss on volume
        g.log.info("Enabling snaphot(uss)")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable uss on volume: %s" %
                         self.volname)
        g.log.info("uss enabled successfully on volume :%s", self.volname)

        # command for volume reset with force
        g.log.info("started resetting volume with force option")
        cmd = "gluster volume reset " + self.volname + " force"
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0, "volume reset fail\
               for : %s" % self.volname)
        g.log.info("Volume reset successfully with force option :%s",
                   self.volname)

        # After volume reset with force bitd, snapd and scrub daemons will
        # not be running; all three daemons will be stopped.
        g.log.info("checking snapshot, scrub and bitrot daemons\
        running or not after volume reset with force")
        for mnode in node_list:
            ret = is_bitd_running(mnode, self.volname)
            self.assertFalse(ret, "Bitrot Daemon should not be running on %s "
                             "server after volume reset with force" % mnode)
            ret = is_scrub_process_running(mnode, self.volname)
            self.assertFalse(ret, "Scrub Daemon should not be running on %s "
                             "server after volume reset with force" % mnode)
            ret = is_snapd_running(mnode, self.volname)
            self.assertFalse(ret, "Snap Daemon should not be running on %s "
                             "server after volume reset with force" % mnode)
        g.log.info("After volume reset bitd, scrub and snapd are not running "
                   "after volume reset with force on volume :%s", self.volname)
Code example #5
    def test_snap_uss_while_io(self):
        # pylint: disable=too-many-statements
        """
        Steps:
        1. Create volume
        2. enable uss on created volume
        3. validate uss running
        4. validate snapd running on all nodes
        5. perform io on mounts
        6. create 10 snapshots with description
        7. validate with snapshot list
        8. validate io is completed
        9. Activate snapshots to list all snaps
           under .snaps
        10. validate snapshots under .snaps directory
        """
        # Enable USS
        g.log.info("Enable USS for volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume"
                         "%s" % self.volname)
        g.log.info("Successfully enabled USS on volume %s", self.volname)

        # Validate USS running
        g.log.info("Validating USS enabled or disabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(
            ret, "Failed to validate USS for volume "
            "%s" % self.volname)
        g.log.info("Successfully validated USS for Volume" "%s", self.volname)

        # Validate snapd running
        for server in self.servers:
            g.log.info("Validating snapd daemon on:%s", server)
            ret = is_snapd_running(server, self.volname)
            self.assertTrue(ret, "Snapd is Not running on " "%s" % server)
            g.log.info("Snapd Running on node: %s", server)

        # Perform I/O
        g.log.info("Starting to Perform I/O")
        all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Generating data for %s:"
                       "%s", mount_obj.client_system, mount_obj.mountpoint)
            # Create files
            g.log.info('Creating files...')
            command = (
                "python %s create_files -f 100 --fixed-file-size 1M %s" %
                (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               command,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Creating snapshot with description
        g.log.info("Starting to Create snapshot")
        for count in range(0, self.snap_count):
            self.snap = "snap%s" % count
            ret, _, _ = snap_create(self.mnode,
                                    self.volname,
                                    self.snap,
                                    description='$p3C!@l C#@R@cT#R$')
            self.assertEqual(
                ret, 0,
                ("Failed to create snapshot for volume %s" % self.volname))
            g.log.info("Snapshot %s created successfully"
                       " for volume %s", self.snap, self.volname)

        # Validate snapshot list
        g.log.info("Starting to list all snapshots")
        ret, out, _ = snap_list(self.mnode)
        self.assertEqual(
            ret, 0, ("Failed to list snapshot of volume %s" % self.volname))
        s_list = out.strip().split('\n')
        self.assertEqual(len(s_list), self.snap_count, "Failed to validate "
                         "all snapshots")
        g.log.info(
            "Snapshots listed and validated for volume %s"
            " successfully", self.volname)

        # Activating snapshot
        g.log.info("Activating snapshot")
        for count in range(0, self.snap_count):
            self.snap = "snap%s" % count
            ret, _, _ = snap_activate(self.mnode, self.snap)
            self.assertEqual(ret, 0, "Failed to Activate snapshot "
                             "%s" % self.snap)
            g.log.info("snapshot %s activated successfully", self.snap)

        # Validate IO is completed
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")
        self.io_validation_complete = True

        # validate snapshots are listed under .snaps directory
        g.log.info("Validating snaps under .snaps")
        ret = view_snaps_from_mount(self.mounts, s_list)
        self.assertTrue(ret, "Failed to list snaps under .snaps" "directory")
        g.log.info("Snapshots Validated successfully")
Code example #6
    def test_snap_clone_snapd(self):
        """
        Steps:

        1. Create a volume
        2. Create a snapshot and activate it
        3. Clone the snapshot and mount it
        4. Check for snapd daemon
        5. Enable uss and validate snapd
        6. Stop cloned volume
        7. Validate snapd
        8. Start cloned volume
        9. Validate snapd
        10. Create 5 more snapshots
        11. Validate total number of
            snapshots created.
        12. Activate 5 snapshots
        13. Enable USS
        14. Validate snapd
        15. Kill snapd on all nodes
        16. Validate snapd running
        17. Force start clone volume
        18. Validate snaps inside .snaps directory
        """
        # pylint: disable=too-many-statements, too-many-locals

        # Starting I/O
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s" % (
                       self.script_upload_path,
                       mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate I/O
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # Creating snapshot
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(ret, 0, ("Failed to create snapshot for volume %s"
                                  % self.volname))
        g.log.info("Snapshot %s created successfully for "
                   "volume %s", self.snap, self.volname)

        # Activating created snapshots
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to activate snapshot %s"
                                  % self.snap))
        g.log.info("Snapshot snap%s activated successfully", self.snap)

        # Snapshot list
        self.assertIsNotNone(
            get_snap_list(self.mnode), "Failed to list snapshot")
        g.log.info("Snapshot list command Successful")

        # Creating a clone of the snapshot
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to clone %s" % self.clone_vol1)
        g.log.info("Clone volume %s created successfully", self.clone_vol1)

        # Start the clone volume
        ret, _, _ = volume_start(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to start %s" % self.clone_vol1)
        g.log.info("%s started successfully", self.clone_vol1)

        # Form server list
        brick_list = get_all_bricks(self.mnode, self.clone_vol1)
        for bricks in brick_list:
            self.server_lists.append(bricks.split(":")[0])
        self.server_list = list(set(self.server_lists))

        # Get volume info
        vol_info = get_volume_info(self.mnode, self.clone_vol1)
        self.assertIsNotNone(vol_info, "Failed to get vol info")
        g.log.info("Successfully in getting vol info")

        # Redefining mounts for cloned volume
        self.mount_points, self.mounts_dict_list = [], []
        for client in self.all_clients_info:
            mount = {
                'protocol': self.mount_type,
                'server': self.mnode,
                'volname': self.clone_vol1,
                'client': self.all_clients_info[client],
                'mountpoint': (path.join(
                    "%s" % self.mpoint)),
                'options': ''
            }
            self.mounts_dict_list.append(mount)
        self.mount1 = create_mount_objs(self.mounts_dict_list)
        self.mount_points.append(self.mpoint)
        g.log.info("Successfully made entry in self.mount1")

        # FUSE mount clone1 volume
        for mount_obj in self.mounts:
            ret, _, _ = mount_volume(self.clone_vol1, self.mount_type,
                                     self.mpoint,
                                     self.mnode, mount_obj.client_system)
            self.assertEqual(ret, 0, "Volume mount failed for clone1")
            g.log.info("%s mounted Successfully", self.clone_vol1)

            # Validate clone volume is mounted or not
            ret = is_mounted(self.clone_vol1, self.mpoint, self.mnode,
                             mount_obj.client_system, self.mount_type)
            self.assertTrue(ret, "Volume not mounted on mount point: "
                            "%s" % self.mpoint)
            g.log.info("Volume %s mounted on %s", self.clone_vol1, self.mpoint)

        # Log Cloned Volume information
        ret = log_volume_info_and_status(self.mnode, self.clone_vol1)
        self.assertTrue("Failed to Log Info and Status of Volume "
                        "%s" % self.clone_vol1)
        g.log.info("Successfully Logged Info and Status")

        # Validate snapd is not running on any node (USS not enabled yet)
        self.validate_snapd(check_condition=False)

        # Enable USS
        ret, _, _ = enable_uss(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to enable USS on cloned volume")
        g.log.info("Successfully enabled USS on Cloned volume")

        # Validate USS running
        self.validate_uss()

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Stop cloned volume
        ret, _, _ = volume_stop(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to stop cloned volume "
                         "%s" % self.clone_vol1)
        g.log.info("Successfully Stopped Cloned volume %s", self.clone_vol1)

        # Validate snapd is not running on any node (volume stopped)
        self.validate_snapd(check_condition=False)

        # Start cloned volume
        ret, _, _ = volume_start(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to start cloned volume"
                         " %s" % self.clone_vol1)
        g.log.info("Successfully started cloned volume"
                   " %s", self.clone_vol1)

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Create 5 snapshots
        self.snaps_list = [('test_snap_clone_snapd-snap%s' % i)
                           for i in range(0, 5)]
        for snapname in self.snaps_list:
            ret, _, _ = snap_create(self.mnode, self.clone_vol1,
                                    snapname)
            self.assertEqual(ret, 0, ("Failed to create snapshot for volume"
                                      " %s" % self.clone_vol1))
            g.log.info("Snapshot %s created successfully for volume "
                       "%s", snapname, self.clone_vol1)

        # Validate USS running
        self.validate_uss()

        # Check snapshot under .snaps directory
        self.check_snaps()

        # Activate Snapshots
        for snapname in self.snaps_list:
            ret, _, _ = snap_activate(self.mnode, snapname)
            self.assertEqual(ret, 0, ("Failed to activate snapshot %s"
                                      % snapname))
            g.log.info("Snapshot %s activated "
                       "successfully", snapname)

        # Validate USS running
        self.validate_uss()

        # Validate snapshots under .snaps folder
        self.validate_snaps()

        # Kill snapd on all nodes except the management node and validate
        for server in self.servers[1:]:
            ret, _, _ = terminate_snapd_on_node(server)
            self.assertEqual(ret, 0, "Failed to Kill snapd on node %s"
                             % server)
            g.log.info("snapd Killed Successfully on node %s", server)

            # Check snapd running
            ret = is_snapd_running(server, self.clone_vol1)
            self.assertTrue(ret, "Unexpected: Snapd running on node: "
                            "%s" % server)
            g.log.info("Expected: Snapd is not running on node:%s", server)

            # Check snapshots under .snaps folder
            g.log.info("Validating snapshots under .snaps")
            ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint)
            self.assertEqual(ret, 0, "Target endpoint not connected")
            g.log.info("Successfully listed snapshots under .snaps")

        # Kill snapd on the management node
        ret, _, _ = terminate_snapd_on_node(self.servers[0])
        self.assertEqual(ret, 0, "Failed to Kill snapd on node %s"
                         % self.servers[0])
        g.log.info("snapd Killed Successfully on node %s", self.servers[0])

        # Validate snapd is not running on any node
        self.validate_snapd(check_condition=False)

        # Validating snapshots under .snaps
        ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint)
        self.assertNotEqual(ret, 0, "Unexpected: Successfully listed "
                            "snapshots under .snaps")
        g.log.info("Expected: Target endpoint not connected")

        # Start the Cloned volume(force start)
        ret, _, _ = volume_start(self.mnode, self.clone_vol1, force=True)
        self.assertEqual(ret, 0, "Failed to start cloned volume "
                         "%s" % self.clone_vol1)
        g.log.info("Successfully Started Cloned volume %s", self.clone_vol1)

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Validate snapshots under .snaps folder
        self.validate_snaps()
Code example #7
    def test_uss_snap_restore(self):
        """
        Description:
            This test case will validate USS after Snapshot restore.
            The restored snapshot should not be listed under the '.snaps'
            directory.

        * Perform I/O on mounts
        * Enable USS on volume
        * Validate USS is enabled
        * Create a snapshot
        * Activate the snapshot
        * Perform some more I/O
        * Create another snapshot
        * Activate the second snapshot
        * Restore volume to the second snapshot
        * From mount point validate under .snaps
          - first snapshot should be listed
          - second snapshot should not be listed
        """

        # pylint: disable=too-many-statements
        # Perform I/O
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name firstfiles %s" %
               (self.script_upload_path, self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system,
                           cmd,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)

        # Wait for IO to complete and validate IO
        self.assertTrue(
            wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0]),
            "IO failed on %s" % self.mounts[0])
        g.log.info("IO is successful on all mounts")

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Enable USS
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume")
        g.log.info("Successfully enabled USS on volume")

        # Validate USS is enabled
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, "USS is disabled on volume %s" % self.volname)
        g.log.info("USS enabled on volume %s", self.volname)

        # Create a snapshot
        ret, _, _ = snap_create(self.mnode, self.volname, self.snapshots[0])
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume  %s",
                   self.snapshots[0], self.volname)

        # Check the number of snaps using snap_list; it should be 1 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            1, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snapshots")

        # Activate the snapshot
        ret, _, _ = snap_activate(self.mnode, self.snapshots[0])
        self.assertEqual(
            ret, 0, ("Failed to activate snapshot %s" % self.snapshots[0]))
        g.log.info("Snapshot %s activated successfully", self.snapshots[0])

        # Perform I/O
        self.all_mounts_procs = []
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name secondfiles %s" %
               (self.script_upload_path, self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system,
                           cmd,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)

        # Wait for IO to complete and validate IO
        self.assertTrue(
            wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0]),
            "IO failed on %s" % self.mounts[0])
        g.log.info("IO is successful on all mounts")

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Create another snapshot
        ret, _, _ = snap_create(self.mnode, self.volname, self.snapshots[1])
        self.assertEqual(
            ret, 0, ("Failed to create snapshot for volume %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume  %s",
                   self.snapshots[1], self.volname)

        # Check the number of snaps using snap_list; it should be 2 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            2, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snapshots")

        # Activate the second snapshot
        ret, _, _ = snap_activate(self.mnode, self.snapshots[1])
        self.assertEqual(
            ret, 0, ("Failed to activate snapshot %s" % self.snapshots[1]))
        g.log.info("Snapshot %s activated successfully", self.snapshots[1])

        # Restore volume to the second snapshot
        ret = snap_restore_complete(self.mnode, self.volname,
                                    self.snapshots[1])
        self.assertTrue(ret, ("Failed to restore snap %s on the "
                              "volume %s" % (self.snapshots[1], self.volname)))
        g.log.info("Restore of volume is successful from %s on "
                   "volume %s", self.snapshots[1], self.volname)

        # Verify all volume processes are online
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(ret, "Failed: All volume processes are not online")
        g.log.info("All volume processes are online")
        ret = is_snapd_running(self.mnode, self.volname)
        self.assertTrue(
            ret, "Failed: snapd is not running for volume %s" % self.volname)
        g.log.info("Successful: snapd is running")

        # List activated snapshots under the .snaps directory
        snap_dir_list = get_uss_list_snaps(self.mounts[0].client_system,
                                           self.mounts[0].mountpoint)
        self.assertIsNotNone(
            snap_dir_list, "Failed to list snapshots under .snaps directory")
        g.log.info("Successfully gathered list of snapshots under the .snaps"
                   " directory")

        # Check for first snapshot as it should get listed here
        self.assertIn(self.snapshots[0], snap_dir_list,
                      ("Unexpected : %s not listed under .snaps "
                       "directory" % self.snapshots[0]))
        g.log.info("Activated Snapshot %s listed Successfully",
                   self.snapshots[0])

        # Check for second snapshot as it should not get listed here
        self.assertNotIn(self.snapshots[1], snap_dir_list,
                         ("Unexpected : %s listed in .snaps "
                          "directory" % self.snapshots[1]))
        g.log.info("Restored Snapshot %s not listed ", self.snapshots[1])
Code example #8
    def test_bitd_scrubd_snapd_after_volume_reset(self):
        '''
        -> Create volume
        -> Enable BitD, Scrub and Uss on volume
        -> Verify the BitD, Scrub and Uss daemons are running on every node
        -> Reset the volume
        -> Verify the daemons (BitD, Scrub & Uss) are running or not
        -> Enable Uss on the same volume
        -> Reset the volume with force
        -> Verify all the daemons (BitD, Scrub & Uss) are running or not
        :return:
        '''

        # enable bitrot and scrub on volume
        g.log.info("Enabling bitrot")
        ret, out, _ = enable_bitrot(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable bitrot on volume: %s"
                         % self.volname)
        g.log.info("Bitd and scrub daemons enabled successfully on "
                   "volume :%s", self.volname)

        # enable uss on volume
        g.log.info("Enabling snaphot(uss)")
        ret, out, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable uss on volume: %s"
                         % self.volname)
        g.log.info("uss enabled successfully on volume :%s", self.volname)

        # Check whether bitd, snapd and scrub daemons are running
        g.log.info("Checking whether snapshot, scrub and bitrot "
                   "daemons are running")
        for mnode in self.servers:
            ret = is_bitd_running(mnode, self.volname)
            self.assertTrue(ret, "Bitrot Daemon not running on %s server"
                            % mnode)
            ret = is_scrub_process_running(mnode, self.volname)
            self.assertTrue(ret, "Scrub Daemon not running on %s server"
                            % mnode)
            ret = is_snapd_running(mnode, self.volname)
            self.assertTrue(ret, "Snap Daemon not running on %s server"
                            % mnode)
        g.log.info("bitd, scrub and snapd running successfully on "
                   "volume :%s", self.volname)

        # command for volume reset
        g.log.info("started resetting volume")
        cmd = "gluster volume reset " + self.volname
        ret, out, _ = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0,
                         "volume reset failed for: %s" % self.volname)
        g.log.info("Volume reset successfully :%s", self.volname)

        # After volume reset snap daemon will not be running,
        # bitd and scrub daemons will be in running state.
        g.log.info("checking snapshot, scrub and bitrot daemons\
        running or not after volume reset")
        for mnode in self.servers:
            ret = is_bitd_running(mnode, self.volname)
            self.assertTrue(ret, "Bitrot Daemon not running on %s server"
                            % mnode)
            ret = is_scrub_process_running(mnode, self.volname)
            self.assertTrue(ret, "Scrub Daemon not running on %s server"
                            % mnode)
            ret = is_snapd_running(mnode, self.volname)
            self.assertFalse(ret, "Snap Daemon should not be running on %s "
                             "server after volume reset" % mnode)
        g.log.info("bitd and scrub daemons are running after volume reset; "
                   "snapd is not running as expected on volume :%s",
                   self.volname)

        # enable uss on volume
        g.log.info("Enabling snaphot(uss)")
        ret, out, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable uss on volume: %s"
                         % self.volname)
        g.log.info("uss enabled successfully on volume :%s", self.volname)

        # command for volume reset with force
        g.log.info("started resetting volume with force option")
        cmd = "gluster volume reset " + self.volname + " force"
        ret, out, _ = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0,
                         "volume reset failed for: %s" % self.volname)
        g.log.info("Volume reset successfully with force option :%s",
                   self.volname)

        # After volume reset with force bitd, snapd and scrub daemons
        # will not be running; all three daemons will be stopped.
        g.log.info("Checking whether snapshot, scrub and bitrot daemons "
                   "are running after volume reset with force")
        for mnode in self.servers:
            ret = is_bitd_running(mnode, self.volname)
            self.assertFalse(ret, "Bitrot Daemon should not be running on %s "
                             "server after volume reset with force" % mnode)
            ret = is_scrub_process_running(mnode, self.volname)
            self.assertFalse(ret, "Scrub Daemon should not be running on %s "
                             "server after volume reset with force" % mnode)
            ret = is_snapd_running(mnode, self.volname)
            self.assertFalse(ret, "Snap Daemon should not be running on %s "
                             "server after volume reset with force" % mnode)
        g.log.info("bitd, scrub and snapd are not running after volume "
                   "reset with force on volume :%s", self.volname)