Example #1
    def setUpClass(cls):
        """Upload the necessary scripts to run tests.
        """
        # Calling GlusterBaseClass setUpClass
        GlusterBaseClass.setUpClass.im_func(cls)

        # Upload io scripts for running IO on mounts
        g.log.info(
            "Upload io scripts to clients %s for running IO on "
            "mounts", cls.clients)
        script_local_path = ("/usr/share/glustolibs/io/scripts/"
                             "file_dir_ops.py")
        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                  "file_dir_ops.py")
        ret = upload_scripts(cls.clients, script_local_path)
        if not ret:
            raise ExecutionError("Failed to upload IO scripts to clients %s" %
                                 cls.clients)
        g.log.info("Successfully uploaded IO scripts to clients %s",
                   cls.clients)
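Note: the setUpClass/setUp/tearDown snippets in these examples are methods of
GlusterBaseClass subclasses from the glusto-tests suite. The following minimal
sketch shows how a snippet like the one above typically plugs into a test
class; the class name and the @runs_on volume/mount types are illustrative
assumptions, not taken from any of the examples.

from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.misc.misc_libs import upload_scripts


@runs_on([['replicated', 'distributed'], ['glusterfs']])  # assumed types
class ExampleUploadScriptsTest(GlusterBaseClass):  # hypothetical class name

    @classmethod
    def setUpClass(cls):
        # Calling GlusterBaseClass setUpClass (newer glusto-tests style)
        cls.get_super_method(cls, 'setUpClass')()

        # Upload the IO helper script to every client
        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                  "file_dir_ops.py")
        if not upload_scripts(cls.clients, cls.script_upload_path):
            raise ExecutionError("Failed to upload IO scripts to clients %s"
                                 % cls.clients)
        g.log.info("Uploaded IO scripts to clients %s", cls.clients)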
Example #2
    def tearDown(self):
        brick_list = get_all_bricks(self.mnode, self.volname)
        for brick in brick_list:
            brick_node, _ = brick.split(":")
            del_user(brick_node, "test_user1")
            del_user(brick_node, "test_user2")

        for mount_obj in self.mounts:
            del_user(mount_obj.client_system, "test_user1")
            del_user(mount_obj.client_system, "test_user2")

        # Unmount Volume and Cleanup Volume
        g.log.info("Starting to Unmount Volume and Cleanup Volume")
        ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
        g.log.info("Successful in Unmount Volume and Cleanup Volume")

        # Calling GlusterBaseClass tearDown
        GlusterBaseClass.tearDown.im_func(self)
Example #3
    def setUpClass(cls):
        # Calling GlusterBaseClass setUpClass
        GlusterBaseClass.setUpClass.im_func(cls)

        # Override replica count to be 3
        if cls.volume_type == "replicated":
            cls.volume['voltype'] = {
                'type': 'replicated',
                'replica_count': 3,
                'transport': 'tcp'}

        # Upload io scripts for running IO on mounts
        g.log.info("Upload io scripts to clients %s for running IO on "
                   "mounts", cls.clients)
        script_abs_path = "/usr/share/glustolibs/io/scripts/file_dir_ops.py"
        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                  "file_dir_ops.py")
        ret = upload_scripts(cls.clients, script_abs_path)
        if not ret:
            raise ExecutionError("Failed to upload IO scripts to clients")
Example #4
    def setUpClass(cls):
        # Calling GlusterBaseClass setUpClass
        GlusterBaseClass.setUpClass.im_func(cls)

        # Upload io scripts for running IO on mounts
        g.log.info("Upload io scripts to clients %s for running IO on mounts" %
                   cls.clients)
        script_local_path = ("/usr/share/glustolibs/io/scripts/"
                             "file_dir_ops.py")
        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                  "file_dir_ops.py")
        ret = upload_scripts(cls.clients, [script_local_path])
        if not ret:
            raise ExecutionError("Failed to upload IO scripts to clients %s" %
                                 cls.clients)
        g.log.info("Successfully uploaded IO scripts to clients %s" %
                   cls.clients)

        cls.counter = 1
        """int: Value of counter is used for dirname-start-num argument for
Example #5
def is_hard_limit_exceeded(mnode, volname, path=None):
    """Parse the output of 'gluster quota list' command.

    Args:
        mnode (str): Node on which command has to be executed.
        volname (str): volume name

    Kwargs:
        path (str): Quota path

    Returns:
        boolean: True if exceeded, False if not.

    Examples:
        >>> is_hard_limit_exceeded('abc.lab.eng.xyz.com', "testvol")
        False
    """
    if not path:
        path = ''

    cmd = "gluster volume quota %s list %s --xml" % (volname, path)
    ret, out, _ = g.run(mnode, cmd)
    if ret != 0:
        g.log.error("Failed to execute 'quota list' on node %s. "
                    "Hence failed to get the quota list." % mnode)
        raise ExecutionError("Quota list --xml command failed")
    else:
        try:
            root = etree.XML(out)
        except etree.ParseError:
            raise ExecutionParseError("Failed to parse the gluster quota "
                                      "list xml output.")
        else:
            for path in root.findall("volQuota/limit"):
                for elem in path.getchildren():
                    if elem.tag == 'hl_exceeded':
                        if elem.text == 'Yes':
                            return True
            return False
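A brief usage sketch for the helper above, as it might be called from a quota
test method; the node, volume, and quota path come from the test's own
attributes and are placeholders here.

    # Hypothetical call site inside a test method: self.mnode and
    # self.volname are provided by GlusterBaseClass, the path is illustrative.
    if is_hard_limit_exceeded(self.mnode, self.volname, path='/level1/subdir1'):
        g.log.info("Hard limit already exceeded on /level1/subdir1")
    else:
        g.log.info("Hard limit not exceeded yet; continuing IO")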
Example #6
    def tearDown(self):
        # Deleting the user which was created in setUp
        for mount_object in self.mounts:
            # Delete user
            g.log.info('Deleting user qa...')
            command = "userdel -r qa"
            ret, _, err = g.run(mount_object.client_system, command)

            if 'does not exist' in err:
                g.log.warn('User qa is already deleted')
            else:
                g.log.info('User qa successfully deleted')
        # Cleanup and umount volume
        g.log.info("Starting to Unmount Volume and Cleanup Volume")
        ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to umount the vol & cleanup Volume")
        g.log.info("Successful in umounting the volume and Cleanup")

        # Calling GlusterBaseClass teardown
        GlusterBaseClass.tearDown.im_func(self)
Example #7
    def tearDown(self):
        """
        Unmount and cleanup the volumes
        """
        # Unmount volumes
        all_mounts = self.mounts + self.mounts_new
        for mount_obj in all_mounts:
            ret = mount_obj.unmount()
            if ret:
                g.log.info("Successfully unmounted volume %s from %s",
                           mount_obj.volname, mount_obj.client_system)
            else:
                g.log.error("Failed to unmount volume %s from %s",
                            mount_obj.volname, mount_obj.client_system)

        # Cleanup volumes
        for volume in self.volname, self.volname_new:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to cleanup volume %s", volume)
            g.log.info("Volume %s deleted successfully", volume)
Example #8
    def setUpClass(cls):
        GlusterBaseClass.setUpClass.im_func(cls)
        cls.snap1 = "snap1"
        cls.snap2 = "snap21"
        cls.clone1 = "clone1"
        cls.clone2 = "clone2"
        cls.mpoint1 = "/mnt/clone1"
        cls.mpoint2 = "/mnt/clone2"

        # Upload io scripts for running IO on mounts
        g.log.info(
            "Upload io scripts to clients %s for running IO on "
            "mounts", cls.clients)
        script_local_path = ("/usr/share/glustolibs/io/scripts/"
                             "file_dir_ops.py")
        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                  "file_dir_ops.py")
        ret = upload_scripts(cls.clients, script_local_path)
        if not ret:
            raise ExecutionError("Failed to upload IO scripts " "to clients ")
        g.log.info("Successfully uploaded IO scripts to clients %s")
Example #9
    def tearDown(self):
        """
        Cleanup and umount volume
        """

        # Delete user
        for mount_object in self.mounts:
            self.delete_user(mount_object.client_system, 'qa')

        for server in self.servers:
            self.delete_user(server, 'qa')

        # Cleanup and umount volume
        g.log.info("Starting to Unmount Volume and Cleanup Volume")
        ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to umount the vol & cleanup Volume")
        g.log.info("Successful in umounting the volume and Cleanup")

        # Calling GlusterBaseClass teardown
        self.get_super_method(self, 'tearDown')()
Example #10
    def setUpClass(cls):
        """
        Setup nfs-ganesha if not exists.
        Upload IO scripts to clients
        """
        cls.get_super_method(cls, 'setUpClass')()

        # Upload IO scripts for running IO on mounts
        g.log.info(
            "Upload io scripts to clients %s for running IO on "
            "mounts", cls.clients)
        script_local_path = ("/usr/share/glustolibs/io/scripts/"
                             "file_dir_ops.py")
        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                  "file_dir_ops.py")
        ret = upload_scripts(cls.clients, script_local_path)
        if not ret:
            raise ExecutionError("Failed to upload IO scripts to clients %s" %
                                 cls.clients)
        g.log.info("Successfully uploaded IO scripts to clients %s",
                   cls.clients)
Example #11
    def setUp(self):
        # Calling GlusterBaseClass setUp
        self.get_super_method(self, 'setUp')()

        # Setup Volume and Mount Volume
        g.log.info("Starting to Setup Volume and Mount Volume")
        ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
        g.log.info("Successful in Setup Volume and Mount Volume")

        brick_list = get_all_bricks(self.mnode, self.volname)
        # Add user on all nodes
        for brick in brick_list:
            brick_node, _ = brick.split(":")
            add_user(brick_node, "test_user1")
            add_user(brick_node, "test_user2")

        for mount_obj in self.mounts:
            add_user(mount_obj.client_system, "test_user1")
            add_user(mount_obj.client_system, "test_user2")
Example #12
    def tearDown(self):

        status_info = get_remove_brick_status(
            self.mnode, self.volname, bricks_list=self.remove_brick_list)
        status = status_info['aggregate']['statusStr']
        if 'in progress' in status:
            # Stop the remove-brick operation that is still in progress
            g.log.info("Vol %s: Stop remove brick", self.volname)
            ret, _, _ = remove_brick(self.mnode, self.volname,
                                     self.remove_brick_list, "stop")
            g.log.info("Volume %s shrink stopped ", self.volname)

        # Unmount Volume and Cleanup Volume
        g.log.info("Starting to Unmount Volume and Cleanup Volume")
        ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
        g.log.info("Successful in Unmount Volume and Cleanup Volume")

        # Calling GlusterBaseClass tearDown
        GlusterBaseClass.tearDown.im_func(self)
Example #13
    def setUp(self):
        """
        - Setup Volume and Mount Volume
        - setUp starts the io from all the mounts.
        - IO creates deep dirs and files.
        """
        # Calling GlusterBaseClass setUp
        self.get_super_method(self, 'setUp')()

        # Setup Volume and Mount Volume
        g.log.info("Starting to Setup Volume and Mount Volume")
        ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
        g.log.info("Successful in Setup Volume and Mount Volume")

        # Start IO on mounts
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "/usr/bin/env python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 2 "
                "--dir-length 15 "
                "--max-num-of-dirs 5 "
                "--num-of-files 5 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
            self.counter = self.counter + 10
        self.io_validation_complete = False

        # Adding a delay of 15 seconds before test method starts. This
        # is to ensure IO's are in progress and giving some time to fill data
        time.sleep(15)
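A setUp like the one above that launches asynchronous IO is normally paired
with a tearDown that waits for those processes before cleaning up. The sketch
below is a minimal illustration, assuming wait_for_io_to_complete from
glustolibs.io.utils and the all_mounts_procs/io_validation_complete attributes
set in setUp.

    def tearDown(self):
        # Wait for the IO started in setUp if the test did not validate it
        if not self.io_validation_complete:
            if not wait_for_io_to_complete(self.all_mounts_procs, self.mounts):
                raise ExecutionError("IO failed on some of the clients")
            self.io_validation_complete = True

        # Unmount the volume, clean it up and call the base class tearDown
        if not self.unmount_volume_and_cleanup_volume(mounts=self.mounts):
            raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
        self.get_super_method(self, 'tearDown')()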
Example #14
    def setUp(self):
        """
        Setup and mount volume or raise ExecutionError
        """
        self.get_super_method(self, 'setUp')()
        # Change the dist count to 4 in case of 'distributed-replicated' ,
        # 'distributed-dispersed' and 'distributed-arbiter'
        if self.volume_type in ("distributed-replicated",
                                "distributed-dispersed",
                                "distributed-arbiter"):
            self.volume['voltype']['dist_count'] = 4

        # Setup Volume
        ret = self.setup_volume_and_mount_volume(self.mounts)
        if not ret:
            raise ExecutionError("Failed to Setup and Mount Volume")
        mount_obj = self.mounts[0]
        self.mountpoint = mount_obj.mountpoint

        # Collect subvols
        self.subvols = (get_subvols
                        (self.mnode, self.volname))['volume_subvols']
Example #15
    def tearDown(self):

        # Unmount and clean volume
        if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
            raise ExecutionError("Failed to Cleanup Volume")

        if self.is_second_volume_created:
            # Stop the 2nd volume
            ret, _, _ = volume_stop(self.mnode, self.second_vol_name)
            self.assertEqual(
                ret, 0, ("volume stop failed for %s" % self.second_vol_name))
            g.log.info("Volume %s stopped", self.second_vol_name)

            # Delete the 2nd volume
            ret = volume_delete(self.mnode, self.second_vol_name)
            self.assertTrue(ret, ("Failed to cleanup the Volume "
                                  "%s", self.second_vol_name))
            g.log.info("Volume deleted successfully : %s",
                       self.second_vol_name)

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
Example #16
    def setUpClass(cls):
        # Calling GlusterBaseClass setUpClass
        cls.get_super_method(cls, 'setUpClass')()

        # Upload io scripts for running IO on mounts
        g.log.info("Upload io scripts to clients %s for running IO on mounts",
                   cls.clients)
        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                  "file_dir_ops.py")
        ret = upload_scripts(cls.clients, cls.script_upload_path)
        if not ret:
            raise ExecutionError("Failed to upload IO scripts to clients %s" %
                                 cls.clients)
        g.log.info("Successfully uploaded IO scripts to clients %s",
                   cls.clients)

        if cls.volume_type == "distributed":
            # Define x1 distributed volume
            cls.volume['voltype'] = {
                'type': 'distributed',
                'dist_count': 1,
                'transport': 'tcp'
            }
Example #17
    def tearDown(self):
        # Wait for I/O if not completed
        if self.is_io_running:
            if not self._wait_for_untar_completion():
                g.log.error("I/O failed to stop on clients")

        # Unmounting and cleaning volume
        ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
        if not ret:
            raise ExecutionError("Unable to delete volume % s" % self.volname)

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()

        # Clearing bricks
        for subvol in self.subvols:
            for brick in subvol:
                g.log.info('Clearing brick %s', brick)
                node, brick_path = brick.split(':')
                ret, _, err = g.run(node, 'rm -rf %s' % brick_path)
                self.assertFalse(ret, err)
                g.log.info('Clearing brick %s is successful', brick)
        g.log.info('Clearing of all bricks is successful')
Example #18
    def tearDown(self):
        # Check if a node is still down
        if self.glusterd_is_stopped:
            ret = start_glusterd(self.random_server)
            self.assertTrue(
                ret, "Failed to start glusterd on %s" % self.random_server)
            g.log.info("Successfully started glusterd on node: %s",
                       self.random_server)

            # Waiting for glusterd to start completely
            ret = wait_for_glusterd_to_start(self.random_server)
            self.assertTrue(
                ret, "glusterd is not running on %s" % self.random_server)
            g.log.info("glusterd is started and running on %s",
                       self.random_server)

        # Unmounting and cleaning volume.
        ret = self.unmount_volume_and_cleanup_volume(self.mounts)
        if not ret:
            raise ExecutionError("Unable to delete volume % s" % self.volname)
        g.log.info("Volume deleted successfully : %s", self.volname)

        self.get_super_method(self, 'tearDown')()
Example #19
    def setUp(self):
        # Calling GlusterBaseClass setUp
        self.get_super_method(self, 'setUp')()

        # Ensure we have sufficient subvols
        self.volume['voltype']['dist_count'] = 4

        # Setup Volume and Mount Volume
        ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
        g.log.info("Successful in Setup Volume and Mount Volume")

        # Start IO on mounts
        self.counter = 1
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "/usr/bin/env python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 2 "
                "--dir-length 5 "
                "--max-num-of-dirs 3 "
                "--num-of-files 3 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
            self.counter += 10
        self.io_validation_complete = False

        # Adding a delay of 10 seconds before test method starts. This
        # is to ensure IO's are in progress and giving some time to fill data
        sleep(10)
Example #20
    def tearDown(self):
        # Calling GlusterBaseClass teardown
        GlusterBaseClass.tearDown.im_func(self)

        # Disable Activate on create
        option = {'activate-on-create': 'disable'}
        ret, _, _ = set_snap_config(self.mnode, option)
        if ret != 0:
            raise ExecutionError("Failed to set activateOnCreate"
                                 "config option")
        g.log.info("ActivateOnCreate config option Successfully set")

        # umount clone volume
        g.log.info("Unmounting clone volume")
        ret, _, _ = umount_volume(self.mounts[0].client_system, self.mpoint1)
        if ret != 0:
            raise ExecutionError("Failed to unmount clone "
                                 "volume %s" % self.clone1)
        g.log.info("Successfully unmounted clone volume %s", self.clone1)

        ret, _, _ = umount_volume(self.mounts[0].client_system, self.mpoint2)
        if ret != 0:
            raise ExecutionError("Failed to unmount clone "
                                 "volume %s" % self.clone2)
        g.log.info("Successfully unmounted clone volume %s", self.clone2)

        # cleanup volume
        g.log.info("starting to cleanup volume")
        ret1 = cleanup_volume(self.mnode, self.clone1)
        ret2 = cleanup_volume(self.mnode, self.clone2)
        if not ret1:
            raise ExecutionError("Failed to cleanup %s clone "
                                 "volume" % self.clone1)
        if not ret2:
            raise ExecutionError("Failed to cleanup %s clone "
                                 "volume" % self.clone2)
        g.log.info("Successfully cleanedup cloned volumes")

        # Unmount and cleanup-volume
        g.log.info("Starting to Unmount and cleanup-volume")
        ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to Unmount and Cleanup Volume")
        g.log.info("Successful in Unmount Volume and Cleanup Volume")
Example #21
    def setUp(self):
        # Calling GlusterBaseClass setUp
        GlusterBaseClass.setUp.im_func(self)

        # Setup Volumes
        if self.volume_type == "distributed-replicated":
            # Redefine distributed-replicated volume
            self.volume['voltype'] = {
                'type': 'distributed-replicated',
                'replica_count': 3,
                'dist_count': 2,
                'arbiter_count': 1,
                'transport': 'tcp'
            }

        self.all_mounts_procs = []
        self.io_validation_complete = False

        # Setup Volume and Mount Volume
        g.log.info("Starting to Setup Volume and Mount Volume")
        ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
        g.log.info("Successful in Setup Volume and Mount Volume")
Example #22
    def setUpClass(cls):
        # Calling GlusterBaseClass setUpClass
        GlusterBaseClass.setUpClass.im_func(cls)

        # Upload io scripts for running IO on mounts
        g.log.info("Upload io scripts to clients %s for running IO on mounts",
                   cls.clients)
        script_local_path = ("/usr/share/glustolibs/io/scripts/"
                             "file_dir_ops.py")
        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                  "file_dir_ops.py")
        ret = upload_scripts(cls.clients, [script_local_path])
        if not ret:
            raise ExecutionError("Failed to upload IO scripts to clients %s"
                                 % cls.clients)
        g.log.info("Successfully uploaded IO scripts to clients %s",
                   cls.clients)

        if cls.volume_type == "replicated":
            # Define x3 replicated volume
            cls.volume['voltype'] = {
                'type': 'replicated',
                'replica_count': 3,
                'transport': 'tcp'}
Example #23
    def setUp(self):
        self.get_super_method(self, 'setUp')()

        # A single mount is enough for all the tests
        self.mounts = [self.mounts[0]]

        # For `test_heal_info_...` tests 6 replicas are needed
        if ('test_heal_info' in self.id().split('.')[-1]
                and self.volume_type.find('distributed') >= 0):
            self.volume['voltype']['dist_count'] = 6

        if not self.setup_volume_and_mount_volume(mounts=self.mounts):
            raise ExecutionError('Failed to setup and mount '
                                 '{}'.format(self.volname))

        self.client, self.m_point = (self.mounts[0].client_system,
                                     self.mounts[0].mountpoint)
        self.file_path = self.m_point + '/test_file'
        self._io_cmd = ('cat /dev/urandom | tr -dc [:space:][:print:] | '
                        'head -c {} ')
        # IO has to run for longer length for covering two scenarios in arbiter
        # volume type
        self.io_time = 600 if self.volume_type.find('arbiter') >= 0 else 300
        self.proc = ''
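A hypothetical illustration of how the _io_cmd template and self.proc defined
above might be used later in a test; the data size and shell redirection are
assumptions made for the sake of the example.

        # Fill the test file with ~100MB of printable data in the background;
        # '100M' replaces the 'head -c {}' placeholder in _io_cmd.
        cmd = self._io_cmd.format('100M') + '> {}'.format(self.file_path)
        self.proc = g.run_async(self.client, cmd)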
Example #24
    def setUp(self):
        """
        Setup Volume and Mount Volume
        """
        # Calling GlusterBaseClass setUp
        self.get_super_method(self, 'setUp')()

        # Change the dist count to 4 in case of 'distributed-replicated' ,
        # 'distributed-dispersed' and 'distributed-arbiter'
        if self.volume_type in ("distributed-replicated",
                                "distributed-dispersed",
                                "distributed-arbiter"):
            self.volume['voltype']['dist_count'] = 4

        # Setup Volume and Mount Volume
        ret = self.setup_volume_and_mount_volume(mounts=[self.mounts[0]])
        if not ret:
            raise ExecutionError("Failed to Setup_Volume and Mount_Volume")

        self.mount_point = self.mounts[0].mountpoint

        self.subvols = (get_subvols(self.mnode,
                                    self.volname))['volume_subvols']
        self.assertIsNotNone(self.subvols, "failed to get subvols")
Example #25
    def tearDown(self):

        # Setting storage.reserve to Default
        ret = set_volume_options(self.mnode, self.volname,
                                 {'storage.reserve': '0'})
        if not ret:
            raise ExecutionError("Failed to reset storage reserve on %s" %
                                 self.mnode)
        g.log.info("Able to reset storage reserve successfully on %s",
                   self.mnode)

        # Unmounting the volume.
        ret, _, _ = umount_volume(mclient=self.mounts[0].client_system,
                                  mpoint=self.mounts[0].mountpoint)
        if ret:
            raise ExecutionError("Volume %s is not unmounted" % self.volname)
        g.log.info("Volume unmounted successfully : %s", self.volname)
        ret = rmdir(self.mounts[0].client_system, self.mounts[0].mountpoint)
        if not ret:
            raise ExecutionError("Failed to remove directory mount directory.")
        g.log.info("Mount directory is removed successfully")

        # clean up all volumes
        vol_list = get_volume_list(self.mnode)
        if not vol_list:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Unable to delete volume % s" % volume)
            g.log.info("Volume deleted successfully : %s", volume)

        # Cleaning the deleted volume bricks
        for brick in self.brick_list:
            node, brick_path = brick.split(r':')
            cmd = "rm -rf " + brick_path
            ret, _, _ = g.run(node, cmd)
            if ret:
                raise ExecutionError("Failed to delete the brick "
                                     "dir's of deleted volume")

        GlusterBaseClass.tearDown.im_func(self)
Example #26
    def tearDown(self):
        """ Cleanup the volumes """
        if self.glusterd_is_stopped:
            ret = restart_glusterd(self.servers[1])
            if not ret:
                raise ExecutionError("Failed to start glusterd on node: %s"
                                     % self.servers[1])

            ret = wait_for_glusterd_to_start(self.servers[1])
            if not ret:
                raise ExecutionError("Glusterd is not yet started on node: %s"
                                     % self.servers[1])

        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Unable to delete volume %s" % volume)

        # Disable multiplex
        ret = set_volume_options(self.mnode, 'all',
                                 {'cluster.brick-multiplex': 'disable'})
        if not ret:
            raise ExecutionError("Failed to disable brick mux in cluster")

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)

        # Calling baseclass tearDown method
        self.get_super_method(self, 'tearDown')()
Example #27
    def tearDown(self):
        # Restart glusterd on nodes for which it was stopped
        ret = restart_glusterd(self.servers[3:5])
        if not ret:
            raise ExecutionError("Failed to restart glusterd on nodes: %s" %
                                 self.servers[3:5])

        # Wait for glusterd to be online and validate it's running.
        ret = wait_for_glusterd_to_start(self.servers[3:5])
        if not ret:
            raise ExecutionError("Glusterd not up on the servers: %s" %
                                 self.servers[3:5])

        # clean up all volumes
        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Unable to delete volume %s" % volume)
            g.log.info("Volume deleted successfully : %s", volume)

        # Peer probe detached servers
        ret = peer_probe_servers(self.mnode, self.servers[1:3])
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers[1:3])

        # Remove all the statedump files created in the test
        cmd = "rm -rf /var/run/gluster/glusterdump.*"
        ret, _, _ = g.run(self.mnode, cmd)
        if ret:
            raise ExecutionError("Failed to clear out the statedump files")

        self.get_super_method(self, 'tearDown')()
Example #28
    def test_create_vol_used_bricks(self):
        '''
        -> Create a distributed-replicate volume
        -> Add 6 bricks to the volume
        -> Mount the volume
        -> Perform some I/O's on mount point
        -> unmount the volume
        -> Stop and delete the volume
        -> Create another volume using bricks of deleted volume
        '''

        # Create and start a volume
        self.volume['name'] = "test_create_vol_with_fresh_bricks"
        self.volname = "test_create_vol_with_fresh_bricks"
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(ret, "Failed to create and start volume")

        # Forming brick list
        brick_list = form_bricks_list(self.mnode, self.volname, 6,
                                      self.servers, self.all_servers_info)
        # Adding bricks to the volume
        ret, _, _ = add_brick(self.mnode, self.volname, brick_list)
        self.assertEqual(
            ret, 0, "Failed to add bricks to the volume %s" % self.volname)
        g.log.info("Bricks added successfully to the volume %s", self.volname)

        # Mounting the volume.
        for mount_obj in self.mounts:
            ret, _, _ = mount_volume(self.volname,
                                     mtype=self.mount_type,
                                     mpoint=mount_obj.mountpoint,
                                     mserver=self.mnode,
                                     mclient=mount_obj.client_system)
            self.assertEqual(ret, 0,
                             ("Volume %s is not mounted") % (self.volname))
            g.log.info("Volume mounted successfully : %s", self.volname)

        # run IOs
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "/usr/bin/env python %s create_deep_dirs_with_files "
                "--dirname-start-num %d --dir-depth 2 "
                "--dir-length 5 --max-num-of-dirs 3 "
                "--num-of-files 10 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
            self.counter = self.counter + 10

        # Validate IO
        self.assertTrue(validate_io_procs(self.all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")

        # Unmounting the volume.
        for mount_obj in self.mounts:
            ret, _, _ = umount_volume(mclient=mount_obj.client_system,
                                      mpoint=mount_obj.mountpoint)
            self.assertEqual(ret, 0,
                             "Volume %s is not unmounted" % (self.volname))
            g.log.info("Volume unmounted successfully : %s", self.volname)

        # Getting brick list
        self.brick_list = get_all_bricks(self.mnode, self.volname)
        if not self.brick_list:
            raise ExecutionError("Failed to get the brick list of %s" %
                                 self.volname)

        # Stop volume
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to stop the volume %s" % self.volname)
        g.log.info("Volume %s stopped successfully", self.volname)

        # Delete Volume
        ret, _, _ = g.run(
            self.mnode,
            "gluster volume delete %s --mode=script" % self.volname)
        self.assertEqual(ret, 0, "Failed to delete volume %s" % self.volname)
        g.log.info("Volume deleted successfully %s", self.volname)

        # Create another volume by using bricks of deleted volume
        self.volname = "test_create_vol_used_bricks"
        ret, _, err = volume_create(self.mnode,
                                    self.volname,
                                    brick_list[0:6],
                                    replica_count=3)
        self.assertNotEqual(
            ret, 0, "Volume creation should fail with used "
            "bricks but volume creation success")
        g.log.info("Failed to create volume with used bricks")

        # Checking failed message of volume creation
        msg = ' '.join([
            'volume create: test_create_vol_used_bricks: failed:',
            brick_list[0].split(':')[1], 'is already part of a volume'
        ])
        self.assertIn(
            msg, err, "Incorrect error message for volume creation "
            "with used bricks")
        g.log.info("correct error message for volume creation with "
                   "used bricks")
Example #29
    def test_glusterd_replace_brick(self):
        """
        Create a volume and start it.
        - Get list of all the bricks which are online
        - Select a brick randomly from the bricks which are online
        - Form a non-existing brick path on the node where the brick is to be
          replaced
        - Perform replace brick with the non-existing path; it should fail
        - Form a new brick with a valid brick path; replace brick should
          succeed
        """
        # pylint: disable=too-many-function-args
        # Getting all the bricks which are online
        bricks_online = get_online_bricks_list(self.mnode, self.volname)
        self.assertIsNotNone(bricks_online, "Unable to get the online bricks")
        g.log.info("got the brick list from the volume")

        # Getting one random brick from the online bricks to be replaced
        brick_to_replace = random.choice(bricks_online)
        g.log.info("Brick to replace %s", brick_to_replace)
        node_for_brick_replace = brick_to_replace.split(':')[0]
        new_brick_to_replace = form_bricks_list(self.mnode, self.volname, 1,
                                                node_for_brick_replace,
                                                self.all_servers_info)

        # performing replace brick with non-existing brick path
        path = ":/brick/non_existing_path"
        non_existing_path = node_for_brick_replace + path

        # Replace brick for non-existing path
        ret, _, _ = replace_brick(self.mnode, self.volname, brick_to_replace,
                                  non_existing_path)
        self.assertNotEqual(ret, 0, ("Replace brick with commit force"
                                     " on a non-existing brick passed"))
        g.log.info("Replace brick with non-existing brick with commit"
                   "force failed as expected")

        # calling replace brick by passing brick_to_replace and
        # new_brick_to_replace with valid brick path
        ret = replace_brick_from_volume(self.mnode,
                                        self.volname,
                                        self.servers,
                                        self.all_servers_info,
                                        brick_to_replace,
                                        new_brick_to_replace[0],
                                        delete_brick=True)
        self.assertTrue(ret, ("Replace brick with commit force failed"))

        # Validating whether the brick replaced is online
        halt = 20
        counter = 0
        _rc = False
        g.log.info("Wait for some seconds for the replaced brick "
                   "to get online")
        while counter < halt:
            ret = are_bricks_online(self.mnode, self.volname,
                                    new_brick_to_replace)
            if not ret:
                g.log.info("The replaced brick isn't online, "
                           "Retry after 2 seconds .......")
                time.sleep(2)
                counter = counter + 2
            else:
                _rc = True
                g.log.info("The replaced brick is online after being replaced")
                break
        if not _rc:
            raise ExecutionError("The replaced brick isn't online")
Example #30
    def test_subdir_with_quota_limit(self):

        # pylint: disable=too-many-statements
        """
        Mount the volume
        Create 2 subdir on mount point
        dir1-> /level1/subdir1 dir2->/dlevel1/dlevel2/dlevel3/subdir2
        Auth allow - Client1(/level1/subdir1),
        Client2(/dlevel1/dlevel2/dlevel3/subdir2)
        Mount the subdir1 on client 1 and subdir2 on client2
        Enable Quota
        Verify Quota is enabled on volume
        Set quota limit as 1GB and 2GB on both subdirs respectively
        Perform a quota list operation
        Perform IO's on both subdir until quota limit is almost hit for subdir1
        Again Perform a quota list operation
        Run IO's on Client 1. This should fail
        Run IO's on Client 2. This should pass
        """

        # Create deep subdirectories  subdir1 and subdir2 on mount point
        ret = mkdir(self.mounts[0].client_system,
                    "%s/level1/subdir1" % self.mounts[0].mountpoint,
                    parents=True)
        self.assertTrue(
            ret, ("Failed to create directory '/level1/subdir1' on"
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        ret = mkdir(self.mounts[0].client_system,
                    "%s/dlevel1/dlevel2/dlevel3/subdir2" %
                    self.mounts[0].mountpoint,
                    parents=True)
        self.assertTrue(
            ret, ("Failed to create directory "
                  "'/dlevel1/dlevel2/dlevel3/subdir2' on"
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        # unmount volume
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Volumes Unmount failed")
        g.log.info("Volumes Unmounted successfully")

        # Set authentication on the subdirectory subdir1
        # and subdir2
        g.log.info(
            'Setting authentication on directories subdir1 and subdir2 '
            'for client %s and %s', self.clients[0], self.clients[1])
        ret = set_auth_allow(
            self.volname, self.mnode, {
                '/level1/subdir1': [self.clients[0]],
                '/dlevel1/dlevel2/dlevel3/subdir2': [self.clients[1]]
            })
        self.assertTrue(
            ret, 'Failed to set Authentication on volume %s' % self.volname)

        # Creating mount list for subdirectories
        self.subdir_mounts = [
            copy.deepcopy(self.mounts[0]),
            copy.deepcopy(self.mounts[1])
        ]
        self.subdir_mounts[0].volname = "%s/level1/subdir1" % self.volname
        self.subdir_mounts[1].volname = ("%s/dlevel1/dlevel2/dlevel3/subdir2" %
                                         self.volname)

        # Mount Subdirectory "subdir1" on client 1 and "subdir2" on client 2
        for mount_obj in self.subdir_mounts:
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount  %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted %s on client %s",
                       mount_obj.volname, mount_obj.client_system)
        g.log.info("Successfully mounted subdirectories on client1"
                   "and clients 2")

        # Enable quota on volume
        g.log.info("Enabling quota on the volume %s", self.volname)
        ret, _, _ = quota_enable(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to enable quota on the volume "
                                  "%s", self.volname))
        g.log.info("Successfully enabled quota on the volume %s", self.volname)

        # Check if quota is enabled
        g.log.info("Validate Quota is enabled on the volume %s", self.volname)
        ret = is_quota_enabled(self.mnode, self.volname)
        self.assertTrue(
            ret, ("Quota is not enabled on the volume %s", self.volname))
        g.log.info("Successfully Validated quota is enabled on volume %s",
                   self.volname)

        # Setting up path to set quota limit

        path1 = "/level1/subdir1"
        path2 = "/dlevel1/dlevel2/dlevel3/subdir2"

        # Set Quota limit on the subdirectory "subdir1"

        g.log.info("Set Quota Limit on the path %s of the volume %s", path1,
                   self.volname)
        ret, _, _ = quota_limit_usage(self.mnode,
                                      self.volname,
                                      path1,
                                      limit="1GB")
        self.assertEqual(ret, 0, ("Failed to set quota limit on path %s of "
                                  " the volume %s", path1, self.volname))
        g.log.info(
            "Successfully set the Quota limit on %s of the volume "
            "%s", path1, self.volname)

        # Set Quota limit on the subdirectory "subdir2"

        g.log.info("Set Quota Limit on the path %s of the volume %s", path2,
                   self.volname)
        ret, _, _ = quota_limit_usage(self.mnode,
                                      self.volname,
                                      path2,
                                      limit="2GB")
        self.assertEqual(ret, 0, ("Failed to set quota limit on path %s of "
                                  " the volume %s", path2, self.volname))
        g.log.info(
            "Successfully set the Quota limit on %s of the volume "
            "%s", path2, self.volname)

        # Get Quota List on the volume

        g.log.info("Get Quota list on the volume %s", self.volname)
        quota_list = quota_fetch_list(self.mnode, self.volname)

        self.assertIsNotNone(quota_list, ("Failed to get the quota list "
                                          "of the volume %s", self.volname))

        # Check for subdir1 path in quota list

        self.assertIn(
            path1, quota_list.keys(),
            ("%s not part of the quota list %s even if "
             "it is set on the volume %s", path1, quota_list, self.volname))

        # Check for subdir2 path in quota list

        self.assertIn(
            path2, quota_list.keys(),
            ("%s not part of the quota list %s even if "
             "it is set on the volume %s", path2, quota_list, self.volname))
        g.log.info("Successfully listed quota list %s of the "
                   "volume %s", quota_list, self.volname)

        # Create near to 1GB of data on both subdir mounts

        for mount_object in self.subdir_mounts:
            g.log.info("Creating Files on %s:%s", mount_object.client_system,
                       mount_object.mountpoint)
            cmd = ("cd %s ; for i in `seq 1 1023` ;"
                   "do dd if=/dev/urandom of=file$i bs=1M "
                   "count=1;done" % (mount_object.mountpoint))
            ret, _, _ = g.run(mount_object.client_system, cmd)
            self.assertEqual(ret, 0, "Failed to create files on mountpoint")
            g.log.info("Files created successfully on mountpoint")

        # Again Get Quota List on the volume

        g.log.info("Get Quota list on the volume %s", self.volname)
        quota_list = quota_fetch_list(self.mnode, self.volname)

        self.assertIsNotNone(quota_list, ("Failed to get the quota list "
                                          "of the volume %s", self.volname))

        # Check for subdir1 path in quota list

        self.assertIn(
            path1, quota_list.keys(),
            ("%s not part of the quota list %s even if "
             "it is set on the volume %s", path1, quota_list, self.volname))

        # Check for subdir2 path in quota list

        self.assertIn(
            path2, quota_list.keys(),
            ("%s not part of the quota list %s even if "
             "it is set on the volume %s", path2, quota_list, self.volname))
        g.log.info("Successfully listed quota list %s of the "
                   "volume %s", quota_list, self.volname)

        # Again run IO's to check if quota limit is adhere for subdir1

        # Start IO's on subdir1
        g.log.info("Creating Files on %s:%s", self.clients[0],
                   self.subdir_mounts[0].mountpoint)
        cmd = ("cd %s ; for i in `seq 1024 1500` ;"
               "do dd if=/dev/urandom of=file$i bs=1M "
               "count=1;done" % (self.subdir_mounts[0].mountpoint))
        ret, _, _ = g.run(self.clients[0], cmd)
        if ret == 0:
            raise ExecutionError("IO was expected to Fail."
                                 "But it got passed")
        else:
            g.log.info(
                "IO's failed as expected on %s:%s as quota "
                "limit reached already", self.clients[0],
                self.subdir_mounts[0].mountpoint)

        # Start IO's on subdir2
        g.log.info("Creating Files on %s:%s", self.clients[1],
                   self.subdir_mounts[1].mountpoint)
        cmd = ("cd %s ; for i in `seq 1024 1500` ;"
               "do dd if=/dev/urandom of=file$i bs=1M "
               "count=1;done" % (self.subdir_mounts[1].mountpoint))
        ret, _, _ = g.run(self.clients[1], cmd)
        self.assertEqual(ret, 0,
                         ("Failed to create files on %s" % self.clients[1]))
        g.log.info("Files created successfully on %s:%s", self.clients[1],
                   self.subdir_mounts[1].mountpoint)
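The quota state exercised above can also be inspected directly with the
is_hard_limit_exceeded helper shown in Example #5; a short sketch, assuming
that helper is importable in the same test module.

        # Log whether the hard limit is reported as exceeded for each subdir
        # after the IO runs above; path1 and path2 are defined earlier in the
        # test.
        for path in (path1, path2):
            exceeded = is_hard_limit_exceeded(self.mnode, self.volname,
                                              path=path)
            g.log.info("Hard limit exceeded on %s: %s", path, exceeded)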