Example #1
    def test_validate_auth_allow_and_auth_reject(self):
        """
        Test Case:
        1. Create and start a volume
        2. Disable brick multiplex
        3. Set auth.allow option on volume for the client address on which
           volume is to be mounted
        4. Mount the volume on client and then unmount it.
        5. Reset the volume
        6. Set auth.reject option on volume for the client address on which
           volume is to be mounted
        7. Mounting the volume should fail
        8. Reset the volume and mount it on client.
        9. Repeat steps 3-8 with brick multiplex enabled
        """
        # Setting cluster.brick-multiplex to disable
        ret = set_volume_options(self.mnode, 'all',
                                 {'cluster.brick-multiplex': 'disable'})
        self.assertTrue(ret, "Failed to set brick-multiplex to enable.")
        g.log.info("Successfully set brick-multiplex to disable.")

        # Checking auth options with brick multiplex disabled
        self._check_validate_test()

        # Setting cluster.brick-multiplex to enable
        ret = set_volume_options(self.mnode, 'all',
                                 {'cluster.brick-multiplex': 'enable'})
        self.assertTrue(ret, "Failed to set brick-multiplex to enable.")
        g.log.info("Successfully set brick-multiplex to enable.")

        # Checking auth options with brick multiplex enabled
        self._check_validate_test()
    def test_change_reserve_limit_to_wrong_value(self):
        """
        Test Case:
        1) Create and start a distributed-replicated volume.
        2) Give different inputs to the storage.reserve volume set options
        3) Validate the command behaviour on wrong inputs
        """

        # Creation of random data for storage.reserve volume option
        # Data has: alphabets, numbers, punctuations and their combinations
        key = 'storage.reserve'

        for char_type in (string.ascii_letters, string.punctuation,
                          string.printable):

            # Remove quotes from the generated string
            temp_val = self.get_random_string(char_type)
            temp_val = temp_val.replace("'", "").replace("&", "")
            value = "'{}'".format(temp_val)
            ret = set_volume_options(self.mnode, self.volname, {key: value})
            self.assertFalse(
                ret, "Unexpected: Erroneous value {}, to option "
                "{} should result in failure".format(value, key))

        # Passing an out of range value
        for value in ('-1%', '-101%', '101%', '-1', '-101'):
            ret = set_volume_options(self.mnode, self.volname, {key: value})
            self.assertFalse(
                ret, "Unexpected: Erroneous value {}, to option "
                "{} should result in failure".format(value, key))
    def test_disperse_eager_lock_cli(self):
        """
        Testcase Steps:
        1.Create an EC volume
        2.Set the eager lock option by turning
          on disperse.eager-lock by using different inputs:
          - Try non boolean values(Must fail)
          - Try boolean values
        """
        # Set the eager lock option by turning
        # on disperse.eager-lock by using different inputs
        key = 'disperse.eager-lock'

        # Set eager lock option with non-boolean value
        for char_type in (string.ascii_letters, string.punctuation,
                          string.printable, string.digits):
            temp_val = self.get_random_string(char_type)
            value = "{}".format(temp_val)
            ret = set_volume_options(self.mnode, self.volname, {key: value})
            self.assertFalse(
                ret, "Unexpected: Erroneous value {}, to option "
                "{} should result in failure".format(value, key))

        # Set eager lock option with boolean value
        for value in ('1', '0', 'off', 'on', 'disable', 'enable'):
            ret = set_volume_options(self.mnode, self.volname, {key: value})
            self.assertTrue(
                ret, "Unexpected: Boolean value {},"
                " to option {} shouldn't result in failure".format(value, key))
        g.log.info("Only Boolean values are accpeted by eager lock.")
Example #4
    def set_and_check_vol_option(self,
                                 option_name,
                                 option_value,
                                 for_all=False):
        """ Function for setting and checking volume_options """
        # Set the volume option
        vol_option = {option_name: option_value}
        if not for_all:
            ret = set_volume_options(self.mnode, self.volname, vol_option)
        else:
            ret = set_volume_options(self.mnode, 'all', vol_option)
        self.assertTrue(
            ret, "gluster volume option set of %s to %s failed" %
            (option_name, option_value))

        # Validate the option set
        if not for_all:
            ret = get_volume_options(self.mnode, self.volname, option_name)
        else:
            ret = get_volume_options(self.mnode, 'all', option_name)
        self.assertIsNotNone(ret, "The %s option is not present" % option_name)
        self.assertEqual(ret[option_name], option_value,
                         ("Volume option for %s is not equal to %s" %
                          (option_name, option_value)))
        g.log.info("Volume option %s is equal to the expected value %s",
                   option_name, option_value)
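For context, typical calls into this helper might look like the following; the option names and values are only illustrative:

        # Set and verify a volume-scoped option on the test volume
        self.set_and_check_vol_option('performance.readdir-ahead', 'on')

        # Set and verify a cluster-wide option (applied to 'all')
        self.set_and_check_vol_option('cluster.max-bricks-per-process', '100',
                                      for_all=True)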
Example #5
    def test_validate_profile_for_inodelk(self):
        """
        Test Steps:
        1) Create an ecvolume and mount it
        2) Set the eagerlock option
        3) Create a 1GB file
        4) View the profile of the volume; the INODELK count must be about
           2-10 locks for each brick.
        5) Check the backend bricks; the trusted.ec.dirty xattr must be
           non-zero
        6) Disable the eagerlock option
        7) Repeat steps 3-5; now the dirty xattr must be zero and the
           INODELK count in the range of 100-5k.
        """

        # Enable EagerLock
        ret = set_volume_options(self.mnode, self.volname, {
            'disperse.eager-lock': 'on',
            'disperse.eager-lock-timeout': '10'
        })
        self.assertTrue(ret, "Failed to turn on eagerlock"
                        "on %s" % self.volname)

        # Start profile on volume.
        ret, _, _ = profile_start(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, "Failed to start profile on volume: %s" % self.volname)

        # Test behavior with EagerLock on
        filename = self._file_create_and_profile_info("on")
        self.assertIsNotNone(filename, "Failed to get filename")

        # Test dirty bit with EagerLock on
        ret = self._check_dirty_xattr(filename)
        self.assertEqual(
            ret, '0x00000000000000010000000000000001',
            "Unexpected dirty xattr value is %s on %s" % (ret, filename))

        # Disable EagerLock
        ret = set_volume_options(self.mnode, self.volname,
                                 {'disperse.eager-lock': 'off'})
        self.assertTrue(ret, "Failed to turn off eagerlock "
                        "on %s" % self.volname)

        # Test behavior with EagerLock off
        filename = self._file_create_and_profile_info("off")
        self.assertIsNotNone(filename, "Failed to get filename")

        # Test dirty bit with EagerLock off
        ret = self._check_dirty_xattr(filename)
        self.assertEqual(
            ret, '0x00000000000000000000000000000000',
            "Unexpected dirty xattr value is %s on %s" % (ret, filename))

        # Stop profile on volume.
        ret, _, _ = profile_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0,
                         "Failed to stop profile on volume: %s" % self.volname)
Example #6
    def test_stat_prefetch(self):

        # pylint: disable=ungrouped-imports
        self.vips = (g.config['gluster']['cluster_config']['smb']['ctdb_vips'])
        # Virtual Ip of first node to mount
        self.vips_mnode = self.vips[0]['vip']
        g.log.info("CTDB Virtual Ip %s", self.vips_mnode)
        # run IOs
        self.counter = 1
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            ret, _, _ = mount_volume(self.volname,
                                     'cifs',
                                     mount_obj.mountpoint,
                                     self.vips_mnode,
                                     mount_obj.client_system,
                                     smbuser='******',
                                     smbpasswd='foobar')
            self.assertEqual(ret, 0, "Cifs Mount Failed")
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_files -f 10000"
                   " --base-file-name ctdb-cifs "
                   " --fixed-file-size 10k %s/samba/" %
                   (self.script_upload_path, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        self.io_validation_complete = False
        # Switch off and switch on stat-prefetch
        options = {"stat-prefetch": "off"}
        ret = set_volume_options(self.mnode, self.volname, options)
        if not ret:
            raise ExecutionError("Failed to execute volume set"
                                 "option command")
        ret = get_volume_options(self.mnode, self.volname)
        if ret['performance.stat-prefetch'] != "off":
            raise ExecutionError("Failed to set stat-prefetch off")
        options = {"stat-prefetch": "on"}
        ret = set_volume_options(self.mnode, self.volname, options)
        if not ret:
            raise ExecutionError("Failed to execute volume set"
                                 "option command")
        ret = get_volume_options(self.mnode, self.volname)
        if ret['performance.stat-prefetch'] != "on":
            raise ExecutionError("Failed to set stat-prefetch on")
        # Validate IO
        g.log.info("Wait for IO to complete and validate IO ...")
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.io_validation_complete = True
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Creation of 10000 files Success")
        g.log.info("test__samba_ctdb_cifs_io_rename PASSED")
    def test_setting_volume_level_option_to_cluster(self):
        # pylint: disable=too-many-statements
        """
        Test Case:
        1) Create a cluster.
        2) Try to set volume level options to cluster level.
           (These should fail!)
        eg: gluster v set all transport.listen-backlog 128
            gluster v set all performance.parallel-readdir on
        3) Check if glusterd has crashed or not.(Should not crash!)
        """

        # Set transport.listen-backlog to 128 for all volumes.(Should fail!)
        ret = set_volume_options(self.mnode, 'all',
                                 {'transport.listen-backlog': '128'})
        self.assertFalse(
            ret, "Error: Able to set transport.listen-backlog "
            "to 128 for all volumes.")
        g.log.info("EXPECTED: Failed to set transport.listen-backlog to 128"
                   " for all volumes.")

        # Checking if glusterd is running on all the nodes.
        ret = is_glusterd_running(self.servers)
        self.assertEqual(ret, 0, "glusterd has crashed.")
        g.log.info("glusterd is running on all servers.")

        # Checking if all the peers are in connected state or not.
        ret = is_peer_connected(self.mnode, self.servers)
        self.assertTrue(ret, "All peers are not in connected state.")
        g.log.info("All peers are in connected state.")

        # Set performance.parallel-readdir to on for all volumes.(Should fail!)
        ret = set_volume_options(self.mnode, 'all',
                                 {'performance.parallel-readdir': 'on'})
        self.assertFalse(
            ret, "Error: Able to set performance.parallel"
            "-readdir to ON for all volumes.")
        g.log.info("EXPECTED: Failed to set parallel-readdir to"
                   " ON for all volumes.")

        # Checking if glusterd is running on all the nodes
        ret = is_glusterd_running(self.servers)
        self.assertEqual(ret, 0, "glusterd has crashed.")
        g.log.info("glusterd is running on all servers.")

        # Checking if all the peers are in connected state or not.
        ret = is_peer_connected(self.mnode, self.servers)
        self.assertTrue(ret, "All peers are not in connected state.")
        g.log.info("All peers are in connected state.")
    def test_consistent_timestamps_feature(self):
        '''
        Test Steps:
        1. Create a volume, enable features.ctime, mount volume
        2. Create different files and directories
        3. For each entry trusted.glusterfs.mdata must be set
        4. For every entry, the above xattr must match on each brick of the
           replica set
        5. Delete all data created
        6. Turn off features.ctime
        7. Again create different files and directories
        8. "glusterfs.mdata xattr" must not be present for any entry
        9. Delete created data
        '''
        # pylint: disable=too-many-statements

        # Enable features.ctime
        ret = set_volume_options(self.mnode, self.volname,
                                 {'features.ctime': 'on'})
        self.assertTrue(ret,
                        'Failed to enable ctime feature on %s' % self.volname)
        g.log.info("Successfully enabled ctime feature on %s", self.volname)

        # Create different files and directories
        self.data_create('ctime-on')

        # Check if mdata xattr has been set for all entries
        # Check if the values are same across all replica copies
        self.validate_xattr_values('ctime-on')

        # Delete all the existing data
        self.data_delete('ctime-on')

        # Disable features.ctime
        ret = set_volume_options(self.mnode, self.volname,
                                 {'features.ctime': 'off'})
        self.assertTrue(
            ret,
            'Failed to disable features.ctime on %s' % self.volname)
        g.log.info("Successfully disabled ctime feature on %s", self.volname)

        # Create different files and directories
        self.data_create('ctime-off')

        # Check that mdata xattr has not been set for any entries
        self.validate_xattr_values('ctime-off', ctime=False)

        # Delete all the existing data
        self.data_delete('ctime-off')
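The data_create, data_delete and validate_xattr_values helpers are defined elsewhere in the test class. A simplified sketch of the xattr validation, assuming data_create creates a top-level directory named after the tag and that only presence or absence of the xattr is checked (the full helper would also compare the values across replica copies):

    def validate_xattr_values(self, dirname, ctime=True):
        # Check trusted.glusterfs.mdata on every brick copy of dirname.
        bricks = get_all_bricks(self.mnode, self.volname)
        for brick in bricks:
            brick_node, brick_path = brick.split(':')
            cmd = ("getfattr -n trusted.glusterfs.mdata -e hex "
                   "--only-values %s/%s" % (brick_path, dirname))
            ret, _, _ = g.run(brick_node, cmd)
            if ctime:
                self.assertEqual(ret, 0,
                                 "mdata xattr missing on %s" % brick)
            else:
                self.assertNotEqual(ret, 0,
                                    "mdata xattr present on %s" % brick)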
    def setUp(self):
        """
        setup volume and mount volume
        Initiate necessary variables
        """

        # calling GlusterBaseClass setUp
        self.get_super_method(self, 'setUp')()

        self.file_limit = 0

        # Setup Volume and Mount Volume
        ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to Setup_Volume %s" % self.volname)
        g.log.info("Successful in Setup Volume %s", self.volname)
        self.session = "test-session-%s" % self.volname
        self.outfiles = [("/tmp/test-outfile-%s-%s.txt"
                          % (self.volname, i))for i in range(0, 2)]

        # Set the changelog rollover-time to 1 second
        # This needs to be done in order for glusterfind to keep checking
        # for changes in the mount point
        option = {'changelog.rollover-time': '1'}
        ret = set_volume_options(self.mnode, self.volname, option)
        if not ret:
            raise ExecutionError("Failed to set the volume option %s for %s"
                                 % (option, self.volname))
        g.log.info("Successfully set the volume option for the volume %s",
                   self.volname)
    def test_glusterd_set_reset_reserve_limit(self):
        """
        Test set and reset of reserve limit on glusterd
        1. Create a volume and start it.
        2. Set storage.reserve limit on the created volume and verify it.
        3. Reset storage.reserve limit on the created volume and verify it.
        """
        # Setting storage.reserve to 50
        ret = set_volume_options(self.mnode, self.volname,
                                 {'storage.reserve': '50'})
        self.assertTrue(ret,
                        "Failed to set storage reserve on %s" % self.mnode)

        # Validate storage.reserve option set to 50
        self.validate_vol_option('storage.reserve', '50')

        # Reseting the storage.reserve limit
        ret, _, _ = reset_volume_option(self.mnode, self.volname,
                                        'storage.reserve')
        self.assertEqual(ret, 0, "Failed to reset the storage.reserve limit")

        # Validate that the storage.reserve option is reset
        ret = get_volume_options(self.mnode, self.volname, 'storage.reserve')
        if ret['storage.reserve'] == '1':
            self.validate_vol_option('storage.reserve', '1')
        else:
            self.validate_vol_option('storage.reserve', '1 (DEFAULT)')
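The validate_vol_option helper is not shown in this snippet; a minimal sketch, modeled on the set_and_check_vol_option helper from an earlier example (the name and arguments come from the calls above, the body is an assumption):

    def validate_vol_option(self, option_name, value_expected):
        # Fetch the option and compare it with the expected value.
        ret = get_volume_options(self.mnode, self.volname, option_name)
        self.assertIsNotNone(ret,
                             "The %s option is not present" % option_name)
        self.assertEqual(ret[option_name], value_expected,
                         "Volume option %s is not equal to %s"
                         % (option_name, value_expected))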
    def tearDown(self):

        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed Cleanup the volume")
            g.log.info("Volume deleted successfully %s", volume)

        # Setting quorum ratio to 51%
        ret = set_volume_options(self.mnode, 'all',
                                 {'cluster.server-quorum-ratio': '51%'})
        if not ret:
            raise ExecutionError("Failed to set server quorum ratio on %s" %
                                 self.volname)

        # Peer probe servers since we are doing peer detach in setUpClass
        for server in self.servers:
            ret = is_peer_connected(server, self.servers)
            if not ret:
                ret = peer_probe_servers(server, self.servers)
                if not ret:
                    raise ExecutionError(
                        "Peer probe failed to one of the node")
                g.log.info("Peer probe successful")

        self.get_super_method(self, 'tearDown')()
Example #12
    def setUp(self):
        """
        setup volume and mount volume
        Initiate necessary variables
        """

        # calling GlusterBaseClass setUp
        self.get_super_method(self, 'setUp')()

        # Setup Volume and Mount Volume
        g.log.info("Starting to Setup %s", self.volname)
        ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to Setup_Volume %s" % self.volname)
        g.log.info("Successful in Setup Volume %s", self.volname)
        self.session = 'test-session-%s' % self.volname
        self.outfiles = [('/tmp/test-outfile-%s-%s.txt' % (self.volname, i))
                         for i in range(0, 2)]

        # Set the changelog rollover-time to 1 second
        g.log.info("Setting the changelog rollover-time to 1 second")
        option = {'changelog.rollover-time': '1'}
        ret = set_volume_options(self.mnode, self.volname, option)
        if not ret:
            raise ExecutionError("Failed to set the volume option %s for %s" %
                                 (option, self.volname))
        g.log.info("Successfully set the volume option for the volume %s",
                   self.volname)
    def tearDown(self):

        ret = is_glusterd_running(self.servers)
        if ret:
            ret = start_glusterd(self.servers)
            if not ret:
                raise ExecutionError("Failed to start glusterd on servers")

        # checking for peer status from every node
        count = 0
        while count < 80:
            ret = self.validate_peers_are_connected()
            if ret:
                break
            sleep(2)
            count += 1

        if not ret:
            raise ExecutionError("Servers are not in connected state")

        # Setting quorum ratio to 51%
        ret = set_volume_options(self.mnode, 'all',
                                 {'cluster.server-quorum-ratio': '51%'})
        if not ret:
            raise ExecutionError("Failed to set server quorum ratio on %s" %
                                 self.volname)

        # stopping the volume and Cleaning up the volume
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed to Cleanup the Volume %s" %
                                 self.volname)
        g.log.info("Volume deleted successfully : %s", self.volname)

        self.get_super_method(self, 'tearDown')()
Example #14
    def setUpClass(cls):
        """Upload the necessary scripts to run tests.
        """
        # Calling GlusterBaseClass setUpClass
        cls.get_super_method(cls, 'setUpClass')()

        # Upload io scripts for running IO on mounts
        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                  "file_dir_ops.py")
        ret = upload_scripts(cls.clients, cls.script_upload_path)
        if not ret:
            raise ExecutionError("Failed to upload IO scripts to clients %s" %
                                 cls.clients)
        g.log.info("Successfully uploaded IO scripts to clients %s",
                   cls.clients)

        cls.counter = 1

        # Temporary code:
        # Additional checks to gather information from all
        # servers for Bug 1810901 and setting log level to debug.
        ret = set_volume_options(cls.mnode, 'all',
                                 {'cluster.daemon-log-level': 'DEBUG'})
        if not ret:
            g.log.error('Failed to set cluster.daemon-log-level to DEBUG')
Example #15
    def test_time_stamps_on_create(self):
        '''
        This case validates BZ#1761932
        1. Create a volume, enable features.ctime, mount volume
        2. Create a directory "dir1" and check the a|m|c times
        3. Create a file "file1" and check the a|m|c times
        4. Again create a new file "file2" as below
            command>>> touch file2;stat file2;stat file2
        5. Check the a|m|c times of "file2"
        6. The atime, ctime and mtime must be the same within each object
        '''
        # pylint: disable=too-many-statements

        # Check if ctime feature is disabled by default
        ret = get_volume_options(self.mnode, self.volname, "features.ctime")
        self.assertEqual(ret['features.ctime'], 'off',
                         'features.ctime is not disabled by default')
        g.log.info("ctime feature is disabled by default as expected")

        # Enable features.ctime
        ret = set_volume_options(self.mnode, self.volname,
                                 {'features.ctime': 'on'})
        self.assertTrue(
            ret, 'Failed to enable features.ctime on %s' % self.volname)
        g.log.info("Successfully enabled ctime feature on %s", self.volname)

        # Create a directory and check if ctime, mtime, atime is same
        objectname = 'dir1'
        objectpath = ('%s/%s' % (self.mounts[0].mountpoint, objectname))
        ret = mkdir(self.mounts[0].client_system, objectpath)
        self.assertTrue(ret, "{} creation failed".format(objectname))
        g.log.info("%s was successfully created on %s", objectname,
                   self.mounts[0])
        self.validate_timestamp(objectpath, objectname)

        # Create a file and check if ctime, mtime, atime is same
        objectname = 'file1'
        objectpath = ('%s/%s' % (self.mounts[0].mountpoint, objectname))
        cmd = ('touch %s' % objectpath)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(
            ret, 0, "touch command to create {} has "
            "failed".format(objectname))
        g.log.info("%s was successfully created on %s", objectname,
                   self.mounts[0])
        self.validate_timestamp(objectpath, objectname)

        # Create a file and issue stat immediately. This step helps in
        # testing a corner case where issuing stat immediately was changing
        # ctime before the touch was effected on the disk
        objectname = 'file2'
        objectpath = ('%s/%s' % (self.mounts[0].mountpoint, objectname))
        cmd = ("touch {obj};stat {obj};stat {obj}".format(obj=objectpath))
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(
            ret, 0, "touch command to create {}  has "
            "failed".format(objectname))
        g.log.info("%s was successfully created on %s", objectname,
                   self.mounts[0])
        self.validate_timestamp(objectpath, objectname)
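The validate_timestamp helper called above sits outside this snippet; a minimal sketch of the check it performs, assuming it only has to verify that atime, mtime and ctime of the object agree (the name and arguments come from the calls, the body is illustrative):

    def validate_timestamp(self, objectpath, objectname):
        # stat the object from the client and compare atime/mtime/ctime.
        ret, out, _ = g.run(self.mounts[0].client_system,
                            "stat -c '%X %Y %Z' {}".format(objectpath))
        self.assertEqual(ret, 0, "stat on {} failed".format(objectpath))
        atime, mtime, ctime = out.split()
        self.assertTrue(atime == mtime == ctime,
                        "atime/mtime/ctime of {} differ".format(objectname))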
    def test_enable_storage_reserve_volume(self):
        """
        1) Create a distributed-replicated volume and start it.
        2) Enable the storage.reserve option on the volume using the below
           command, say with a value of 50:
           gluster volume set <volname> storage.reserve 50
        3) Mount the volume on a client
        4) Check the df -h output of the mount point and backend bricks.
        """
        # Set volume option storage.reserve 50
        ret = set_volume_options(
            self.mnode, self.volname, {"storage.reserve": "50"})
        self.assertTrue(
            ret, "gluster volume set {} storage.reserve 50 Failed on server "
                 "{}".format(self.volname, self.mnode))
        # Mounting the volume on a client
        ret = self.mount_volume(self.mounts)
        if not ret:
            raise ExecutionError("Failed to mount volume")

        ret, out, _ = g.run(
            self.clients[0], "df -h | grep -i '{}'".format(
                self.mounts[0].mountpoint))
        self.assertFalse(
            ret, "Failed to run cmd df -h on client {}".format(
                self.clients[0]))

        self.assertTrue("51%" in out.split(" "), "51 % is not in list ")
Example #17
    def test_offline_brick_status_when_quorum_not_met(self):
        """
        Test Brick status when Quorum is not met after glusterd restart.
        1. Create a volume and mount it.
        2. Set the quorum type to 'server'.
        3. Bring some nodes down such that quorum won't be met.
        4. Brick status should be offline in the node which is up.
        5. Restart glusterd in this node.
        6. The brick status still should be offline as quorum isn't met.
        """
        # Set the quorum type to server and validate it.
        vol_option = {'cluster.server-quorum-type': 'server'}
        ret = set_volume_options(self.mnode, self.volname, vol_option)
        self.assertTrue(ret, "gluster volume option set of %s to %s failed"
                        % ('cluster.server-quorum-type', 'server'))
        g.log.info("Cluster quorum set to type server.")

        # Get the brick list.
        brick_list = get_all_bricks(self.mnode, self.volname)

        # Stop glusterd processes.
        ret = stop_glusterd(self.servers[1:])
        self.assertTrue(ret, "Failed to stop glusterd on specified nodes.")
        g.log.info("Glusterd processes stopped in the desired servers.")

        # Get the brick status in a node where glusterd is up.
        ret = are_bricks_offline(self.mnode, self.volname, brick_list[0:1])
        self.assertTrue(ret, "Bricks are online")
        g.log.info("Bricks are offline as expected.")

        # Restart one of the node which is up.
        ret = restart_glusterd(self.servers[0])
        self.assertTrue(ret, ("Failed to restart glusterd on desired node."))
        g.log.info("Glusterd restarted on the desired node.")

        # Wait for glusterd to be online and validate it's running.
        self.assertTrue(wait_for_glusterd_to_start(self.servers[0]),
                        "Glusterd not up on the desired server.")
        g.log.info("Glusterd is up in the desired server.")

        # Get the brick status from the restarted node.
        ret = are_bricks_offline(self.mnode, self.volname, brick_list[0:1])
        self.assertTrue(ret, "Bricks are online")
        g.log.info("Bricks are offline as expected.")

        # Start glusterd on all servers.
        ret = start_glusterd(self.servers)
        self.assertTrue(ret, "Failed to start glusterd on the specified nodes")
        g.log.info("Initiated start of glusterd on all nodes.")

        # Wait for glusterd to start.
        ret = wait_for_glusterd_to_start(self.servers)
        self.assertTrue(ret, "Glusterd not up on all nodes.")
        g.log.info("Glusterd is up and running on all nodes.")

        # Wait for all volume processes to be online
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname,
                                                   timeout=600)
        self.assertTrue(ret, ("All volume processes not up."))
        g.log.info("All volume processes are up.")
    def test_client_side_quorum_with_fixed_validate_max_bricks(self):
        """
        Test that client side quorum of type 'fixed' validates the
        maximum number of bricks it accepts

        * set cluster.quorum-type to fixed
        * set cluster.quorum-count to a number greater than the number of
          replicas in a sub-volume
        * The above step should fail

        """

        # set cluster.quorum-type to fixed
        options = {"cluster.quorum-type": "fixed"}
        g.log.info("setting %s for the volume %s", options, self.volname)
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, ("Unable to set %s for volume %s"
                              % (options, self.volname)))
        g.log.info("Successfully set %s for volume %s", options, self.volname)

        # get the subvolumes
        g.log.info("Starting to get sub-volumes for volume %s", self.volname)
        subvols_dict = get_subvols(self.mnode, self.volname)
        num_subvols = len(subvols_dict['volume_subvols'])
        g.log.info("Number of subvolumes in volume %s is %s", self.volname,
                   num_subvols)

        # get the number of bricks in replica set
        num_bricks_in_subvol = len(subvols_dict['volume_subvols'][0])
        g.log.info("Number of bricks in each replica set : %s",
                   num_bricks_in_subvol)

        # set cluster.quorum-count to a higher value than the number of
        # bricks in the replica set
        start_range = num_bricks_in_subvol + 1
        end_range = num_bricks_in_subvol + 30
        for i in range(start_range, end_range):
            options = {"cluster.quorum-count": "%s" % i}
            g.log.info("setting %s for the volume %s", options, self.volname)
            ret = set_volume_options(self.mnode, self.volname, options)
            self.assertFalse(ret, ("Able to set %s for volume %s, quorum-count"
                                   " should not be greater than number of"
                                   " bricks in replica set"
                                   % (options, self.volname)))
        g.log.info("Expected: Unable to set %s for volume %s, "
                   "quorum-count should be less than number of bricks "
                   "in replica set", options, self.volname)
Example #19
    def tearDown(self):
        """
        tearDown for every test
        """
        if not self.glusterd_service:
            ret = start_glusterd(self.servers[1])
            if not ret:
                raise ExecutionError("Failed to start glusterd services "
                                     "for : %s" % self.servers[1])

        # Checking glusterd service running or not
        ret = is_glusterd_running(self.servers[1])
        if ret == 0:
            g.log.info("glusterd running on :%s", self.servers[1])
        else:
            raise ExecutionError("glusterd not running on :%s" %
                                 self.servers[1])

        # In this test case performing quorum operations,
        # deleting volumes immediately after glusterd services start, volume
        # deletions are failing with quorum not met,
        # that's the reason verifying peers are connected or not before
        # deleting volumes
        peers_not_connected = True
        count = 0
        while count < 10:
            ret = self.validate_peers_are_connected()
            if ret:
                peers_not_connected = False
                break
            count += 1
            sleep(5)
        if peers_not_connected:
            raise ExecutionError("Servers are not in peer probed state")

        # Reverting back the quorum ratio to 51%
        self.quorum_perecent = {'cluster.server-quorum-ratio': '51%'}
        ret = set_volume_options(self.mnode, 'all', self.quorum_perecent)
        if not ret:
            raise ExecutionError(
                "gluster volume set all cluster.server-quorum-ratio 51%% "
                "failed on :%s" % self.servers)
        g.log.info(
            "gluster volume set all cluster.server-quorum-ratio 51%% set "
            "successfully on :%s", self.servers)

        # stopping the volume and Cleaning up the volume
        for volume in self.volume_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to Cleanup the "
                                     "Volume %s" % volume)
            g.log.info("Volume deleted successfully : %s", volume)

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
Example #20
    def _set_option_for_volume(self, volume, option):
        """
        Set an option for a volume
        """
        ret = set_volume_options(self.mnode, volume, option)
        self.assertTrue(
            ret,
            "Failed to set option:value %s on volume %s" % (option, volume))
        g.log.info("Successfully set option:value %s for volume %s", option,
                   volume)
Example #21
    def _set_and_assert_volume_option(self, key, value, xfail=False):
        '''Set and assert volume option'''
        ret = set_volume_options(self.mnode, self.volname, {key: value})
        assert_method = self.assertTrue
        assert_msg = 'Unable to set {0} to {1}'.format(key, value)
        if xfail:
            assert_method = self.assertFalse
            assert_msg = 'Should not be able to set {0} to {1}'.format(
                key, value)
        assert_method(ret, assert_msg)
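For context, calls into this helper would look roughly like the following; the option values shown are only illustrative:

        # A valid boolean value is expected to be accepted ...
        self._set_and_assert_volume_option('disperse.eager-lock', 'on')
        # ... while a non-boolean value is expected to be rejected.
        self._set_and_assert_volume_option('disperse.eager-lock', 'not-a-bool',
                                           xfail=True)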
    def test_memory_leak_in_shd_with_cache_invalidation_on(self):
        """
        Test case:
        1. Create a volume, start it and mount it.
        2. Set features.cache-invalidation to ON.
        3. Start I/O from mount point.
        4. Run gluster volume heal command in a loop
        5. Check if there are any memory leaks and OOM killers on servers.
        """
        # Start monitoring resource usage on servers and clients
        monitor_proc_dict = self.start_memory_and_cpu_usage_logging(
            self.test_id, count=10)
        self.assertIsNotNone(
            monitor_proc_dict, "Failed to start monitoring on servers and"
            " clients")

        # Set features.cache-invalidation to ON
        ret = set_volume_options(self.mnode, self.volname,
                                 {'features.cache-invalidation': 'on'})
        self.assertTrue(ret, "Failed to set features.cache-invalidation to ON")
        g.log.info("Successfully set features.cache-invalidation to ON")

        # Start multiple I/O from mount points
        self.list_of_io_processes = []
        cmd = (
            "cd {};for i in `seq 1 1000`;do echo 'abc' > myfile;done".format(
                self.mounts[0].mountpoint))
        ret = g.run_async(self.mounts[0].client_system, cmd)
        self.list_of_io_processes = [ret]
        self.is_io_running = True

        # Run gluster volume heal command in a loop for 100 iterations
        for iteration in range(0, 100):
            g.log.info("Running gluster volume heal command for %d time",
                       iteration)
            ret = trigger_heal(self.mnode, self.volname)
            self.assertTrue(ret, "Heal command triggered successfully")
        g.log.info("Ran gluster volume heal command in a loop for "
                   "100 iterations.")

        # Wait for I/O to complete and validate I/O on mount points
        ret = validate_io_procs(self.list_of_io_processes, self.mounts[0])
        self.assertTrue(ret, "I/O failed on mount point")
        self.is_io_running = False

        # Wait for monitoring processes to complete
        ret = wait_for_logging_processes_to_stop(monitor_proc_dict,
                                                 cluster=True)
        self.assertTrue(ret, "ERROR: Failed to stop monitoring processes")

        # Check if there are any memory leaks and OOM killers
        ret = self.check_for_memory_leaks_and_oom_kills_on_servers(
            self.test_id)
        self.assertFalse(ret,
                         "Memory leak and OOM kills check failed on servers")
Example #23
    def test_setting_vol_option_with_max_characters(self):

        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(ret, ("Failed to create "
                              "and start volume %s" % self.volname))
        auth_list = []
        for ip_addr in range(256):
            auth_list.append('192.168.122.%d' % ip_addr)
        for ip_addr in range(7):
            auth_list.append('192.168.123.%d' % ip_addr)
        ip_list = ','.join(auth_list)

        # set auth.allow with <4096 characters and restart the glusterd
        g.log.info("Setting auth.allow with string of length %d for %s",
                   len(ip_list), self.volname)
        self.options = {"auth.allow": ip_list}
        ret = set_volume_options(self.mnode, self.volname, self.options)
        self.assertTrue(ret, ("Failed to set auth.allow with string of length"
                              " %d for %s" % (len(ip_list), self.volname)))
        ret = restart_glusterd(self.mnode)
        self.assertTrue(ret,
                        "Failed to restart the glusterd on %s" % self.mnode)

        # set auth.allow with >4096 characters and restart the glusterd
        ip_list = ip_list + ",192.168.123.7"
        self.options = {"auth.allow": ip_list}
        g.log.info("Setting auth.allow with string of length %d for %s",
                   len(ip_list), self.volname)
        ret = set_volume_options(self.mnode, self.volname, self.options)
        self.assertTrue(ret, ("Failed to set auth.allow with string of length"
                              " %d for %s" % (len(ip_list), self.volname)))
        ret = restart_glusterd(self.mnode)
        self.assertTrue(ret,
                        "Failed to restart the glusterd on %s" % self.mnode)
        count = 0
        while count < 60:
            ret = is_glusterd_running(self.mnode)
            if not ret:
                break
            sleep(2)
            count += 1
        self.assertEqual(ret, 0, "glusterd is not running on %s" % self.mnode)
    def test_brick_full_add_brick_remove_brick(self):
        """
        Test case:
        1. Create a volume, start it and mount it.
        2. Fill few bricks till min-free-limit is reached.
        3. Add brick to the volume.(This should pass.)
        4. Set cluster.min-free-disk to 30%.
        5. Remove bricks from the volume.(This should pass.)
        6. Check for data loss by comparing arequal before and after.
        """
        # Fill few bricks till it is full
        bricks = get_all_bricks(self.mnode, self.volname)

        # Calculate the usable size and fill till it reaches
        # min free limit
        usable_size = get_usable_size_per_disk(bricks[0])
        subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
        filename = "abc"
        for _ in range(0, usable_size):
            while (subvols[find_hashed_subvol(subvols, "/",
                                              filename)[1]] == subvols[0]):
                filename = self._get_random_string()
            ret, _, _ = g.run(
                self.mounts[0].client_system,
                "fallocate -l 1G {}/{}".format(self.mounts[0].mountpoint,
                                               filename))
            self.assertFalse(ret, "Failed to fill disk to min free limit")
            filename = self._get_random_string()
        g.log.info("Disk filled up to min free limit")

        # Collect arequal checksum before ops
        arequal_checksum_before = collect_mounts_arequal(self.mounts[0])

        # Add brick to volume
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, "Failed to add brick on volume %s" % self.volname)

        # Set cluster.min-free-disk to 30%
        ret = set_volume_options(self.mnode, self.volname,
                                 {'cluster.min-free-disk': '30%'})
        self.assertTrue(ret, "Failed to set cluster.min-free-disk to 30%")

        # Remove bricks from the volume
        ret = shrink_volume(self.mnode, self.volname, rebalance_timeout=1800)
        self.assertTrue(ret, "Failed to remove-brick from volume")
        g.log.info("Remove-brick rebalance successful")

        # Check for data loss by comparing arequal before and after ops
        arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
        self.assertEqual(arequal_checksum_before, arequal_checksum_after,
                         "arequal checksum is NOT MATCHNG")
        g.log.info("arequal checksum is SAME")
Example #25
    def _change_eagerlock_timeouts(self, timeoutval):
        """Change eagerlock and other-eagerlock timeout values as per input"""
        ret = set_volume_options(
            self.mnode, self.volname, {
                'disperse.eager-lock-timeout': timeoutval,
                'disperse.other-eager-lock-timeout': timeoutval
            })
        self.assertTrue(
            ret, 'Failed to change eager-lock timeout values to '
            '%s sec on %s' % (timeoutval, self.volname))
        g.log.info("SUCCESS: Changed eager-lock timeout values to %s sec "
                   "on %s", timeoutval, self.volname)
    def test_setting_vol_option_with_max_characters(self):

        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(ret, ("Failed to create "
                              "and start volume %s" % self.volname))
        auth_list = []
        for ip_addr in range(256):
            auth_list.append('192.168.122.%d' % ip_addr)
        for ip_addr in range(7):
            auth_list.append('192.168.123.%d' % ip_addr)
        ip_list = ','.join(auth_list)

        # set auth.allow with <4096 characters and restart the glusterd
        g.log.info("Setting auth.allow with string of length %d for %s",
                   len(ip_list), self.volname)
        self.options = {"auth.allow": ip_list}
        ret = set_volume_options(self.mnode, self.volname, self.options)
        self.assertTrue(ret, ("Failed to set auth.allow with string of length"
                              " %d for %s" % (len(ip_list), self.volname)))
        ret = restart_glusterd(self.mnode)
        self.assertTrue(ret, "Failed to restart the glusterd on %s"
                        % self.mnode)

        # set auth.allow with >4096 characters and restart the glusterd
        ip_list = ip_list + ",192.168.123.7"
        self.options = {"auth.allow": ip_list}
        g.log.info("Setting auth.allow with string of length %d for %s",
                   len(ip_list), self.volname)
        ret = set_volume_options(self.mnode, self.volname, self.options)
        self.assertTrue(ret, ("Failed to set auth.allow with string of length"
                              " %d for %s" % (len(ip_list), self.volname)))
        ret = restart_glusterd(self.mnode)
        self.assertTrue(ret, "Failed to restart the glusterd on %s"
                        % self.mnode)

        ret = wait_for_glusterd_to_start(self.servers)
        self.assertTrue(ret, "glusterd is not running on %s"
                        % self.servers)
        g.log.info("Glusterd start on the nodes : %s "
                   "succeeded", self.servers)
Example #27
    def test_gfid_assignment_on_all_subvols(self):
        """
        - Create a dis-rep volume and mount it.
        - Create a directory on mount and check whether all the bricks have
          the same gfid.
        - On the backend create a new directory on all the bricks.
        - Do lookup from the mount.
        - Check whether all the bricks have the same gfid assigned.
        """
        # Enable client side healing
        g.log.info("Enable client side healing options")
        options = {
            "metadata-self-heal": "on",
            "entry-self-heal": "on",
            "data-self-heal": "on"
        }
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, 'Failed to set options %s' % options)
        g.log.info("Successfully set %s for volume %s", options, self.volname)

        # Create a directory on the mount
        g.log.info("Creating a directory")
        cmd = "/usr/bin/env python %s create_deep_dir -d 0 -l 0 %s/dir1 " % (
            self.script_upload_path, self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "Failed to create directory on mountpoint")
        g.log.info("Directory created successfully on mountpoint")

        # Verify gfids are same on all the bricks
        self.verify_gfid("dir1")

        # Create a new directory on all the bricks directly
        bricks_list = get_all_bricks(self.mnode, self.volname)
        for brick in bricks_list:
            brick_node, brick_path = brick.split(":")

            ret, _, _ = g.run(brick_node, "mkdir %s/dir2" % (brick_path))
            self.assertEqual(
                ret, 0, "Failed to create directory on brick %s" % (brick))

        # To circumvent is_fresh_file() check in glusterfs code.
        time.sleep(2)

        # Do a client side lookup on the new directory and verify the gfid
        # All the bricks should have the same gfid assigned
        ret, _, _ = g.run(self.clients[0],
                          "ls %s/dir2" % self.mounts[0].mountpoint)
        self.assertEqual(ret, 0, "Lookup on directory \"dir2\" failed.")
        g.log.info("Lookup on directory \"dir2\" successful")

        # Verify gfid is assigned on all the bricks and are same
        self.verify_gfid("dir2")
    def test_ec_read_from_hardlink(self):
        """
        Test steps:
        1. Enable metadata-cache(md-cache) options on the volume
        2. Touch a file and create a hardlink for it
        3. Read data from the hardlink.
        4. Read data from the actual file.
        """
        options = {'group': 'metadata-cache'}
        # Set metadata-cache options as group
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, "Unable to set the volume options {}".
                        format(options))
        g.log.info("Able to set the %s options", options)

        # Mounting the volume on one client
        ret, _, _ = mount_volume(self.volname, mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume {} is not mounted").
                         format(self.volname))
        g.log.info("Volume mounted successfully : %s", self.volname)

        file_name = self.mounts[0].mountpoint + "/test1"
        content = "testfile"
        hard_link = self.mounts[0].mountpoint + "/test1_hlink"
        cmd = 'echo "{content}" > {file}'.format(file=file_name,
                                                 content=content)

        # Creating a file with data
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Sucessful in creating a file with data")
        g.log.info("file created successfully on %s",
                   self.mounts[0].mountpoint)

        # Creating a hardlink for the file created
        ret = create_link_file(self.mounts[0].client_system,
                               file_name, hard_link)
        self.assertTrue(ret, "Link file creation failed")
        g.log.info("Link file creation for %s is successful", file_name)

        # Reading from the file as well as the hardlink
        for each in (file_name, hard_link):
            ret, out, _ = g.run(self.mounts[0].client_system,
                                "cat {}".format(each))
            self.assertEqual(ret, 0, "Unable to read the {}".format(each))
            self.assertEqual(content, out.strip('\n'),
                             "The content {} and the data in file {} are "
                             "not the same".format(content, each))
            g.log.info("Read of %s file is successful", each)
    def test_enable_brickmux_create_and_stop_three_volumes(self):
        """
        Test Case:
        1. Set cluster.brick-multiplex to enable.
        2. Create three 1x3 replica volumes.
        3. Start all the three volumes.
        4. Stop the three volumes one by one.
        """

        # Timestamp of current test case of start time
        ret, test_timestamp, _ = g.run_local('date +%s')
        test_timestamp = test_timestamp.strip()

        # Setting cluster.brick-multiplex to enable
        ret = set_volume_options(self.mnode, 'all',
                                 {'cluster.brick-multiplex': 'enable'})
        self.assertTrue(ret, "Failed to set brick-multiplex to enable.")
        g.log.info("Successfully set brick-multiplex to enable.")

        # Create and start 3 volumes
        for number in range(1, 4):
            self.volume['name'] = ("test_volume_%s" % number)
            self.volname = ("test_volume_%s" % number)
            ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
            self.assertTrue(ret,
                            "Failed to create and start %s" % self.volname)
            g.log.info("Successfully created and started volume %s.",
                       self.volname)

        # Checking brick process count.
        for brick in get_all_bricks(self.mnode, self.volname):
            server = brick.split(":")[0]
            count = get_brick_processes_count(server)
            self.assertEqual(
                count, 1, "ERROR: More than one brick process on %s." % server)
            g.log.info("Only one brick process present on %s", server)

        # Stop three volumes one by one.
        for number in range(1, 4):
            self.volume['name'] = ("test_volume_%s" % number)
            self.volname = ("test_volume_%s" % number)
            ret, _, _ = volume_stop(self.mnode, self.volname)
            self.assertEqual(ret, 0,
                             "Failed to stop the volume %s" % self.volname)
            g.log.info("Volume %s stopped successfully", self.volname)

        # Checking for core files.
        ret = is_core_file_created(self.servers, test_timestamp)
        self.assertTrue(ret, "Core file found.")
        g.log.info("No core files found, glusterd service running "
                   "successfully")
Example #30
    def set_max_brick_process_to_string(self):
        """Set cluster.max-bricks-per-process to string"""
        key = 'cluster.max-bricks-per-process'
        for char_type in (string.ascii_letters, string.punctuation):

            temp_val = self.get_random_string(char_type)
            value = "{}".format(temp_val)
            ret = set_volume_options(self.mnode, 'all', {key: value})
            self.assertFalse(
                ret, "Unexpected: Erroneous value {}, to option "
                "{} should result in failure".format(value, key))
            g.log.info(
                "Expected: Erroneous value %s, to option "
                "%s resulted in failure", value, key)