Ejemplo n.º 1
0
    def set_and_check_vol_option(self,
                                 option_name,
                                 option_value,
                                 for_all=False):
        """ Set a volume option and verify that it took effect.

        Args:
            option_name (str): Name of the volume option to set.
            option_value (str): Value to assign to the option.
            for_all (bool): When True, apply/read the option cluster-wide
                ('all') instead of only on self.volname.
        """
        # Choose whether the option targets this volume or all volumes
        target = 'all' if for_all else self.volname

        # Apply the option and assert on failure
        ret = set_volume_options(self.mnode, target,
                                 {option_name: option_value})
        self.assertTrue(
            ret, "gluster volume option set of %s to %s failed" %
            (option_name, option_value))

        # Read the option back and confirm the stored value matches
        ret = get_volume_options(self.mnode, target, option_name)
        self.assertIsNotNone(ret, "The %s option is not present" % option_name)
        self.assertEqual(ret[option_name], option_value,
                         ("Volume option for %s is not equal to %s" %
                          (option_name, option_value)))
        g.log.info("Volume option %s is equal to the expected value %s",
                   option_name, option_value)
Ejemplo n.º 2
0
    def test_stat_prefetch(self):
        """Toggle performance.stat-prefetch while CIFS IO is in flight.

        Mounts the volume over CIFS on every client through the first
        CTDB virtual IP, starts an asynchronous file-creation workload,
        flips stat-prefetch off and back on (verifying each set), then
        waits for the IO to complete successfully.

        Raises:
            ExecutionError: If a volume set fails or the option does not
                reach the requested state.
        """
        # pylint: disable=ungrouped-imports
        self.vips = (g.config['gluster']['cluster_config']['smb']['ctdb_vips'])
        # Virtual Ip of first node to mount
        self.vips_mnode = self.vips[0]['vip']
        g.log.info("CTDB Virtual Ip %s", self.vips_mnode)
        # run IOs
        self.counter = 1
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            # Mount over CIFS through the CTDB VIP on each client.
            # NOTE(review): smbuser appears redacted ('******') in this
            # copy of the source -- confirm real credential handling.
            ret, _, _ = mount_volume(self.volname,
                                     'cifs',
                                     mount_obj.mountpoint,
                                     self.vips_mnode,
                                     mount_obj.client_system,
                                     smbuser='******',
                                     smbpasswd='foobar')
            self.assertEqual(ret, 0, "Cifs Mount Failed")
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            # 10000 fixed-size files under the samba share subdirectory
            cmd = ("python %s create_files -f 10000"
                   " --base-file-name ctdb-cifs "
                   " --fixed-file-size 10k %s/samba/" %
                   (self.script_upload_path, mount_obj.mountpoint))

            # Run the workload asynchronously so the option toggling
            # below overlaps with the IO.
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        self.io_validation_complete = False
        # Switch off and switch on stat-prefetch
        # (fix: the error message previously concatenated to
        # "volume setoption command" -- missing space added)
        options = {"stat-prefetch": "off"}
        ret = set_volume_options(self.mnode, self.volname, options)
        if not ret:
            raise ExecutionError("Failed to execute volume set "
                                 "option command")
        ret = get_volume_options(self.mnode, self.volname)
        if ret['performance.stat-prefetch'] != "off":
            raise ExecutionError("Failed to set stat-prefetch off")
        options = {"stat-prefetch": "on"}
        ret = set_volume_options(self.mnode, self.volname, options)
        if not ret:
            raise ExecutionError("Failed to execute volume set "
                                 "option command")
        ret = get_volume_options(self.mnode, self.volname)
        if ret['performance.stat-prefetch'] != "on":
            raise ExecutionError("Failed to set stat-prefetch on")
        # Validate IO
        g.log.info("Wait for IO to complete and validate IO ...")
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.io_validation_complete = True
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Creation of 10000 files Success")
        g.log.info("test__samba_ctdb_cifs_io_rename PASSED")
 def _validate_vol_options(self, option_name, option_value, for_all=False):
     """ Validate that a volume option holds the expected (default) value.

     Args:
         option_name (str): Option to look up.
         option_value (str): Expected value; only the first
             whitespace-separated token of the stored value is compared.
         for_all (bool): Query cluster-wide ('all') instead of the volume.
     """
     target = 'all' if for_all else self.volname
     ret = get_volume_options(self.mnode, target, option_name)
     self.assertIsNotNone(ret, "The %s option is not present" % option_name)
     # Stored value may carry a trailing marker such as '(DEFAULT)';
     # compare only the leading token.
     value = (ret[option_name]).split()
     self.assertEqual(value[0], option_value,
                      ("Volume option for %s is not equal to %s" %
                       (option_name, option_value)))
     g.log.info("Volume option %s is equal to the expected value %s",
                option_name, option_value)
Ejemplo n.º 4
0
 def _get_option_value_for_volume(self, option):
     """
     Fetch the value of a volume option queried cluster-wide ('all').

     Args:
         option (str): Option name to query.

     Returns:
         dict: Option name/value mapping returned by get_volume_options;
             the test fails if the lookup returns None.
     """
     fetched = get_volume_options(self.mnode, 'all', option)
     self.assertIsNotNone(fetched, "Failed to get %s option" % option)
     return fetched
    def test_glusterd_set_reset_reserve_limit(self):
        """
        Test set and reset of reserve limit on glusterd
        1. Create a volume and start it.
        2. Set storage.reserve limit on the created volume and verify it.
        3. Reset storage.reserve limit on the created volume and verify it.
        """
        # Apply a reserve limit of 50 on the volume
        ret = set_volume_options(self.mnode, self.volname,
                                 {'storage.reserve': '50'})
        self.assertTrue(ret,
                        "Failed to set storage reserve on %s" % self.mnode)

        # Confirm the limit was stored
        self.validate_vol_option('storage.reserve', '50')

        # Drop the limit back to its default via volume reset
        ret, _, _ = reset_volume_option(self.mnode, self.volname,
                                        'storage.reserve')
        self.assertEqual(ret, 0, "Failed to reset the storage.reserve limit")

        # Depending on the gluster build the default is reported either
        # as plain '1' or as '1 (DEFAULT)'; accept whichever is present.
        ret = get_volume_options(self.mnode, self.volname, 'storage.reserve')
        expected = '1' if ret['storage.reserve'] == '1' else '1 (DEFAULT)'
        self.validate_vol_option('storage.reserve', expected)
Ejemplo n.º 6
0
def is_quota_enabled(mnode, volname):
    """Checks if quota is enabled on given volume

    Args:
        mnode (str): Node on which cmd has to be executed.
        volname (str): volume name

    Returns:
        bool: True, if quota is enabled
            False, if quota is disabled (or the option lookup failed)

    Example:
        is_quota_enabled(mnode, testvol)
    """
    quota_opts = get_volume_options(mnode, volname, "features.quota")
    if quota_opts is None:
        # Lookup failed; treat quota as disabled
        return False

    g.log.info("Quota Status in volume %s %s", volname,
               quota_opts["features.quota"])
    return quota_opts["features.quota"] == 'on'
Ejemplo n.º 7
0
    def test_time_stamps_on_create(self):
        '''
        This case validates BZ#1761932
        1. Create a volume , enable features.ctime, mount volume
        2. Create a directory "dir1" and check the a|m|c times
        3. Create a file "file1"  and check the a|m|c times
        4. Again create a new file "file2" as below
            command>>> touch file2;stat file2;stat file2
        5. Check the a|m|c times of "file2"
        6. The atime,ctime,mtime must be same within each object
        '''
        # pylint: disable=too-many-statements

        # Check if ctime feature is disabled by default
        ret = get_volume_options(self.mnode, self.volname, "features.ctime")
        self.assertEqual(ret['features.ctime'], 'off',
                         'features_ctime is not disabled by default')
        g.log.info("ctime feature is disabled by default as expected")

        # Enable features.ctime
        # NOTE(review): the two messages below interpolate self.volume
        # rather than self.volname -- presumably self.volname was
        # intended; confirm before changing.
        ret = set_volume_options(self.mnode, self.volname,
                                 {'features.ctime': 'on'})
        self.assertTrue(
            ret, 'failed to enable features_ctime feature on %s' % self.volume)
        g.log.info("Successfully enabled ctime feature on %s", self.volume)

        # Create a directory and check if ctime, mtime, atime is same
        objectname = 'dir1'
        objectpath = ('%s/%s' % (self.mounts[0].mountpoint, objectname))
        ret = mkdir(self.mounts[0].client_system, objectpath)
        self.assertTrue(ret, "{} creation failed".format(objectname))
        g.log.info("%s was successfully created on %s", objectname,
                   self.mounts[0])
        # validate_timestamp asserts a|m|c times agree for the object
        self.validate_timestamp(objectpath, objectname)

        # Create a file and check if ctime, mtime, atime is same
        objectname = 'file1'
        objectpath = ('%s/%s' % (self.mounts[0].mountpoint, objectname))
        cmd = ('touch  %s' % objectpath)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(
            ret, 0, "touch command to create {} has "
            "failed".format(objectname))
        g.log.info("%s was successfully created on %s", objectname,
                   self.mounts[0])
        self.validate_timestamp(objectpath, objectname)

        # Create a file and issue stat immediately. This step helps in
        # testing a corner case where issuing stat immediately was changing
        # ctime before the touch was effected on the disk
        objectname = 'file2'
        objectpath = ('%s/%s' % (self.mounts[0].mountpoint, objectname))
        cmd = ("touch {obj};stat {obj};stat {obj}".format(obj=objectpath))
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(
            ret, 0, "touch command to create {}  has "
            "failed".format(objectname))
        g.log.info("%s was successfully created on %s", objectname,
                   self.mounts[0])
        self.validate_timestamp(objectpath, objectname)
 def validate_vol_option(self, option_name, value_expected):
     """ Assert that a volume option holds the expected value.

     Args:
         option_name (str): Option to read from the volume.
         value_expected (str): Exact value the option must hold.
     """
     fetched = get_volume_options(self.mnode, self.volname, option_name)
     self.assertIsNotNone(fetched,
                          "The %s option is not present" % option_name)
     self.assertEqual(fetched[option_name], value_expected,
                      ("Volume option for %s is not equal to %s" %
                       (option_name, value_expected)))
     g.log.info("Volume option %s is equal to the expected value %s",
                option_name, value_expected)
Ejemplo n.º 9
0
def verify_auth_allow(volname, server, auth_dict):
    """
    Verify authentication for volumes or sub directories as required

    Args:
        volname(str): The name of volume in which auth.allow
            has to be verified
        server(str): IP or hostname of one node
        auth_dict(dict): key-value pair of dirs and clients list
            Example: auth_dict = {'/d1':['10.70.37.172','10.70.37,173'],
                '/d3/subd1':['10.70.37.172','10.70.37.197']}
            If authentication is set on entire volume, use 'all' as key to
            verify.
                auth_dict = {'all': ['10.70.37.172','10.70.37,173']}
                auth_dict = {'all': ['*']}
                'all' refers to entire volume
    Returns (bool):
        True if the verification is success, else False
    """
    auth_details = []
    if not auth_dict:
        g.log.error("Authentication details are not provided")
        return False

    # Get the value of auth.allow option of the volume
    auth_clients_dict = get_volume_options(server, volname, "auth.allow")
    # Fix: get_volume_options returns None on failure; subscripting it
    # would raise TypeError, so fail the verification explicitly instead.
    if auth_clients_dict is None:
        g.log.error("Failed to get auth.allow option for volume %s", volname)
        return False
    auth_clients = auth_clients_dict['auth.allow']

    # When authentication has to be verified on entire volume(not on sub-dirs)
    # check whether the required clients names are listed in auth.allow option
    if 'all' in auth_dict:
        clients_list = auth_clients.split(',')
        res = all(elem in clients_list for elem in auth_dict['all'])
        if not res:
            g.log.error("Authentication verification failed. auth.allow: %s",
                        auth_clients)
            return False
        g.log.info("Authentication verified successfully. auth.allow: %s",
                   auth_clients)
        return True

    # When authentication has to be verified on on sub-dirs, convert the key-
    # value pair to a format which matches the value of auth.allow option.
    for key, value in list(auth_dict.items()):
        auth_details.append("%s(%s)" % (key, "|".join(value)))

    # Check whether the required clients names are listed in auth.allow option
    for auth_detail in auth_details:
        if auth_detail not in auth_clients:
            g.log.error("Authentication verification failed. auth.allow: %s",
                        auth_clients)
            return False
    g.log.info("Authentication verified successfully. auth.allow: %s",
               auth_clients)
    return True
 def _set_granular_heal_to_on_or_off(self, enabled=False):
     """Ensure cluster.granular-entry-heal is in the requested state.

     Args:
         enabled (bool): True to ensure the option is 'on',
             False (default) to ensure it is 'off'.
     """
     opts = get_volume_options(self.mnode, self.volname,
                               'granular-entry-heal')
     is_on = opts['cluster.granular-entry-heal'] == 'on'
     # Only toggle when the current state differs from the request
     if enabled and not is_on:
         ret = enable_granular_heal(self.mnode, self.volname)
         self.assertTrue(ret, "Unable to set granular-entry-heal to on")
     elif not enabled and is_on:
         ret = disable_granular_heal(self.mnode, self.volname)
         self.assertTrue(ret,
                         "Unable to set granular-entry-heal to off")
Ejemplo n.º 11
0
def quota_check_deem_statfs(mnode, volname):
    """Checks if quota-deem-statfs is enabled
    on given volume

    Args:
        mnode (str): Node on which cmd has to be executed.
        volname (str): volume name

    Returns:
        bool: True, if quota-deem-statfs is enabled
            False, if quota-deem-statfs is disabled or could not be read

    Example:
        quota_check_deem_statfs(mnode, testvol)
    """

    output = get_volume_options(mnode, volname, "features.quota-deem-statfs")
    if not output:
        g.log.error(
            "Failed to get current status of "
            "'features.quota-deem-statfs' option "
            "for volume %s", volname)
        return False

    # Fix: verify the key is present BEFORE dereferencing it. The
    # previous order logged output[...] first, so a missing key raised
    # KeyError and the error branch below was unreachable.
    if 'features.quota-deem-statfs' not in output:
        g.log.error(
            "Failed to get status of 'features.quota-deem-statfs' "
            "option for the volume %s", volname)
        return False

    g.log.info("Quota deem-statfs status in volume %s: %s", volname,
               output["features.quota-deem-statfs"])

    if output['features.quota-deem-statfs'] == 'on':
        g.log.info(
            "Volume option 'features.quota-deem-statfs' "
            "is currently enabled for volume %s", volname)
        return True
    g.log.info(
        "Volume option 'features.quota-deem-statfs' "
        "is not in enabled state for volume %s", volname)
    return False
Ejemplo n.º 12
0
def enable_and_validate_volume_options(mnode, volname, volume_options_list,
                                       time_delay=1):
    """Enable the volume option and validate whether the option has been
    successfully enabled or not

    Args:
        mnode (str): Node on which commands are executed.
        volname (str): Name of the volume.
        volume_options_list (str|list): A volume option|List of volume options
            to be enabled
        time_delay (int): Time delay between 2 volume set operations

    Returns:
        bool: True when enabling and validating all volume options is
            successful. False otherwise
    """

    volume_options_list = to_list(volume_options_list)

    for option in volume_options_list:
        # Set volume option to "on"
        # (fix: the log call was missing its 'option' argument)
        g.log.info("Setting the volume option : %s", option)
        ret = set_volume_options(mnode, volname, {option: "on"})
        if not ret:
            return False

        # Validate whether the option is set on the volume
        g.log.info("Validating the volume option : %s to be set to 'enable'",
                   option)
        option_dict = get_volume_options(mnode, volname, option)
        g.log.info("Options Dict: %s", option_dict)
        if not option_dict:
            g.log.error("%s is not enabled on the volume %s", option, volname)
            return False

        # get_volume_options returns {option_name: value}; the previous
        # lookups via 'name'/'value' keys could never succeed.
        if option not in option_dict or "on" not in option_dict[option]:
            g.log.error("%s is not enabled on the volume %s", option, volname)
            return False

        g.log.info("%s is enabled on the volume %s", option, volname)
        # Pause between successive volume set operations
        time.sleep(time_delay)

    return True
    def _check_value_of_performance_client_io_threads(self, enabled=True):
        """Check value of performance.client-io-threads

        Args:
            enabled (bool): True expects the option 'on' with 3
                io-threads occurrences in the fuse volfile; False
                expects 'off' with 0 occurrences.
        """
        # Expected option value and volfile occurrence count
        if enabled:
            value, instances = "on", 3
        else:
            value, instances = "off", 0

        # The stored option value must match the expectation
        ret = get_volume_options(self.mnode, self.volname,
                                 option="performance.client-io-threads")
        self.assertEqual(ret['performance.client-io-threads'], value,
                         "performance.client-io-threads value {} instead "
                         "of {}".format(ret['performance.client-io-threads'],
                                        value))

        # io-threads must appear in the generated fuse volfile exactly
        # the expected number of times
        ret = occurences_of_pattern_in_file(
            self.mnode, 'io-threads', "/var/lib/glusterd/vols/{}/trusted-{}."
            "tcp-fuse.vol".format(self.volname, self.volname))
        self.assertEqual(ret, instances, "Number of io-threads more than {}"
                         .format(instances))
Ejemplo n.º 14
0
def get_tier_demote_frequency(mnode, volname):
    """Gets tier demote frequency value for given volume.

    Args:
        mnode (str): Node on which command has to be executed.
        volname (str): volume name

    Returns:
        NoneType: None if command execution fails, parse errors.
        str: demote frequency value on success.

    Examples:
        >>>get_tier_demote_frequency("abc.com", "testvol")
    """
    # Imported locally to avoid a circular module dependency
    from glustolibs.gluster.volume_ops import get_volume_options

    options = get_volume_options(mnode, volname)
    if options is None:
        g.log.error("Failed to get volume options")
        return None
    return options['cluster.tier-demote-frequency']
Ejemplo n.º 15
0
def is_uss_enabled(mnode, volname):
    """Check if uss is Enabled on the specified volume

    Args:
        mnode (str): Node on which cmd has to be executed.
        volname (str): volume name

    Returns:
        bool : True if successfully enabled uss on the volume. False otherwise.
    """
    # Imported locally to avoid a circular module dependency
    from glustolibs.gluster.volume_ops import get_volume_options

    uss_opts = get_volume_options(mnode=mnode, volname=volname,
                                  option="uss")
    if uss_opts is None:
        g.log.error("USS is not set on the volume %s" % volname)
        return False

    # USS counts as enabled only when the key exists and reads 'enable'
    return uss_opts.get('features.uss') == 'enable'
Ejemplo n.º 16
0
def get_tier_watermark_low_limit(mnode, volname):
    """Gets tier watermark low limit for given volume.

    Args:
        mnode (str): Node on which command has to be executed.
        volname (str): volume name

    Returns:
        NoneType: None if command execution fails, parse errors.
        str: tier watermark low limit on success.

    Examples:
        >>>get_tier_watermark_low_limit("abc.com", "testvol")
    """
    # Imported locally to avoid a circular module dependency
    from glustolibs.gluster.volume_ops import get_volume_options

    options = get_volume_options(mnode, volname)
    if options is None:
        g.log.error("Failed to get volume options")
        return None
    return options['cluster.watermark-low']
    def test_lower_gluster_op_version(self):
        """
        - Create volume
        - Get the volume op-version
        - Set the valid lower op-version
        - Set the invalid op-version
        """
        # The current op-version must be readable before trying to change it
        ret = get_volume_options(self.mnode, self.volname,
                                 'cluster.op-version')
        self.assertIsNotNone(ret, "Failed to get the op-version")
        g.log.info("Successfully get the op-version")

        # 30000 is the lowest op-version ever shipped; downgrading to it,
        # or setting a non-numeric value, must both be rejected.
        for kind_word, bad_value in (("lower", 30000), ("invalid", "abc")):
            ret = set_volume_options(self.mnode, 'all',
                                     {'cluster.op-version': bad_value})
            self.assertFalse(
                ret, "Expected: Should not be able to set %s "
                "op-version \n Actual: Successfully set the %s"
                " op-version" % (kind_word, kind_word))
            g.log.info("Failed to set op-version %s as "
                       "expected", bad_value)
Ejemplo n.º 18
0
def is_bitrot_enabled(mnode, volname):
    """Checks if bitrot is enabled on given volume

    Args:
        mnode (str): Node on which cmd has to be executed.
        volname (str): volume name

    Returns:
        True on success, False otherwise

    Example:
        is_bitrot_enabled("abc.com", testvol)
    """
    bitrot_opts = get_volume_options(mnode, volname, "features.bitrot")
    if bitrot_opts is None:
        # Lookup failed; treat bitrot as disabled
        return False

    g.log.info("Bitrot Status in volume %s: %s" %
               (volname, bitrot_opts["features.bitrot"]))
    return bitrot_opts["features.bitrot"] == 'on'
Ejemplo n.º 19
0
    def test_client_side_quorum_auto_local_to_volume_not_cluster(self):
        """
        - create four volume as below
            vol1->2x2
            vol2->2x2
            vol3->2x3
            vol4->2x3
            vol5->a pure distribute volume
        - do IO to all vols
        - set client side quorum to auto for vol1 and vol3
        - get the client side quorum value for all vols and check for result
        - bring down b0 on vol1 and b0 and b1 on vol3
        - try to create files on all vols and check for result
        """
        # pylint: disable=too-many-locals,too-many-statements
        # Creating files for all volumes
        for mount_point in self.mount_points:
            self.all_mounts_procs = []
            g.log.info('Creating files...')
            command = ("python %s create_files -f 50 "
                       "--fixed-file-size 1k %s" %
                       (self.script_upload_path, mount_point))

            proc = g.run_async(self.mounts[0].client_system, command)
            self.all_mounts_procs.append(proc)
            self.io_validation_complete = False

            # Validate IO
            self.assertTrue(
                validate_io_procs(self.all_mounts_procs, self.mounts),
                "IO failed on some of the clients")
            self.io_validation_complete = True

        volumes_to_change_options = ['1', '3']
        # set cluster.quorum-type to auto
        for vol_number in volumes_to_change_options:
            vol_name = ('testvol_distributed-replicated_%s' % vol_number)
            options = {"cluster.quorum-type": "auto"}
            g.log.info(
                "setting cluster.quorum-type to auto on "
                "volume testvol_distributed-replicated_%s", vol_number)
            ret = set_volume_options(self.mnode, vol_name, options)
            self.assertTrue(ret, ("Unable to set volume option %s for "
                                  "volume %s" % (options, vol_name)))
            g.log.info("Successfully set %s for volume %s", options, vol_name)

        # check is options are set correctly
        volume_list = get_volume_list(self.mnode)
        for volume in volume_list:
            g.log.info('Checking for cluster.quorum-type option for %s',
                       volume)
            volume_options_dict = get_volume_options(self.mnode, volume,
                                                     'cluster.quorum-type')
            # NOTE(review): volume _4 is expected 'auto' although only _1
            # and _3 were changed above -- confirm this is intentional.
            if volume in ('testvol_distributed-replicated_1',
                          'testvol_distributed-replicated_3',
                          'testvol_distributed-replicated_4'):
                self.assertEqual(
                    volume_options_dict['cluster.quorum-type'], 'auto',
                    'Option cluster.quorum-type '
                    'is not AUTO for %s' % volume)
                g.log.info('Option cluster.quorum-type is AUTO for %s', volume)
            else:
                self.assertEqual(
                    volume_options_dict['cluster.quorum-type'], 'none',
                    'Option cluster.quorum-type '
                    'is not NONE for %s' % volume)
                g.log.info('Option cluster.quorum-type is NONE for %s', volume)

        # Get first brick server and brick path
        # and get first file from filelist then delete it from volume
        vols_file_list = {}
        for volume in volume_list:
            brick_list = get_all_bricks(self.mnode, volume)
            brick_server, brick_path = brick_list[0].split(':')
            ret, file_list, _ = g.run(brick_server, 'ls %s' % brick_path)
            self.assertFalse(ret, 'Failed to ls files on %s' % brick_server)
            file_from_vol = file_list.splitlines()[0]
            ret, _, _ = g.run(brick_server,
                              'rm -rf %s/%s' % (brick_path, file_from_vol))
            self.assertFalse(ret, 'Failed to rm file on %s' % brick_server)
            vols_file_list[volume] = file_from_vol

        # bring bricks offline
        # bring first brick for testvol_distributed-replicated_1
        volname = 'testvol_distributed-replicated_1'
        brick_list = get_all_bricks(self.mnode, volname)
        bricks_to_bring_offline = brick_list[0:1]
        g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline)
        ret = bring_bricks_offline(volname, bricks_to_bring_offline)
        self.assertTrue(
            ret, 'Failed to bring bricks %s offline' % bricks_to_bring_offline)

        ret = are_bricks_offline(self.mnode, volname, bricks_to_bring_offline)
        self.assertTrue(ret,
                        'Bricks %s are not offline' % bricks_to_bring_offline)
        g.log.info('Bringing bricks %s offline is successful',
                   bricks_to_bring_offline)

        # bring first two bricks for testvol_distributed-replicated_3
        volname = 'testvol_distributed-replicated_3'
        brick_list = get_all_bricks(self.mnode, volname)
        bricks_to_bring_offline = brick_list[0:2]
        g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline)
        ret = bring_bricks_offline(volname, bricks_to_bring_offline)
        self.assertTrue(
            ret, 'Failed to bring bricks %s offline' % bricks_to_bring_offline)

        ret = are_bricks_offline(self.mnode, volname, bricks_to_bring_offline)
        self.assertTrue(ret,
                        'Bricks %s are not offline' % bricks_to_bring_offline)
        g.log.info('Bringing bricks %s offline is successful',
                   bricks_to_bring_offline)

        # merge two dicts (volname: file_to_delete) and (volname: mountpoint)
        # Fix: dict.iterkeys()/iteritems() were removed in Python 3; use
        # plain iteration and .items(). Also rename the generator variable,
        # which previously shadowed the result dict's name.
        temp_dict = [vols_file_list, self.mount_points_and_volnames]
        file_to_delete_to_mountpoint_dict = {}
        for k in vols_file_list:
            file_to_delete_to_mountpoint_dict[k] = tuple(
                src_dict[k] for src_dict in temp_dict)

        # create files on all volumes and check for result
        for volname, file_and_mountpoint in \
                file_to_delete_to_mountpoint_dict.items():
            filename, mountpoint = file_and_mountpoint

            # check for ROFS error for read-only file system for
            # testvol_distributed-replicated_1 and
            # testvol_distributed-replicated_3
            if volname in ('testvol_distributed-replicated_1',
                           'testvol_distributed-replicated_3'):
                # create new file taken from vols_file_list
                g.log.info("Start creating new file on all mounts...")
                all_mounts_procs = []
                cmd = ("touch %s/%s" % (mountpoint, filename))

                proc = g.run_async(self.client, cmd)
                all_mounts_procs.append(proc)

                # Validate IO
                g.log.info("Validating if IO failed with read-only filesystem")
                ret, _ = is_io_procs_fail_with_error(self, all_mounts_procs,
                                                     self.mounts,
                                                     self.mount_type)
                self.assertTrue(ret, ("Unexpected error and IO successful"
                                      " on read-only filesystem"))
                g.log.info("EXPECTED: "
                           "Read-only file system in IO while creating file")

            # check for no errors for all the rest volumes
            else:
                # create new file taken from vols_file_list
                g.log.info("Start creating new file on all mounts...")
                all_mounts_procs = []
                cmd = ("touch %s/%s" % (mountpoint, filename))

                proc = g.run_async(self.client, cmd)
                all_mounts_procs.append(proc)

                # Validate IO
                self.assertTrue(
                    validate_io_procs(all_mounts_procs, self.mounts),
                    "IO failed on some of the clients")
    def test_volume_set_option_data_self_heal(self):
        """Verify no background data heal happens while data-self-heal is
        off, and that data is healed once the option is turned back on.

        - turn off self-heal-daemon and data-self-heal options
        - check if the options are set correctly
        - create IO and calculate arequal
          If it is distribute-replicate, the arequal checksum of bricks
          in each replica set should match
        - bring down "brick1", modify IO, bring back brick1
        - execute "find . | xargs stat" from the mount point
          to trigger background data self-heal
        - calculate arequal
          If it is distribute-replicate, the arequal checksum of the brick
          which was down should NOT match the bricks which stayed up in
          its replica set, while replica sets with all bricks up should
          still match
        - check that the data of existing files was not modified on brick1
        - turn on the option data-self-heal
        - execute "find . -type f | xargs md5sum" from the mount point
        - wait for heal to complete and calculate arequal
          If it is distribute-replicate, the arequal checksum of bricks
          in each replica set should match again
        """
        # pylint: disable=too-many-locals,too-many-statements,too-many-branches

        all_bricks = get_all_bricks(self.mnode, self.volname)

        # Turn off both heal mechanisms so nothing heals behind our back
        # while a brick is down and brought back online
        options = {"self-heal-daemon": "off", "data-self-heal": "off"}
        g.log.info('Setting options %s...', options)
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, 'Failed to set options %s' % options)
        g.log.info("Successfully set %s for volume %s", options, self.volname)

        # Check if options are set to off
        options_dict = get_volume_options(self.mnode, self.volname)
        self.assertEqual(options_dict['cluster.self-heal-daemon'], 'off',
                         'Option self-heal-daemon is not set to off')
        self.assertEqual(options_dict['cluster.data-self-heal'], 'off',
                         'Option data-self-heal is not set to off')
        g.log.info('Option are set to off: %s', options)

        # Creating files on client side
        for mount_obj in self.mounts:
            g.log.info("Generating data for %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            # Create 100 files of increasing size in a dedicated directory
            g.log.info('Creating files and dirs...')
            command = ('cd %s ; '
                       'mkdir test_data_self_heal ;'
                       'cd test_data_self_heal ; '
                       'for i in `seq 1 100` ; '
                       'do dd if=/dev/urandom of=file.$i bs=128K count=$i ; '
                       'done ;' % mount_obj.mountpoint)

            proc = g.run_async(mount_obj.client_system,
                               command,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        self.assertTrue(validate_io_procs(self.all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")
        self.io_validation_complete = True

        # Check arequals
        # get the subvolumes
        g.log.info("Starting to get sub-volumes for volume %s", self.volname)
        subvols_dict = get_subvols(self.mnode, self.volname)
        num_subvols = len(subvols_dict['volume_subvols'])
        g.log.info("Number of subvolumes in volume: %s", num_subvols)

        # Before any brick goes down, every brick of every replica set
        # must agree with the first brick of its set
        for i in range(0, num_subvols):
            # Get arequal for first brick
            subvol_brick_list = subvols_dict['volume_subvols'][i]
            node, brick_path = subvol_brick_list[0].split(':')
            command = ('arequal-checksum -p %s '
                       '-i .glusterfs -i .landfill -i .trashcan' % brick_path)
            ret, arequal, _ = g.run(node, command)
            # BUGFIX: return code of the reference brick was not checked
            self.assertFalse(ret, 'Failed to get arequal on brick %s'
                             % subvol_brick_list[0])
            first_brick_total = arequal.splitlines()[-1].split(':')[-1]

            # Get arequal for every brick and compare with first brick
            for brick in subvol_brick_list:
                node, brick_path = brick.split(':')
                command = ('arequal-checksum -p %s '
                           '-i .glusterfs -i .landfill -i .trashcan' %
                           brick_path)
                ret, brick_arequal, _ = g.run(node, command)
                self.assertFalse(ret,
                                 'Failed to get arequal on brick %s' % brick)
                g.log.info('Getting arequal for %s is successful', brick)
                brick_total = brick_arequal.splitlines()[-1].split(':')[-1]

                self.assertEqual(
                    first_brick_total, brick_total,
                    'Arequals for subvol and %s are not equal' % brick)
                g.log.info('Arequals for subvol and %s are equal', brick)
        g.log.info('All arequals are equal for distributed-replicated')

        # Select bricks to bring offline (the first brick of the volume)
        bricks_to_bring_offline = [get_all_bricks(self.mnode, self.volname)[0]]

        # Get files/dir size
        g.log.info('Getting file/dir list on brick to be offline')
        node, brick_path = bricks_to_bring_offline[0].split(':')
        # Get files/dir list
        command = 'cd %s ; ls' % brick_path
        ret, file_list, _ = g.run(node, command)
        self.assertFalse(ret, 'Failed to ls files on %s' % node)
        brick_file_dir_list = file_list.splitlines()
        # Record each file/dir size before bringing the brick offline so
        # we can later prove its data was NOT touched while heal was off
        g.log.info('Getting file/dir size on brick to be offline')
        brick_file_dir_dict_before_offline = {}
        for file_dir in brick_file_dir_list:
            command = 'cd %s ; du -h %s' % (brick_path, file_dir)
            ret, file_info, _ = g.run(node, command)
            self.assertFalse(ret, 'Failed to get file size on %s' % node)
            # du output is '<size>\t<name>'
            file_size = file_info.split('\t')[0]
            brick_file_dir_dict_before_offline[file_dir] = file_size

        # Bring brick 1 offline
        g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline)
        ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
        self.assertTrue(
            ret, 'Failed to bring bricks %s offline' % bricks_to_bring_offline)

        ret = are_bricks_offline(self.mnode, self.volname,
                                 bricks_to_bring_offline)
        self.assertTrue(ret,
                        'Bricks %s are not offline' % bricks_to_bring_offline)
        g.log.info('Bringing bricks %s offline is successful',
                   bricks_to_bring_offline)

        # Modify data while the brick is down; only the surviving bricks
        # of that replica set receive the new (larger, bs=512K) contents
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Adding data for %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            # changing files
            g.log.info('Creating dirs and files...')
            # BUGFIX: the original format string had no '%s' placeholder
            # for mount_obj.mountpoint ('cd test_data_self_heal ; ...'),
            # so the '%' operator raised TypeError; 'cd' also needs the
            # absolute path of the directory on the mount
            command = ('cd %s/test_data_self_heal ; '
                       'for i in `seq 1 100` ; '
                       'do dd if=/dev/urandom of=file.$i bs=512K count=$i ; '
                       'done ;' % mount_obj.mountpoint)

            proc = g.run_async(mount_obj.client_system,
                               command,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        self.assertTrue(validate_io_procs(self.all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")
        self.io_validation_complete = True

        # Bring brick online
        g.log.info('Bringing bricks %s online...', bricks_to_bring_offline)
        ret = bring_bricks_online(self.mnode, self.volname,
                                  bricks_to_bring_offline)
        self.assertTrue(
            ret, 'Failed to bring bricks %s online' % bricks_to_bring_offline)
        g.log.info('Bringing bricks %s online is successful',
                   bricks_to_bring_offline)

        # Stat every file from the mount; with data-self-heal off this
        # must NOT trigger background data heal
        g.log.info('Triggering heal from mount point...')
        for mount_obj in self.mounts:
            g.log.info("Triggering heal for %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            command = ('cd %s/test_data_self_heal ; find . | xargs stat' %
                       mount_obj.mountpoint)
            ret, _, _ = g.run(mount_obj.client_system, command)
            self.assertFalse(
                ret, 'Failed to start "find . | xargs stat" '
                'on %s' % mount_obj.client_system)

        # Check arequals
        g.log.info("Starting to get sub-volumes for volume %s", self.volname)
        subvols_dict = get_subvols(self.mnode, self.volname)
        subvols = subvols_dict['volume_subvols']

        # First subvol: the formerly-offline brick (all_bricks[0]) must
        # now DISAGREE with its replica peers, since no heal happened
        first_brick = all_bricks[0]
        node, brick_path = first_brick.split(':')
        command = ('arequal-checksum -p %s '
                   '-i .glusterfs -i .landfill -i .trashcan' % brick_path)
        ret, arequal, _ = g.run(node, command)
        # BUGFIX: return code of the reference brick was not checked
        self.assertFalse(ret, 'Failed to get arequal on brick %s'
                         % first_brick)
        first_brick_total = arequal.splitlines()[-1].split(':')[-1]

        for brick in subvols[0]:
            g.log.info('Getting arequal on bricks %s...', brick)
            node, brick_path = brick.split(':')
            command = ('arequal-checksum -p %s '
                       '-i .glusterfs -i .landfill -i .trashcan' % brick_path)
            ret, arequal, _ = g.run(node, command)
            self.assertFalse(ret, 'Failed to get arequal on brick %s' % brick)
            g.log.info('Getting arequal for %s is successful', brick)
            brick_total = arequal.splitlines()[-1].split(':')[-1]

            if brick != first_brick:
                self.assertNotEqual(
                    first_brick_total, brick_total,
                    'Arequals for mountpoint and %s '
                    'are equal' % brick)
                g.log.info('Arequals for mountpoint and %s are not equal',
                           brick)
            else:
                self.assertEqual(
                    first_brick_total, brick_total,
                    'Arequals for mountpoint and %s '
                    'are not equal' % brick)
                g.log.info('Arequals for mountpoint and %s are equal', brick)

        # Remaining subvols had all bricks up the whole time, so every
        # brick must still agree with the first brick of its set
        num_subvols = len(subvols_dict['volume_subvols'])
        for i in range(1, num_subvols):
            # Get arequal for first brick
            subvol_brick_list = subvols_dict['volume_subvols'][i]
            node, brick_path = subvol_brick_list[0].split(':')
            command = ('arequal-checksum -p %s '
                       '-i .glusterfs -i .landfill -i .trashcan' % brick_path)
            ret, arequal, _ = g.run(node, command)
            # BUGFIX: return code of the reference brick was not checked
            self.assertFalse(ret, 'Failed to get arequal on brick %s'
                             % subvol_brick_list[0])
            first_brick_total = arequal.splitlines()[-1].split(':')[-1]

            # Get arequal for every brick and compare with first brick
            for brick in subvol_brick_list:
                node, brick_path = brick.split(':')
                command = ('arequal-checksum -p %s '
                           '-i .glusterfs -i .landfill -i .trashcan' %
                           brick_path)
                ret, brick_arequal, _ = g.run(node, command)
                self.assertFalse(ret,
                                 'Failed to get arequal on brick %s' % brick)
                g.log.info('Getting arequal for %s is successful', brick)
                brick_total = brick_arequal.splitlines()[-1].split(':')[-1]

                self.assertEqual(
                    first_brick_total, brick_total,
                    'Arequals for subvol and %s are not equal' % brick)
                g.log.info('Arequals for subvol and %s are equal', brick)
        g.log.info('All arequals are equal for distributed-replicated')

        # Get files/dir size after bringing brick online
        g.log.info('Getting file/dir size on brick after bringing online')
        # BUGFIX: 'node' and 'brick_path' were clobbered by the arequal
        # loops above, so the sizes were read from whichever brick was
        # visited last; re-derive them from the brick that was offline
        node, brick_path = bricks_to_bring_offline[0].split(':')
        brick_file_dir_dict_after_online = {}
        for file_dir in brick_file_dir_list:
            command = 'cd %s ; du -h %s' % (brick_path, file_dir)
            ret, file_info, _ = g.run(node, command)
            self.assertFalse(ret, 'Failed to get file size on %s' % node)
            file_size = file_info.split('\t')[0]
            brick_file_dir_dict_after_online[file_dir] = file_size

        # Compare dicts with file size
        g.log.info('Compare file/dir size on brick before bringing offline and'
                   ' after bringing online')
        # BUGFIX: replaced the Python-2-only builtin cmp() with a direct
        # dict equality assertion (works on both py2 and py3)
        self.assertEqual(
            brick_file_dir_dict_before_offline,
            brick_file_dir_dict_after_online,
            'file/dir size on brick before bringing offline and '
            'after bringing online are not equal')
        g.log.info('file/dir size on brick before bringing offline and '
                   'after bringing online are equal')

        # Re-enable data-self-heal so the stale brick can now be healed
        options = {"data-self-heal": "on"}
        g.log.info('Setting options %s...', options)
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, 'Failed to set options %s' % options)
        g.log.info("Option 'data-self-heal' is set to 'on' successfully")

        # Read every file from the mount to trigger client-side data heal;
        # the command's outcome is deliberately ignored — heal completion
        # is monitored right below
        g.log.info('Starting heal from mount point...')
        for mount_obj in self.mounts:
            g.log.info("Start heal for %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            command = ('cd %s/test_data_self_heal ; '
                       ' find . | xargs md5sum' % mount_obj.mountpoint)
            _, _, _ = g.run(mount_obj.client_system, command)

        # Monitor heal completion
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal has not yet completed')

        # Check if heal is completed
        ret = is_heal_complete(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal is not complete')
        g.log.info('Heal is completed successfully')

        # Check for split-brain
        ret = is_volume_in_split_brain(self.mnode, self.volname)
        self.assertFalse(ret, 'Volume is in split-brain state')
        g.log.info('Volume is not in split-brain state')

        # Check arequals
        # get the subvolumes
        g.log.info("Starting to get sub-volumes for volume %s", self.volname)
        subvols_dict = get_subvols(self.mnode, self.volname)
        num_subvols = len(subvols_dict['volume_subvols'])
        g.log.info("Number of subvolumes in volume: %s", num_subvols)

        # After heal, all bricks of every replica set must agree again
        for i in range(0, num_subvols):
            # Get arequal for first brick
            subvol_brick_list = subvols_dict['volume_subvols'][i]
            node, brick_path = subvol_brick_list[0].split(':')
            command = ('arequal-checksum -p %s '
                       '-i .glusterfs -i .landfill -i .trashcan' % brick_path)
            ret, arequal, _ = g.run(node, command)
            # BUGFIX: return code of the reference brick was not checked
            self.assertFalse(ret, 'Failed to get arequal on brick %s'
                             % subvol_brick_list[0])
            first_brick_total = arequal.splitlines()[-1].split(':')[-1]

            # Get arequal for every brick and compare with first brick
            for brick in subvol_brick_list:
                node, brick_path = brick.split(':')
                command = ('arequal-checksum -p %s '
                           '-i .glusterfs -i .landfill -i .trashcan' %
                           brick_path)
                ret, brick_arequal, _ = g.run(node, command)
                self.assertFalse(ret,
                                 'Failed to get arequal on brick %s' % brick)
                g.log.info('Getting arequal for %s is successful', brick)
                brick_total = brick_arequal.splitlines()[-1].split(':')[-1]

                self.assertEqual(
                    first_brick_total, brick_total,
                    'Arequals for subvol and %s are not equal' % brick)
                g.log.info('Arequals for subvol and %s are equal', brick)
        g.log.info('All arequals are equal for distributed-replicated')
Ejemplo n.º 21
0
    def test_volume_get(self):
        """
        desc: performing different combinations of gluster
        volume get functionalities
        1. Create a gluster cluster
        2. Get the option from the non-existing volume,
        gluster volume get <non-existing vol> io-cache
        3. Get all options from the non-existing volume,
        gluster volume get <non-existing volume > all
        4. Provide an incorrect command syntax to get the options
        from the volume
            gluster volume get <vol-name>
            gluster volume get
            gluster volume get io-cache
        5. Create any type of volume in the cluster
        6. Get the value of the non-existing option
            gluster volume get <vol-name> temp.key
        7. get all options set on the volume
            gluster volume get <vol-name> all
        8. get the specific option set on the volume
            gluster volume get <vol-name> io-cache
        9. Set an option on the volume
            gluster volume set <vol-name> performance.low-prio-threads 14
        10. Get all the options set on the volume and check
        for low-prio-threads
            gluster volume get <vol-name> all then get the
            low-prio-threads value
        11. Get all the options set on the volume
                gluster volume get <vol-name> all
        12.  Check for any cores in "cd /"
        """
        # pylint: disable=too-many-statements

        # time stamp of current test case; used at the end to detect core
        # files newer than the start of this test
        ret, test_timestamp, _ = g.run_local('date +%s')
        test_timestamp = test_timestamp.strip()

        # performing gluster volume get command for non exist volume io-cache
        self.non_exist_volume = "abc99"
        ret, _, err = g.run(
            self.mnode,
            "gluster volume get %s io-cache" % self.non_exist_volume)
        self.assertNotEqual(
            ret, 0, "gluster volume get command should fail "
            "for non existing volume with io-cache "
            "option :%s" % self.non_exist_volume)
        # glusterd's "does not exist" message; `msg` is reused for the
        # next non-existing-volume check as well
        msg = ('Volume ' + self.non_exist_volume + ' does not exist')
        self.assertIn(
            msg, err, "No proper error message for non existing "
            "volume with io-cache option :%s" % self.non_exist_volume)
        g.log.info(
            "gluster volume get command failed successfully for non "
            "existing volume with io-cache option"
            ":%s", self.non_exist_volume)

        # performing gluster volume get all command for non exist volume
        ret, _, err = g.run(
            self.mnode, "gluster volume get %s all" % self.non_exist_volume)
        self.assertNotEqual(
            ret, 0, "gluster volume get command should fail "
            "for non existing volume %s with all "
            "option" % self.non_exist_volume)
        self.assertIn(
            msg, err, "No proper error message for non existing "
            "volume with all option:%s" % self.non_exist_volume)
        g.log.info(
            "gluster volume get command failed successfully for non "
            "existing volume with all option :%s", self.non_exist_volume)

        # performing gluster volume get command for non exist volume
        # (no option key given at all)
        ret, _, err = g.run(self.mnode, "gluster volume get "
                            "%s" % self.non_exist_volume)
        self.assertNotEqual(
            ret, 0, "gluster volume get command should "
            "fail for non existing volume :%s" % self.non_exist_volume)
        # CLI usage string; `msg` is reused for the next two incomplete
        # command-syntax checks
        msg = 'get <VOLNAME|all> <key|all>'
        self.assertIn(
            msg, err, "No proper error message for non existing "
            "volume :%s" % self.non_exist_volume)
        g.log.info(
            "gluster volume get command failed successfully for non "
            "existing volume :%s", self.non_exist_volume)

        # performing gluster volume get command without any volume name given
        ret, _, err = g.run(self.mnode, "gluster volume get")
        self.assertNotEqual(ret, 0, "gluster volume get command should fail")
        self.assertIn(
            msg, err, "No proper error message for gluster "
            "volume get command")
        g.log.info("gluster volume get command failed successfully")

        # performing gluster volume get io-cache command
        # without any volume name given
        ret, _, err = g.run(self.mnode, "gluster volume get io-cache")
        self.assertNotEqual(
            ret, 0, "gluster volume get io-cache command "
            "should fail")
        self.assertIn(
            msg, err, "No proper error message for gluster volume "
            "get io-cache command")
        g.log.info("gluster volume get io-cache command failed successfully")

        # gluster volume get volname with non existing option
        ret, _, err = g.run(self.mnode, "gluster volume "
                            "get %s temp.key" % self.volname)
        self.assertNotEqual(
            ret, 0, "gluster volume get command should fail "
            "for existing volume %s with non-existing "
            "option" % self.volname)
        # glusterd suggests the closest known options for an unknown key
        msg = 'Did you mean auth.allow or ...reject?'
        self.assertIn(
            msg, err, "No proper error message for existing "
            "volume %s with non-existing option" % self.volname)
        g.log.info(
            "gluster volume get command failed successfully for "
            "existing volume %s with non existing option", self.volname)

        # performing gluster volume get volname all

        ret = get_volume_options(self.mnode, self.volname, "all")
        self.assertIsNotNone(
            ret, "gluster volume get %s all command "
            "failed" % self.volname)
        g.log.info(
            "gluster volume get %s all command executed "
            "successfully", self.volname)

        # performing gluster volume get volname io-cache; io-cache is
        # expected to default to 'on'
        ret = get_volume_options(self.mnode, self.volname, "io-cache")
        self.assertIsNotNone(
            ret, "gluster volume get %s io-cache command "
            "failed" % self.volname)
        self.assertIn("on", ret['performance.io-cache'], "io-cache value "
                      "is not correct")
        g.log.info("io-cache value is correct")

        # Performing gluster volume set volname performance.low-prio-threads
        prio_thread = {'performance.low-prio-threads': '14'}
        ret = set_volume_options(self.mnode, self.volname, prio_thread)
        self.assertTrue(
            ret, "gluster volume set %s performance.low-prio-"
            "threads failed" % self.volname)
        g.log.info(
            "gluster volume set %s "
            "performance.low-prio-threads executed successfully", self.volname)

        # Performing gluster volume get all, checking low-prio threads value
        # reflects the value just set
        ret = get_volume_options(self.mnode, self.volname, "all")
        self.assertIsNotNone(
            ret, "gluster volume get %s all "
            "failed" % self.volname)
        self.assertIn("14", ret['performance.low-prio-threads'],
                      "performance.low-prio-threads value is not correct")
        g.log.info("performance.low-prio-threads value is correct")

        # performing gluster volume get volname all
        ret = get_volume_options(self.mnode, self.volname, "all")
        self.assertIsNotNone(
            ret, "gluster volume get %s all command "
            "failed" % self.volname)
        g.log.info(
            "gluster volume get %s all command executed "
            "successfully", self.volname)

        # Checking that none of the above commands crashed glusterd
        # (no new core file in "/" since test_timestamp)
        ret = is_core_file_created(self.servers, test_timestamp)
        self.assertTrue(ret, "glusterd service should not crash")
        g.log.info("No core file found, glusterd service "
                   "running successfully")
    def test_snap_scheduler_status(self):
        # pylint: disable=too-many-statements
        """Validate snapshot scheduler lifecycle against shared storage.

        Steps:
        1. create volumes
        2. initialise snap scheduler without
           enabling shared storage should fail
        3. enable shared storage
        4. initialise snap scheduler
        5. check snapshot scheduler status
        """
        # Validate shared storage is disabled to begin with
        g.log.info("Starting to validate shared storage volume")
        volinfo = get_volume_options(self.mnode,
                                     self.volname,
                                     option=("cluster.enable"
                                             "-shared-storage"))
        # BUGFIX: fail with a clear assertion instead of raising
        # TypeError if the option lookup itself returned None
        self.assertIsNotNone(volinfo, "Failed to validate volume option")
        if volinfo["cluster.enable-shared-storage"] == "disable":
            # Without shared storage, scheduler initialisation must fail
            g.log.info("Initialising snapshot scheduler on all nodes")
            ret = scheduler_init(self.servers)
            self.assertFalse(
                ret, "Unexpected: Successfully initialized "
                "scheduler on all nodes")
            g.log.info("As Expected, Failed to initialize scheduler on "
                       "all nodes")
        self.assertEqual(volinfo["cluster.enable-shared-storage"], "disable",
                         "Unexpected: Shared storage "
                         "is enabled on cluster")

        # Enable Shared storage
        g.log.info("enabling shared storage")
        ret = enable_shared_storage(self.mnode)
        self.assertTrue(ret, "Failed to enable shared storage")
        g.log.info("Successfully enabled shared storage")

        # Validate shared storage mounted
        g.log.info("validate shared storage mounted")
        ret = is_shared_volume_mounted(self.mnode)
        self.assertTrue(ret, "Failed to mount shared volume")
        g.log.info("Successfully mounted shared volume")

        # Validate shared storage volume is enabled
        g.log.info("validate shared storage volume")
        volinfo = get_volume_options(self.mnode,
                                     self.volname,
                                     option=("cluster.enable"
                                             "-shared-storage"))
        self.assertIsNotNone(volinfo, "Failed to validate volume option")
        self.assertEqual(volinfo["cluster.enable-shared-storage"], "enable",
                         "Failed to enable shared storage volume")
        g.log.info("Shared storage enabled successfully")

        # Initialise snap scheduler; retry for up to ~80s since the
        # freshly enabled shared-storage mount may not be ready yet
        g.log.info("Initialising snapshot scheduler on all nodes")
        count = 0
        while count < 40:
            ret = scheduler_init(self.servers)
            if ret:
                break
            time.sleep(2)
            count += 1
        self.assertTrue(ret, "Failed to initialize scheduler on all nodes")
        g.log.info("Successfully initialized scheduler on all nodes")

        # Enable snap scheduler
        g.log.info("Enabling snap scheduler")
        ret, _, _ = scheduler_enable(self.mnode)
        self.assertEqual(ret, 0,
                         "Failed to enable scheduler on %s node" % self.mnode)
        g.log.info("Successfully enabled scheduler on %s node", self.mnode)

        # Check snapshot scheduler status on each server, again retrying
        # for up to ~80s per node until the status command succeeds
        g.log.info("checking status of snapshot scheduler")
        for server in self.servers:
            count = 0
            while count < 40:
                ret, status, _ = scheduler_status(server)
                if ret == 0:
                    # NOTE(review): assumes the status line contains two
                    # ':' separators with ' Enabled' as the third field
                    # — confirm against scheduler_status output format
                    self.assertEqual(status.strip().split(":")[2], ' Enabled',
                                     "Failed to check status of scheduler")
                    break
                time.sleep(2)
                count += 1
            self.assertEqual(
                ret, 0, "Failed to check status of scheduler"
                " on nodes %s" % server)
            g.log.info("Successfully checked scheduler status on %s nodes",
                       server)

        # disable snap scheduler
        g.log.info("disabling snap scheduler")
        ret, _, _ = scheduler_disable(self.mnode)
        self.assertEqual(ret, 0, "Unexpected: Failed to disable "
                         "snapshot scheduler")
        g.log.info("Successfully disabled snapshot scheduler")

        # disable shared storage
        g.log.info("starting to disable shared storage")
        ret = disable_shared_storage(self.mnode)
        self.assertTrue(ret, "Failed to disable shared storage")
        g.log.info("Successfully disabled shared storage")

        # Validate shared volume unmounted
        g.log.info("Validate shared volume unmounted")
        ret = is_shared_volume_unmounted(self.mnode)
        self.assertTrue(ret, "Failed to unmount shared storage")
        g.log.info("Successfully unmounted shared storage")
Ejemplo n.º 23
0
    def test_eagerlock_while_io_in_progress(self):
        '''
        Create replica volume then mount the volume, once
        volume mounted successfully on client, start running IOs on
        mount point then run the "gluster volume <volname> profile info"
        command on all clusters randomly.
        Then check that IOs completed successfully or not on mount point.
        Check that files in mount point listing properly or not.
        check the release directory value should be less or equals '4'
        '''

        status_on = "on"
        # Starting profiling implicitly turns these options on; each one
        # is verified below
        validate_profiles = ('cluster.eager-lock',
                             'diagnostics.count-fop-hits',
                             'diagnostics.latency-measurement')

        # Start profiling from a randomly chosen server
        ret, _, _ = profile_start(random.choice(self.servers), self.volname)
        self.assertEqual(
            ret, 0,
            ("Volume profile failed to start for volume %s" % self.volname))

        for validate_profile in validate_profiles:
            out = get_volume_options(random.choice(self.servers),
                                     self.volname,
                                     option=(validate_profile))
            self.assertIsNotNone(
                out, "Volume get failed for volume "
                "%s" % self.volname)
            self.assertEqual(out[validate_profile], status_on, "Failed to "
                             "match profile information")

        # Mounting a volume
        ret = self.mount_volume(self.mounts)
        self.assertTrue(ret, "Volume mount failed for %s" % self.volname)

        # run IOs
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "/usr/bin/env python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 2 "
                "--dir-length 15 "
                "--max-num-of-dirs 5 "
                "--num-of-files 25 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
            # offset the dir start number so mounts don't collide
            self.counter = self.counter + 10
        self.io_validation_complete = False

        # this command should not get hang while io is in progress
        # pylint: disable=unused-variable
        for i in range(20):
            ret, _, _ = profile_info(random.choice(self.servers), self.volname)
            self.assertEqual(ret, 0, ("Volume profile info failed on "
                                      "volume %s" % self.volname))

        # Validate IO
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.io_validation_complete = True
        self.assertTrue(ret, "IO failed on some of the clients")

        # List all files and dirs created
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")

        # Extract the OPENDIR column from the profile info output.
        # BUGFIX: the original implicit string concatenation produced
        # '| grepOPENDIR' (missing space), so the shell pipeline failed.
        volume_profile_info = "gluster v profile %s info"
        _, out, _ = g.run(
            random.choice(self.servers),
            volume_profile_info % self.volname + " | grep "
            "OPENDIR | awk '{print$8}'")
        self.assertIsNotNone(
            out, "Failed to get volume %s profile info" % self.volname)
        # BUGFIX: the split result was discarded and the raw string was
        # iterated character by character; also compare numerically,
        # since a lexicographic string compare would accept '10' <= '4'
        opendir_values = out.strip().split('\n')
        for value in opendir_values:
            if not value:
                continue
            self.assertLessEqual(
                int(value), 4, "Failed to Validate profile"
                " on volume %s" % self.volname)
    def test_default_granular_entry_heal(self):
        """
        Test case:
        1. Create a cluster.
        2. Create volume start it and mount it.
        3. Check if cluster.granular-entry-heal is ON by default or not.
        4. Check /var/lib/glusterd/<volname>/info for
           cluster.granular-entry-heal=on.
        5. Check if option granular-entry-heal is present in the
           volume graph or not.
        6. Kill one or two bricks of the volume depending on volume type.
        7. Create all types of files on the volume like text files, hidden
           files, link files, dirs, char device, block device and so on.
        8. Bring back the killed brick by restarting the volume.
        9. Wait for heal to complete.
        10. Check arequal-checksum of all the bricks and see if it's proper or
            not.
        """
        # Check if cluster.granular-entry-heal is ON by default or not
        ret = get_volume_options(self.mnode, self.volname,
                                 'granular-entry-heal')
        self.assertEqual(
            ret['cluster.granular-entry-heal'], 'on',
            "Value of cluster.granular-entry-heal not on "
            "by default")

        # Check /var/lib/glusterd/<volname>/info for
        # cluster.granular-entry-heal=on (exactly one occurrence expected)
        ret = occurences_of_pattern_in_file(
            self.mnode, 'cluster.granular-entry-heal=on',
            '/var/lib/glusterd/vols/{}/info'.format(self.volname))
        self.assertEqual(
            ret, 1, "Failed get cluster.granular-entry-heal=on in"
            " info file")

        # Check if option granular-entry-heal is present in the
        # volume graph (client mount log) or not
        ret = occurences_of_pattern_in_file(
            self.first_client, 'option granular-entry-heal on',
            "/var/log/glusterfs/mnt-{}_{}.log".format(self.volname,
                                                      self.mount_type))
        self.assertTrue(ret > 0,
                        "Failed to find granular-entry-heal in volume graph")
        g.log.info("granular-entry-heal properly set to ON by default")

        # Kill one or two bricks of the volume depending on volume type
        self._bring_bricks_offline()

        # Create all types of files on the volume like text files, hidden
        # files, link files, dirs, char device, block device and so on.
        # BUGFIX: 'mknod charfile c 1 5' creates the intended *character*
        # device; the earlier command used type 'b' which silently created
        # a second block device instead.
        cmd = ("cd {};mkdir mydir;cd mydir;mkdir dir;mkdir .hiddendir;"
               "touch file;touch .hiddenfile;mknod blockfile b 1 5;"
               "mknod charfile c 1 5; mkfifo pipefile;touch fileforhardlink;"
               "touch fileforsoftlink;ln fileforhardlink hardlinkfile;"
               "ln -s fileforsoftlink softlinkfile".format(self.mountpoint))
        ret, _, _ = g.run(self.first_client, cmd)
        self.assertFalse(ret, "Failed to create files of all types")

        # Bring back the killed brick by restarting the volume. Bricks
        # should be online again.
        self._restart_volume_and_bring_all_offline_bricks_online()

        # Wait for heal to complete
        self._wait_for_heal_to_completed()

        # Check arequal-checksum of all the bricks and see if it's proper or
        # not
        self._check_arequal_checksum_for_the_volume()
# Example 25
    def test_clone_delete_snap(self):
        """
        clone from snap of one volume
        * Create and Mount the volume
        * Enable some volume options
        * Creating 2 snapshots and activate
        * reset the volume
        * create a clone of snapshots created
        * Mount both the clones
        * Perform I/O on mount point
        * Check volume options of cloned volumes
        * Create snapshot of the cloned snapshot volume
        * cleanup snapshots and volumes
        """

        # pylint: disable=too-many-statements, too-many-locals
        # Enabling Volume options on the volume and validating
        g.log.info("Enabling volume options for volume %s ", self.volname)
        # BUGFIX: the option key previously had a leading space
        # (" features.uss") which is not a valid option name.
        options = {"features.uss": "enable"}
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(
            ret, ("Failed to set volume options for volume %s" % self.volname))
        g.log.info("Successfully set volume options"
                   "for volume %s", self.volname)

        # Validate feature.uss enabled or not
        g.log.info("Validating feature.uss is enabled")
        option = "features.uss"
        vol_option = get_volume_options(self.mnode, self.volname, option)
        self.assertEqual(vol_option['features.uss'], 'enable', "Failed"
                         " to validate "
                         "volume options")
        g.log.info("Successfully validated volume options"
                   "for volume %s", self.volname)

        # Creating snapshot
        g.log.info("Starting to Create snapshot")
        for snap_count in range(0, 2):
            ret, _, _ = snap_create(self.mnode, self.volname,
                                    "snap%s" % snap_count)
            self.assertEqual(
                ret, 0,
                ("Failed to create snapshot for volume %s" % self.volname))
            g.log.info("Snapshot snap%s created successfully"
                       "for volume %s", snap_count, self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        for snap_count in range(0, 2):
            ret, _, _ = snap_activate(self.mnode, "snap%s" % snap_count)
            self.assertEqual(
                ret, 0, ("Failed to Activate snapshot snap%s" % snap_count))
            g.log.info("Snapshot snap%s activated successfully", snap_count)

        # Reset volume: reverts all volume options to their defaults,
        # which is verified right below (features.uss back to 'off')
        g.log.info("Starting to Reset Volume")
        ret, _, _ = volume_reset(self.mnode, self.volname, force=False)
        self.assertEqual(ret, 0, ("Failed to reset volume %s" % self.volname))
        g.log.info("Reset Volume on volume %s is Successful", self.volname)

        # Validate feature.uss enabled or not
        g.log.info("Validating feature.uss is enabled")
        option = "features.uss"
        vol_option = get_volume_options(self.mnode, self.volname, option)
        self.assertEqual(vol_option['features.uss'], 'off', "Failed"
                         " to validate "
                         "volume options")
        g.log.info("Successfully validated volume options"
                   "for volume %s", self.volname)

        # Verify volume's all process are online
        g.log.info("Starting to Verify volume's all process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(ret, ("Volume %s : All process are"
                              "not online" % self.volname))
        g.log.info("Volume %s : All process are online", self.volname)

        # Creating and starting a Clone of snapshot
        g.log.info("Starting to Clone Snapshot")
        for clone_count in range(0, 2):
            ret, _, _ = snap_clone(self.mnode, "snap%s" % clone_count,
                                   "clone%s" % clone_count)
            self.assertEqual(ret, 0,
                             ("Failed to clone clone%s volume" % clone_count))
            g.log.info("clone%s volume created successfully", clone_count)

        # Start Cloned volume
        g.log.info("starting to Validate clone volumes are started")
        for clone_count in range(0, 2):
            ret, _, _ = volume_start(self.mnode, "clone%s" % clone_count)
            self.assertEqual(ret, 0, ("Failed to start clone%s" % clone_count))
            g.log.info("clone%s started successfully", clone_count)
        g.log.info("All the clone volumes are started Successfully")

        # Validate Volume start of cloned volume
        g.log.info("Starting to Validate Volume start")
        for clone_count in range(0, 2):
            vol_info = get_volume_info(self.mnode, "clone%s" % clone_count)
            if vol_info["clone%s" % clone_count]['statusStr'] != 'Started':
                raise ExecutionError("Failed to get volume info for clone%s" %
                                     clone_count)
            g.log.info("Volume clone%s is in Started state", clone_count)

        # Validate feature.uss enabled or not: clones are created from
        # snapshots taken *before* the volume reset, so they must still
        # carry features.uss=enable
        g.log.info("Validating feature.uss is enabled")
        option = "features.uss"
        for clone_count in range(0, 2):
            vol_option = get_volume_options(self.mnode,
                                            "clone%s" % clone_count, option)
            self.assertEqual(vol_option['features.uss'], 'enable', "Failed"
                             " to validate"
                             "volume options")
            g.log.info(
                "Successfully validated volume options"
                "for volume clone%s", clone_count)

        # Mount both the cloned volumes
        g.log.info("Mounting Cloned Volumes")
        for mount_obj in range(0, 2):
            self.mpoint = "/mnt/clone%s" % mount_obj
            cmd = "mkdir -p  %s" % self.mpoint
            ret, _, _ = g.run(self.clients[0], cmd)
            self.assertEqual(ret, 0, ("Creation of directory %s"
                                      "for mounting"
                                      "volume %s failed: Directory already"
                                      "present" %
                                      (self.mpoint, "clone%s" % mount_obj)))
            g.log.info(
                "Creation of directory %s for mounting volume %s "
                "success", self.mpoint, ("clone%s" % mount_obj))
            ret, _, _ = mount_volume("clone%s" % mount_obj, self.mount_type,
                                     self.mpoint, self.mnode, self.clients[0])
            self.assertEqual(ret, 0, ("clone%s is not mounted" % mount_obj))
            g.log.info("clone%s is mounted Successfully", mount_obj)

        # Perform I/O on both clone mounts.
        # BUGFIX: the original collected g.run() result tuples in a list
        # that was never validated; g.run blocks until the command
        # finishes, so check each return code directly.
        g.log.info("Starting to Perform I/O on Mountpoint")
        for mount_obj in range(0, 2):
            cmd = ("cd /mnt/clone%s/; for i in {1..10};"
                   "do touch file$i; done; cd;") % mount_obj
            ret, _, _ = g.run(self.clients[0], cmd)
            self.assertEqual(ret, 0,
                             "I/O failed on clone%s mount" % mount_obj)
        g.log.info("I/O on mountpoint is successful")

        # create snapshot
        g.log.info("Starting to Create snapshot of clone volume")
        ret0, _, _ = snap_create(self.mnode, "clone0", "snap2")
        self.assertEqual(ret0, 0, "Failed to create the snapshot"
                         "snap2 from clone0")
        g.log.info("Snapshots snap2 created successfully from clone0")
        ret1, _, _ = snap_create(self.mnode, "clone1", "snap3")
        self.assertEqual(ret1, 0, "Failed to create the snapshot snap3"
                         "from clone1")
        g.log.info("Snapshots snap3 created successfully from clone1")

        # Listing all Snapshots present
        g.log.info("Starting to list all snapshots")
        ret, _, _ = snap_list(self.mnode)
        self.assertEqual(ret, 0, ("Failed to list snapshots present"))
        g.log.info("Snapshots successfully listed")
# Example 26
    def test_server_side_healing_happens_only_when_glustershd_running(self):
        """
        Test Script which verifies that the server side healing must happen
        only if the heal daemon is running on the node where source brick
        resides.

         * Create and start the Replicate volume
         * Check the glustershd processes - Only 1 glustershd should be listed
         * Bring down the bricks without affecting the cluster
         * Create files on volume
         * kill the glustershd on node where bricks is running
         * bring the bricks up which was killed in previous steps
         * check the heal info - heal info must show pending heal info, heal
           shouldn't happen since glustershd is down on source node
         * issue heal
         * trigger client side heal
         * heal should complete successfully
        """
        # pylint: disable=too-many-locals,too-many-statements,too-many-lines

        # Disable granular heal if not disabled already.
        # BUGFIX: the failure message previously said "Unable to set
        # granular-entry-heal to on" although the code disables it.
        granular = get_volume_options(self.mnode, self.volname,
                                      'granular-entry-heal')
        if granular['cluster.granular-entry-heal'] == 'on':
            ret = disable_granular_heal(self.mnode, self.volname)
            self.assertTrue(ret,
                            "Unable to disable granular-entry-heal")

        # Setting Volume options
        options = {
            "metadata-self-heal": "on",
            "entry-self-heal": "on",
            "data-self-heal": "on"
        }
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, 'Failed to set options %s' % options)
        g.log.info("Successfully set %s for volume %s", options, self.volname)

        # Check the self-heal daemon process
        ret, pids = get_self_heal_daemon_pid(self.servers)
        self.assertTrue(ret, ("Either No self heal daemon process found or "
                              "more than One self heal daemon process "
                              "found : %s" % pids))
        g.log.info(
            "Successful in verifying self heal daemon process"
            " on all nodes %s", self.servers)

        # Select the bricks to bring offline
        bricks_to_bring_offline = (select_volume_bricks_to_bring_offline(
            self.mnode, self.volname))
        g.log.info("Brick List to bring offline : %s", bricks_to_bring_offline)

        # Bring down the selected bricks
        ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
        self.assertTrue(ret, "Failed to bring down the bricks")
        g.log.info("Brought down the brick process "
                   "for %s", bricks_to_bring_offline)

        # Write files on all mounts
        all_mounts_procs, num_files_to_write = [], 100
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f %d --base-file-name file %s" %
                   (self.script_upload_path, num_files_to_write,
                    mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate IO
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # Get online bricks list
        online_bricks = get_online_bricks_list(self.mnode, self.volname)
        g.log.info("Online Bricks for volume %s : %s", self.volname,
                   online_bricks)

        # Get the nodes where bricks are running (brick id format is
        # "<host>:<brick-path>", so the host is the part before ':')
        bring_offline_glustershd_nodes = []
        for brick in online_bricks:
            bring_offline_glustershd_nodes.append(brick.split(":")[0])
        g.log.info("self heal deamon on nodes %s to be killed",
                   bring_offline_glustershd_nodes)

        # Kill the self heal daemon process on nodes
        ret = bring_self_heal_daemon_process_offline(
            bring_offline_glustershd_nodes)
        self.assertTrue(
            ret, ("Unable to bring self heal daemon process"
                  " offline for nodes %s" % bring_offline_glustershd_nodes))
        g.log.info(
            "Sucessfully brought down self heal process for "
            "nodes %s", bring_offline_glustershd_nodes)

        # Check the heal info
        heal_info = get_heal_info_summary(self.mnode, self.volname)
        g.log.info("Successfully got heal info %s for the volume %s",
                   heal_info, self.volname)

        # Bring bricks online
        ret = bring_bricks_online(self.mnode, self.volname,
                                  bricks_to_bring_offline, 'glusterd_restart')
        self.assertTrue(
            ret,
            ("Failed to bring bricks: %s online" % bricks_to_bring_offline))

        # Issue heal: expected to fail since glustershd is down on the
        # source nodes
        ret = trigger_heal_full(self.mnode, self.volname)
        self.assertFalse(ret,
                         ("Able to trigger heal on volume %s where "
                          "self heal daemon is not running" % self.volname))
        g.log.info(
            "Expected : Unable to trigger heal on volume %s where "
            "self heal daemon is not running", self.volname)

        # Wait for 130 sec to heal: expected to stay pending
        ret = monitor_heal_completion(self.mnode, self.volname, 130)
        self.assertFalse(ret, ("Heal Completed on volume %s" % self.volname))
        g.log.info("Expected : Heal pending on volume %s", self.volname)

        # Check the heal info
        heal_info_after_triggering_heal = get_heal_info_summary(
            self.mnode, self.volname)
        g.log.info("Successfully got heal info for the volume %s",
                   self.volname)

        # Compare with heal pending with the files wrote: every source
        # brick must still have at least as many pending entries as the
        # files written, i.e. nothing was healed server-side
        for node in online_bricks:
            self.assertGreaterEqual(
                int(heal_info_after_triggering_heal[node]['numberOfEntries']),
                num_files_to_write,
                ("Some of the files are healed from source bricks %s where "
                 "self heal daemon is not running" % node))
        g.log.info("EXPECTED: No files are healed from source bricks where "
                   "self heal daemon is not running")

        # Unmount and Mount volume again as volume options were set
        # after mounting the volume
        for mount_obj in self.mounts:
            ret, _, _ = umount_volume(mount_obj.client_system,
                                      mount_obj.mountpoint)
            self.assertEqual(ret, 0,
                             "Failed to unmount %s" % mount_obj.client_system)
            ret, _, _ = mount_volume(self.volname,
                                     mtype='glusterfs',
                                     mpoint=mount_obj.mountpoint,
                                     mserver=self.mnode,
                                     mclient=mount_obj.client_system)
            self.assertEqual(ret, 0,
                             "Failed to mount %s" % mount_obj.client_system)

        # Trigger client side heal by reading/stat-ing every file
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("cd %s;for i in `seq 1 5`; do ls -l;cat *; stat *; sleep 5;"
                   " done " % (mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate IO
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "Reads failed on some of the clients")
        g.log.info("Reads successful on all mounts")

        # Wait for heal to complete
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, "Unable to heal the pending entries")
        g.log.info("Successfully healed the pending entries for volume %s",
                   self.volname)
# Example 27
    def test_op_version(self):
        '''
        -> Create Volume
        -> Get the current op-version
        -> Get the max supported op-version
        -> Verify vol info file exists or not in all servers
        -> Get the version number from vol info file
        -> If current op-version is less than max-op-version
        set the current op-version to max-op-version
        -> After vol set operation verify that version number
        increased by one or not in vol info file
        -> verify that current-op-version and max-op-version same or not.
        '''

        # Getting current op-version
        vol_dict = get_volume_options(self.mnode, 'all', 'cluster.op-version')
        current_op_version = int(vol_dict['cluster.op-version'])

        # Getting Max op-verison
        all_dict = get_volume_options(self.mnode, 'all')
        max_op_version = int(all_dict['cluster.max-op-version'])

        # File_path: path for vol info file
        # Checking vol file exist in all servers or not
        # (uses an rpyc connection per server to run os.path.isfile remotely)
        file_path = '/var/lib/glusterd/vols/' + self.volname + '/info'
        for server in self.servers:
            conn = g.rpyc_get_connection(server)
            ret = conn.modules.os.path.isfile(file_path)
            self.assertTrue(ret, "Vol file not found in server %s" % server)
            g.log.info("vol file found in server %s", server)
        # Close all deployed rpyc connections before continuing
        g.rpyc_close_deployed_servers()

        # Getting version number from vol info file
        # cmd: grepping  version from vol info file
        # NOTE: grep output is like "version=3", so split('=')[1] is the
        # numeric part; version_no holds the value EXPECTED AFTER the
        # volume-set below (current value + 1).
        ret, out, _ = g.run(self.mnode,
                            ' '.join(['grep', "'^version'", file_path]))
        version_list = out.split('=')
        version_no = int(version_list[1]) + 1

        # Comparing current op-version and max op-version; only bump the
        # cluster op-version when it is actually behind the max
        if current_op_version < max_op_version:

            # Set max-op-version
            ret = set_volume_options(self.mnode, 'all',
                                     {'cluster.op-version': max_op_version})
            self.assertTrue(ret, "Failed to set max op-version for cluster")
            g.log.info("Setting up max-op-version is successful for cluster")

            # Grepping version number from vol info file after
            # vol set operation
            ret, out, _ = g.run(self.mnode,
                                ' '.join(['grep', "'^version'", file_path]))
            version_list = out.split('=')
            after_version_no = int(version_list[1])

            # Comparing version number before and after vol set operations:
            # every successful volume set must bump the info-file version
            # by exactly one
            self.assertEqual(
                version_no, after_version_no,
                "After volume set operation version "
                "number not increased by one")
            g.log.info("After volume set operation version number "
                       "increased by one")

            # Getting current op-version (re-read after the set)
            vol_dict = get_volume_options(self.mnode, 'all',
                                          'cluster.op-version')
            current_op_version = int(vol_dict['cluster.op-version'])

        # Checking current-op-version and max-op-version equal or not
        self.assertEqual(
            current_op_version, max_op_version,
            "Current op-version and max op-version "
            "are not same")
        g.log.info("current-op-version and max-op-version of cluster are same")
    def test_volume_checksum_after_changing_network_ping_timeout(self):
        """
        * Create Volume
        * Mount the Volume
        * Create some files on mount point
        * calculate the checksum of Mount point
        * Check the default network ping timeout of the volume.
        * Change network ping timeout to some other value
        * calculate checksum again
        * checksum should be same without remounting the volume.
        """
        # Mounting volume as glusterfs
        ret = self.mount_volume(self.mounts)
        self.assertTrue(ret, "volume mount failed for %s" % self.volname)
        g.log.info("Volume mounted successfully : %s", self.volname)

        # Checking volume mounted or not
        ret = is_mounted(self.volname, self.mounts[0].mountpoint, self.mnode,
                         self.mounts[0].client_system, self.mount_type)
        self.assertTrue(ret, "Volume not mounted on mount point: %s"
                        % self.mounts[0].mountpoint)
        g.log.info("Volume %s mounted on %s", self.volname,
                   self.mounts[0].mountpoint)

        # run IOs
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_files -f 10 --base-file-name newfile %s"
                   % (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)

        # Validate IO
        g.log.info("Wait for IO to complete and validate IO ...")
        ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # Checksum calculation of mount point before
        # changing network.ping-timeout
        ret, before_checksum = collect_mounts_arequal(self.mounts)
        self.assertTrue(ret, "checksum failed to calculate for mount point")
        g.log.info("checksum calculated successfully")

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")

        # performing gluster volume get volname all and
        # getting network ping time out value (default is 42 seconds)
        volume_options = get_volume_options(self.mnode, self.volname, "all")
        self.assertIsNotNone(volume_options, "gluster volume get %s all "
                                             "command failed" % self.volname)
        g.log.info("gluster volume get %s all command executed "
                   "successfully", self.volname)
        ret = False
        if re.search(r'\b42\b', volume_options['network.ping-timeout']):
            ret = True
        self.assertTrue(ret, "network ping time out value is not correct")
        g.log.info("network ping time out value is correct")

        # Changing network ping time out value to specific volume
        self.networking_ops = {'network.ping-timeout': '12'}
        ret = set_volume_options(self.mnode, self.volname,
                                 self.networking_ops)
        self.assertTrue(ret, "Changing of network.ping-timeout "
                             "failed for :%s" % self.volname)
        g.log.info("Changing of network.ping-timeout "
                   "success for :%s", self.volname)

        # Checksum calculation of mount point after
        # changing network.ping-timeout
        ret, after_checksum = collect_mounts_arequal(self.mounts)
        self.assertTrue(ret, "checksum failed to calculate for mount point")
        g.log.info("checksum calculated successfully")

        # comparing list of checksums of mountpoints before and after
        # network.ping-timeout change.
        # BUGFIX: assertItemsEqual is Python-2-only (removed in Python 3
        # unittest); compare sorted lists instead, which is equivalent
        # (order-insensitive) and works on both versions.
        self.assertEqual(sorted(before_checksum), sorted(after_checksum),
                         "Checksum not same before and after "
                         "network.ping-timeout change")
        g.log.info("checksum same before and after "
                   "changing network.ping-timeout")

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")
# Example 29
    def test_client_side_quorum_with_auto_option_overwrite_fixed(self):
        """
        Test Script to verify the Client Side Quorum with auto option

        * check the default value of cluster.quorum-type
        * try to set any junk value to cluster.quorum-type
          other than {none,auto,fixed}
        * check the default value of cluster.quorum-count
        * set cluster.quorum-type to fixed and cluster.quorum-count to 1
        * start I/O from the mount point
        * kill 2 of the brick process from the each replica set.
        * set cluster.quorum-type to auto

        """
        # pylint: disable=too-many-locals,too-many-statements
        # check the default value of cluster.quorum-type
        option = "cluster.quorum-type"
        g.log.info("Getting %s for the volume %s", option, self.volname)
        option_dict = get_volume_options(self.mnode, self.volname, option)
        self.assertIsNotNone(option_dict,
                             ("Failed to get %s volume option"
                              " for volume %s" % (option, self.volname)))
        self.assertEqual(option_dict['cluster.quorum-type'], 'auto',
                         ("Default value for %s is not auto"
                          " for volume %s" % (option, self.volname)))
        # NOTE: fixed typo in the log message below ("Succesfully")
        g.log.info("Successfully verified default value of %s for volume %s",
                   option, self.volname)

        # set the junk value to cluster.quorum-type; every attempt must be
        # rejected by the CLI
        junk_values = ["123", "abcd", "fixxed", "Aauto"]
        for each_junk_value in junk_values:
            options = {"cluster.quorum-type": "%s" % each_junk_value}
            g.log.info("setting %s for the volume "
                       "%s", options, self.volname)
            ret = set_volume_options(self.mnode, self.volname, options)
            self.assertFalse(ret, ("Able to set junk value %s for "
                                   "volume %s" % (options, self.volname)))
            g.log.info(
                "Expected: Unable to set junk value %s "
                "for volume %s", options, self.volname)

        # check the default value of cluster.quorum-count
        option = "cluster.quorum-count"
        g.log.info("Getting %s for the volume %s", option, self.volname)
        option_dict = get_volume_options(self.mnode, self.volname, option)
        self.assertIsNotNone(option_dict,
                             ("Failed to get %s volume option"
                              " for volume %s" % (option, self.volname)))
        self.assertEqual(option_dict['cluster.quorum-count'], '(null)',
                         ("Default value for %s is not null"
                          " for volume %s" % (option, self.volname)))
        g.log.info("Successful in getting %s for the volume %s", option,
                   self.volname)

        # set cluster.quorum-type to fixed and cluster.quorum-count to 1,
        # i.e. only one brick per replica set needs to be up for writes
        options = {"cluster.quorum-type": "fixed", "cluster.quorum-count": "1"}
        g.log.info("setting %s for the volume %s", options, self.volname)
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, ("Unable to set %s for volume %s" %
                              (options, self.volname)))
        g.log.info("Successfully set %s for volume %s", options, self.volname)

        # create files
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mounts)
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name file %s" %
               (self.script_upload_path, self.mounts[0].mountpoint))
        ret, _, err = g.run(self.mounts[0].client_system, cmd)
        self.assertFalse(
            ret,
            "IO failed on %s with '%s'" % (self.mounts[0].client_system, err))

        # get the subvolumes
        g.log.info("starting to get subvolumes for volume %s", self.volname)
        subvols_dict = get_subvols(self.mnode, self.volname)
        num_subvols = len(subvols_dict['volume_subvols'])
        g.log.info("Number of subvolumes in volume %s is %s", self.volname,
                   num_subvols)

        # bring bricks offline( 2 bricks ) for all the subvolumes
        for i in range(0, num_subvols):
            subvol_brick_list = subvols_dict['volume_subvols'][i]
            g.log.info("sub-volume %s brick list : %s", i, subvol_brick_list)
            bricks_to_bring_offline = subvol_brick_list[0:2]
            g.log.info("Going to bring down the brick process "
                       "for %s", bricks_to_bring_offline)
            ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
            self.assertTrue(ret, ("Failed to bring down the bricks. Please "
                                  "check the log file for more details."))
            g.log.info("Brought down the brick process "
                       "for %s successfully", bricks_to_bring_offline)

        # create files: with quorum-type fixed and quorum-count 1, I/O must
        # still succeed with only one brick up per replica set
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mounts)
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name second_file %s" %
               (self.script_upload_path, self.mounts[0].mountpoint))
        ret, _, err = g.run(self.mounts[0].client_system, cmd)
        self.assertFalse(
            ret,
            "IO failed on %s with '%s'" % (self.mounts[0].client_system, err))

        # set cluster.quorum-type to auto: with 2 of 3 bricks down, quorum
        # is immediately lost and further writes must fail
        options = {"cluster.quorum-type": "auto"}
        g.log.info("setting %s for volume %s", options, self.volname)
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, ("Unable to set volume option %s for "
                              "volume %s" % (options, self.volname)))
        g.log.info("Successfully set %s for volume %s", options, self.volname)

        # create files (expected to fail with ENOTCONN)
        all_mounts_procs = []
        # NOTE: fixed typo in the log message below ("mountpount")
        g.log.info("Starting IO on mountpoint...")
        g.log.info("mounts: %s", self.mounts)
        cmd = ("mkdir %s/newdir && touch %s/newdir/myfile{1..3}.txt" %
               (self.mounts[0].mountpoint, self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system,
                           cmd,
                           user=self.mounts[0].user)
        all_mounts_procs.append(proc)

        # Validate IO
        g.log.info("Validating whether IO failed with "
                   "Transport endpoint is not connected")
        ret, _ = is_io_procs_fail_with_error(self, all_mounts_procs,
                                             self.mounts, self.mount_type)
        self.assertTrue(ret, ("Unexpected error and IO successful"
                              " on not connected transport endpoint"))
        g.log.info("EXPECTED: Transport endpoint is not connected"
                   " while creating files")
Ejemplo n.º 30
0
    def test_cli_reset_commands_behaviour(self):
        """Verify that 'gluster v reset all' resets cluster-wide options.

        Steps:
        1. Set cluster.brick-multiplex to enabled.
        2. Create and start 2 volumes of type 1x3 and 2x3.
        3. Check if cluster.brick-multiplex is enabled.
        4. Reset the cluster using "gluster v reset all".
        5. Check if cluster.brick-multiplex is disabled.
        6. Create a new volume of type 2x3.
        7. Set cluster.brick-multiplex to enabled.
        8. Stop and start all three volumes.
        9. Check that each brick pid matches a glusterfsd pid and that
           exactly one glusterfsd process runs per node (i.e. all bricks
           are multiplexed into a single process).
        """
        # pylint: disable=too-many-statements
        # Setup Volumes
        self.volume_configs = []

        # Define volumes to create
        # Define replicated volume
        self.volume['voltype'] = {
            'type': 'replicated',
            'replica_count': 3,
            'transport': 'tcp'}

        volume_config = {'name': '%s' % self.volume['voltype']['type'],
                         'servers': self.servers,
                         'voltype': self.volume['voltype']}
        self.volume_configs.append(volume_config)

        # Define 2x3 distributed-replicated volume
        self.volume['voltype'] = {
            'type': 'distributed-replicated',
            'dist_count': 2,
            'replica_count': 3,
            'transport': 'tcp'}

        volume_config = {'name': '%s' % self.volume['voltype']['type'],
                         'servers': self.servers,
                         'voltype': self.volume['voltype']}
        self.volume_configs.append(volume_config)

        # Create volumes using the config.
        self.use_config_setup_volumes()

        # Check if volume option cluster.brick-multiplex is enabled
        volume_list = get_volume_list(self.mnode)
        for volume in volume_list:
            options_dict = get_volume_options(self.mnode, volume)
            self.assertEqual(options_dict['cluster.brick-multiplex'], 'enable',
                             'Option brick-multiplex is not enabled')
            g.log.info('Option brick-multiplex is enabled for volume %s',
                       volume)

        # Reset cluster: this must clear cluster-wide options, including
        # cluster.brick-multiplex, back to their defaults.
        g.log.info("Reset cluster...")
        cmd = 'gluster v reset all'
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertFalse(ret, "Failed on reset cluster")
        g.log.info("Successfully reset cluster")

        # Check if brick-multiplex is disabled
        g.log.info("Checking for brick multiplexing status...")
        self.assertEqual('disable', get_brick_mux_status(self.mnode),
                         "Brick multiplexing status is not 'disable'")
        g.log.info("Brick multiplexing status is 'disable'")

        # Create new distributed-replicated volume
        # Define new 2x3 distributed-replicated volume
        new_vol = 'new_vol'
        self.volume['voltype'] = {
            'type': 'distributed-replicated',
            'dist_count': 2,
            'replica_count': 3,
            'transport': 'tcp'}

        volume_config = {'name': '%s' % new_vol,
                         'servers': self.servers,
                         'voltype': self.volume['voltype']}
        self.volume_configs.append(volume_config)

        # Create volumes using the config.
        self.use_config_setup_volumes()

        # Resetting brick-mux back to enabled.
        g.log.info("Enabling brick multiplexing...")
        if not enable_brick_mux(self.mnode):
            raise ExecutionError("Failed to enable brick multiplexing")
        g.log.info("Enabled brick multiplexing successfully")

        # Restart all volumes so the bricks are re-spawned with
        # multiplexing in effect.
        g.log.info("Restarting all volumes...")
        volume_list = get_volume_list(self.mnode)
        for volume in volume_list:
            # Stop the volume
            g.log.info("Stopping volume %s...", volume)
            ret, _, err = volume_stop(self.mnode, volume)
            self.assertFalse(ret, "Failed on stopping volume %s: %s"
                             % (volume, err))
            g.log.info("Stopped %s successfully", volume)

            # Sleeping for 2 seconds between stop and start.
            sleep(2)

            # Start the volume
            g.log.info("Starting volume %s...", volume)
            ret, _, err = volume_start(self.mnode, volume)
            self.assertFalse(ret, "Failed on starting volume %s: %s"
                             % (volume, err))
            g.log.info("Started %s successfully", volume)
        g.log.info("Restarted all volumes successfully")

        # Check if bricks pid don`t match glusterfsd pid
        g.log.info("Checking if bricks pid don`t match glusterfsd pid...")
        for volume in volume_list:
            g.log.info("Checking if bricks pid don`t match glusterfsd pid "
                       "for %s volume...", volume)
            self.assertTrue(
                check_brick_pid_matches_glusterfsd_pid(self.mnode, volume),
                "Bricks pid match glusterfsd pid for %s volume..." % volume)
            g.log.info("Bricks pid don`t match glusterfsd pid "
                       "for %s volume...", volume)

        # With brick multiplexing enabled, all bricks on a node share one
        # glusterfsd, so exactly one process is expected per server.
        for server in self.servers:
            ret = get_brick_processes_count(server)
            self.assertEqual(ret, 1,
                             "Expected exactly one glusterfsd process on %s, "
                             "found %s" % (server, ret))
        g.log.info("Only one glusterfsd found on all the nodes.")