def test_nfs_ganesha_enable_disable_cluster(self):
        """
        Tests script to check that the nfs-ganesha volume gets exported after
        multiple enable/disable of the cluster.
        """

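        # Toggle the nfs-ganesha cluster several times; after each disable the
        # volume should get unexported automatically, and a re-enable on its
        # own should not export it again.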
        for i in range(5):
            g.log.info(
                "Executing multiple enable/disable of nfs ganesha "
                "cluster. Count : %s ", str(i))

            ret, _, _ = disable_nfs_ganesha(self.mnode)
            self.assertEqual(ret, 0, ("Failed to disable nfs-ganesha cluster"))

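            # Give the change a moment to settle, then confirm the volume was
            # unexported (ganesha.enable should read 'off')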
            time.sleep(2)
            vol_option = get_volume_options(self.mnode,
                                            self.volname,
                                            option='ganesha.enable')
            self.assertIsNotNone(vol_option,
                                 "Failed to get ganesha.enable volume"
                                 " option for %s" % self.volname)

            self.assertEqual(
                vol_option.get('ganesha.enable'), 'off', "Failed "
                "to unexport volume by default after disabling "
                "cluster")

            ret, _, _ = enable_nfs_ganesha(self.mnode)
            self.assertEqual(ret, 0, ("Failed to enable nfs-ganesha cluster"))

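            # Re-enabling the cluster should leave the volume unexported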
            time.sleep(2)
            vol_option = get_volume_options(self.mnode,
                                            self.volname,
                                            option='ganesha.enable')
            self.assertIsNotNone(vol_option,
                                 "Failed to get ganesha.enable volume"
                                 " option for %s" % self.volname)

            self.assertEqual(
                vol_option.get('ganesha.enable'), 'off', "Volume "
                "%s is exported by default after disable and "
                "enable of cluster which is unexpected." % self.volname)

        # Export volume after disable and enable of cluster
        ret, _, _ = export_nfs_ganesha_volume(mnode=self.mnode,
                                              volname=self.volname)
        self.assertEqual(
            ret, 0, ("Failed to export volume %s "
                     "after disable and enable of cluster" % self.volname))
        time.sleep(5)

        # List the volume exports
        _, _, _ = g.run(self.mnode, "showmount -e")

    def tearDownClass(cls,
                      umount_vol=True,
                      cleanup_vol=True,
                      teardown_nfs_ganesha_cluster=True):
        """Teardown the export, mounts and volume.
        """

        # Unmount volume
        if umount_vol:
            _rc = True
            for mount_obj in cls.mounts:
                ret = mount_obj.unmount()
                if not ret:
                    g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'",
                                mount_obj.server_system, mount_obj.volname,
                                mount_obj.client_system, mount_obj.mountpoint)
                    _rc = False
            if not _rc:
                raise ExecutionError("Unmount of all mounts are not "
                                     "successful")

        # Cleanup volume
        if cleanup_vol:

            volinfo = get_volume_info(cls.mnode, cls.volname)
            if volinfo is None or cls.volname not in volinfo:
                g.log.info("Volume %s does not exist in %s" %
                           (cls.volname, cls.mnode))
            else:
                # Unexport volume, if it is not unexported already
                vol_option = get_volume_options(cls.mnode,
                                                cls.volname,
                                                option='ganesha.enable')
                if vol_option is None:
                    raise ExecutionError("Failed to get ganesha.enable volume "
                                         " option for %s " % cls.volume)
                if vol_option['ganesha.enable'] != 'off':
                    if is_volume_exported(cls.mnode, cls.volname, "nfs"):
                        ret, out, err = unexport_nfs_ganesha_volume(
                            mnode=cls.mnode, volname=cls.volname)
                        if ret != 0:
                            raise ExecutionError(
                                "Failed to unexport volume %s" % cls.volname)
                        time.sleep(5)
                else:
                    g.log.info("Volume %s is unexported already" % cls.volname)

                _, _, _ = g.run(cls.mnode, "showmount -e")

            ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
            if not ret:
                raise ExecutionError("cleanup volume %s failed", cls.volname)

        # All Volume Info
        volume_info(cls.mnode)

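        # Finally run the parent class teardown, which optionally deletes the
        # nfs-ganesha cluster itself (Python 2 style unbound-method call)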
        (NfsGaneshaClusterSetupClass.tearDownClass.im_func(
            cls, delete_nfs_ganesha_cluster=teardown_nfs_ganesha_cluster))
    def test_lower_gluster_op_version(self):
        """
        - Create volume
        - Get the volume op-version
        - Try to set a lower op-version (should fail)
        - Try to set an invalid op-version (should fail)
        """

        # Get the volume op-version
        ret = get_volume_options(self.mnode, self.volname,
                                 'cluster.op-version')
        self.assertIsNotNone(ret, "Failed to get the op-version")
        g.log.info("Successfully get the op-version")

        # Lowest opversion is 30000
        lowest_op_version = 30000
        invalid_op_version = "abc"
        lower_op_version_dict = {'cluster.op-version': lowest_op_version}
        invalid_op_version_dict = {'cluster.op-version': invalid_op_version}

        # Set the volume option with lower op-version
        ret = set_volume_options(self.mnode, 'all', lower_op_version_dict)
        self.assertFalse(
            ret, "Expected: Should not be able to set lower "
            "op-version \n Actual: Successfully set the lower"
            " op-version")
        g.log.info("Failed to set op-version %s as "
                   "expected", lowest_op_version)

        # Setting invalid opversion
        ret = set_volume_options(self.mnode, 'all', invalid_op_version_dict)
        self.assertFalse(
            ret, "Expected: Should not be able to set invalid "
            "op-version \n Actual: Successfully set the invalid"
            " op-version")
        g.log.info("Failed to set op-version %s as "
                   "expected", invalid_op_version)

    def tearDown(self):

        # Clean up the volumes created specifically for this test.
        for i in range(5):
            volname = "nfsvol" + str(i)
            volinfo = get_volume_info(self.mnode, volname)
            if volinfo is None or volname not in volinfo:
                g.log.info("Volume %s does not exist in %s", volname,
                           self.mnode)
                continue

            # Unexport volume, if it is not unexported already
            vol_option = get_volume_options(self.mnode,
                                            volname,
                                            option='ganesha.enable')
            if vol_option is None:
                raise ExecutionError("Failed to get ganesha.enable volume "
                                     " option for %s " % volname)
            if vol_option['ganesha.enable'] != 'off':
                if is_volume_exported(self.mnode, volname, "nfs"):
                    ret, _, _ = unexport_nfs_ganesha_volume(mnode=self.mnode,
                                                            volname=volname)
                    if ret != 0:
                        raise ExecutionError("Failed to unexport volume %s " %
                                             volname)
                    time.sleep(5)
            else:
                g.log.info("Volume %s is unexported already", volname)

            _, _, _ = g.run(self.mnode, "showmount -e")

            ret = cleanup_volume(mnode=self.mnode, volname=volname)
            if not ret:
                raise ExecutionError("cleanup volume %s failed" % volname)

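        # Hand over to the parent IO base class teardown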
        NfsGaneshaIOBaseClass.tearDown.im_func(self)

    def test_nfs_ganesha_enable_disable_cluster(self):
        """
        Tests script to check that the nfs-ganesha volume gets exported after
        multiple enable/disable of the cluster.
        """
        for i in range(1, 6):
            g.log.info(
                "Executing multiple enable/disable of nfs ganesha "
                "cluster. Count : %s ", str(i))

            ret, _, _ = disable_nfs_ganesha(self.mnode)
            self.assertEqual(ret, 0, "Failed to disable nfs-ganesha cluster")

            sleep(2)
            vol_option = get_volume_options(self.mnode,
                                            self.volname,
                                            option='ganesha.enable')
            self.assertIsNotNone(vol_option,
                                 "Failed to get ganesha.enable volume"
                                 " option for %s" % self.volname)

            self.assertEqual(
                vol_option.get('ganesha.enable'), 'off', "Failed "
                "to unexport volume by default after disabling "
                "cluster")

            ret, _, _ = enable_nfs_ganesha(self.mnode)
            self.assertEqual(ret, 0, "Failed to enable nfs-ganesha cluster")

            # Check nfs-ganesha status
            for itr in range(5):
                ret = is_nfs_ganesha_cluster_in_healthy_state(self.mnode)
                if ret:
                    g.log.info("nfs-ganesha cluster is healthy")
                    break
                g.log.warning(
                    "nfs-ganesha cluster is not healthy. "
                    "Iteration: %s", str(itr))
                self.assertNotEqual(
                    itr, 4, "Wait timeout: nfs-ganesha cluster "
                    "is not healthy")
                sleep(3)

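            # Even with the cluster healthy again, the volume should stay
            # unexported until ganesha.enable is explicitly set 'on'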
            vol_option = get_volume_options(self.mnode,
                                            self.volname,
                                            option='ganesha.enable')
            self.assertIsNotNone(vol_option,
                                 "Failed to get ganesha.enable volume"
                                 " option for %s" % self.volname)

            self.assertEqual(
                vol_option.get('ganesha.enable'), 'off', "Volume "
                "%s is exported by default after disable and "
                "enable of cluster which is unexpected." % self.volname)

            # Export volume after disable and enable of cluster
            ret, _, _ = export_nfs_ganesha_volume(mnode=self.mnode,
                                                  volname=self.volname)
            self.assertEqual(
                ret, 0, ("Failed to export volume %s "
                         "after disable and enable of cluster" % self.volname))

            # Wait for volume to get exported
            ret = wait_for_nfs_ganesha_volume_to_get_exported(
                self.mnode, self.volname)
            self.assertTrue(
                ret, "Volume %s is not exported after setting "
                "ganesha.enable 'on'" % self.volname)
            g.log.info("Exported volume after enabling nfs-ganesha cluster")

    def setUpClass(cls):
        """Setup volume exports volume with nfs-ganesha,
            mounts the volume.
        """
        NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)

        # Peer probe servers
        ret = peer_probe_servers(cls.mnode, cls.servers)
        if not ret:
            raise ExecutionError("Failed to peer probe servers")

        g.log.info("All peers are in connected state")

        # Peer Status from mnode
        peer_status(cls.mnode)

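        # Mount gluster_shared_storage on every server; nfs-ganesha HA keeps
        # its configuration on this shared volume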
        for server in cls.servers:
            mount_info = [{
                'protocol': 'glusterfs',
                'mountpoint': '/run/gluster/shared_storage',
                'server': server,
                'client': {
                    'host': server
                },
                'volname': 'gluster_shared_storage',
                'options': ''
            }]

            mount_obj = create_mount_objs(mount_info)
            if not mount_obj[0].is_mounted():
                ret = mount_obj[0].mount()
                if not ret:
                    raise ExecutionError(
                        "Unable to mount volume '%s:%s' "
                        "on '%s:%s'" %
                        (mount_obj[0].server_system, mount_obj[0].volname,
                         mount_obj[0].client_system, mount_obj[0].mountpoint))

        # Setup Volume
        ret = setup_volume(mnode=cls.mnode,
                           all_servers_info=cls.all_servers_info,
                           volume_config=cls.volume,
                           force=True)
        if not ret:
            raise ExecutionError("Setup volume %s failed", cls.volume)
        time.sleep(10)

        # Export volume with nfs ganesha, if it is not exported already
        vol_option = get_volume_options(cls.mnode,
                                        cls.volname,
                                        option='ganesha.enable')
        if vol_option is None:
            raise ExecutionError("Failed to get ganesha.enable volume option "
                                 "for %s " % cls.volume)
        if vol_option['ganesha.enable'] != 'on':
            ret, out, err = export_nfs_ganesha_volume(mnode=cls.mnode,
                                                      volname=cls.volname)
            if ret != 0:
                raise ExecutionError("Failed to export volume %s "
                                     "as NFS export" % cls.volname)
            time.sleep(5)

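        # Wait until showmount lists the volume as an NFS export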
        ret = wait_for_nfs_ganesha_volume_to_get_exported(
            cls.mnode, cls.volname)
        if not ret:
            raise ExecutionError("Failed to export volume %s. volume is "
                                 "not listed in showmount" % cls.volname)
        else:
            g.log.info("Volume %s is exported successfully" % cls.volname)

        # Log Volume Info and Status
        ret = log_volume_info_and_status(cls.mnode, cls.volname)
        if not ret:
            raise ExecutionError("Logging volume %s info and status failed",
                                 cls.volname)

        # Create Mounts
        _rc = True
        for mount_obj in cls.mounts:
            ret = mount_obj.mount()
            if not ret:
                g.log.error("Unable to mount volume '%s:%s' on '%s:%s'",
                            mount_obj.server_system, mount_obj.volname,
                            mount_obj.client_system, mount_obj.mountpoint)
                _rc = False
        if not _rc:
            raise ExecutionError("Mounting volume %s on few clients failed",
                                 cls.volname)

        # Get info of mount before the IO
        log_mounts_info(cls.mounts)

    def test_client_side_quorum_auto_local_to_volume_not_cluster(self):
        """
        - create five volumes as below
            vol1->2x2
            vol2->2x2
            vol3->2x3
            vol4->2x3
            vol5->a pure distribute volume
        - do IO to all vols
        - set client side quorum to auto for vol1 and vol3
        - get the client side quorum value for all vols and check for result
        - bring down b0 on vol1 and b0 and b1 on vol3
        - try to create files on all vols and check for result
        """
        # Creating files for all volumes
        for mount_point in self.mount_points:
            self.all_mounts_procs = []
            for mount_obj in self.mounts:
                g.log.info("Generating data for %s:%s"
                           % (mount_obj.client_system, mount_point))
                # Create files
                g.log.info('Creating files...')
                command = ("python %s create_files -f 50 "
                           "--fixed-file-size 1k %s"
                           % (self.script_upload_path, mount_point))

                proc = g.run_async(mount_obj.client_system, command,
                                   user=mount_obj.user)
                self.all_mounts_procs.append(proc)
            self.io_validation_complete = False

            # Validate IO
            g.log.info("Wait for IO to complete and validate IO ...")
            ret = validate_io_procs(self.all_mounts_procs, self.mounts)
            self.assertTrue(ret, "IO failed on some of the clients")
            self.io_validation_complete = True
            g.log.info("IO is successful on all mounts")

        volumes_to_change_options = ['1', '3']
        # set cluster.quorum-type to auto
        for vol_number in volumes_to_change_options:
            vol_name = ('testvol_distributed-replicated_%s'
                        % vol_number)
            options = {"cluster.quorum-type": "auto"}
            g.log.info("setting cluster.quorum-type to auto on "
                       "volume testvol_distributed-replicated_%s"
                       % vol_number)
            ret = set_volume_options(self.mnode, vol_name, options)
            self.assertTrue(ret, ("Unable to set volume option %s for "
                                  "volume %s" % (options, vol_name)))
            g.log.info("Sucessfully set %s for volume %s"
                       % (options, vol_name))

        # check if options are set correctly
        volume_list = get_volume_list(self.mnode)
        for volume in volume_list:
            g.log.info('Checking for cluster.quorum-type option for %s'
                       % volume)
            volume_options_dict = get_volume_options(self.mnode,
                                                     volume,
                                                     'cluster.quorum-type')
            if (volume == 'testvol_distributed-replicated_1' or
                    volume == 'testvol_distributed-replicated_3'):
                self.assertEqual(volume_options_dict['cluster.quorum-type'],
                                 'auto',
                                 'Option cluster.quorum-type '
                                 'is not AUTO for %s'
                                 % volume)
                g.log.info('Option cluster.quorum-type is AUTO for %s'
                           % volume)
            else:
                self.assertEqual(volume_options_dict['cluster.quorum-type'],
                                 'none',
                                 'Option cluster.quorum-type '
                                 'is not NONE for %s'
                                 % volume)
                g.log.info('Option cluster.quorum-type is NONE for %s'
                           % volume)

        # Get first brick server and brick path
        # and get first file from filelist then delete it from volume
        vols_file_list = {}
        for volume in volume_list:
            brick_list = get_all_bricks(self.mnode, volume)
            brick_server, brick_path = brick_list[0].split(':')
            ret, file_list, err = g.run(brick_server, 'ls %s' % brick_path)
            self.assertFalse(ret, 'Failed to ls files on %s' % brick_server)
            file_from_vol = file_list.splitlines()[0]
            ret, out, err = g.run(brick_server, 'rm -rf %s/%s'
                                  % (brick_path, file_from_vol))
            self.assertFalse(ret, 'Failed to rm file on %s' % brick_server)
            vols_file_list[volume] = file_from_vol

        # bring bricks offline
        # bring first brick for testvol_distributed-replicated_1
        volname = 'testvol_distributed-replicated_1'
        brick_list = get_all_bricks(self.mnode, volname)
        bricks_to_bring_offline = brick_list[0:1]
        g.log.info('Bringing bricks %s offline...' % bricks_to_bring_offline)
        ret = bring_bricks_offline(volname, bricks_to_bring_offline)
        self.assertTrue(ret, 'Failed to bring bricks %s offline' %
                        bricks_to_bring_offline)

        ret = are_bricks_offline(self.mnode, volname,
                                 bricks_to_bring_offline)
        self.assertTrue(ret, 'Bricks %s are not offline'
                        % bricks_to_bring_offline)
        g.log.info('Bringing bricks %s offline is successful'
                   % bricks_to_bring_offline)

        # bring first two bricks for testvol_distributed-replicated_3
        volname = 'testvol_distributed-replicated_3'
        brick_list = get_all_bricks(self.mnode, volname)
        bricks_to_bring_offline = brick_list[0:2]
        g.log.info('Bringing bricks %s offline...' % bricks_to_bring_offline)
        ret = bring_bricks_offline(volname, bricks_to_bring_offline)
        self.assertTrue(ret, 'Failed to bring bricks %s offline' %
                        bricks_to_bring_offline)

        ret = are_bricks_offline(self.mnode, volname,
                                 bricks_to_bring_offline)
        self.assertTrue(ret, 'Bricks %s are not offline'
                        % bricks_to_bring_offline)
        g.log.info('Bringing bricks %s offline is successful'
                   % bricks_to_bring_offline)

        # merge two dicts (volname: file_to_delete) and (volname: mountpoint)
        temp_dict = [vols_file_list, self.mount_points_and_volnames]
        file_to_delete_to_mountpoint_dict = {}
        for k in vols_file_list.iterkeys():
            file_to_delete_to_mountpoint_dict[k] = tuple(
                d[k] for d in temp_dict)

        # create files on all volumes and check for result
        for volname, file_and_mountpoint in \
                file_to_delete_to_mountpoint_dict.iteritems():
            filename, mountpoint = file_and_mountpoint

            # check for ROFS error for read-only file system for
            # testvol_distributed-replicated_1 and
            # testvol_distributed-replicated_3
            if (volname == 'testvol_distributed-replicated_1' or
                    volname == 'testvol_distributed-replicated_3'):
                # create new file taken from vols_file_list
                g.log.info("Start creating new file on all mounts...")
                all_mounts_procs = []
                cmd = ("touch %s/%s" % (mountpoint, filename))

                proc = g.run_async(self.client, cmd)
                all_mounts_procs.append(proc)

                # Validate IO
                g.log.info("Validating if IO failed with read-only filesystem")
                ret = is_io_procs_fail_with_rofs(self, all_mounts_procs,
                                                 self.mounts)
                self.assertTrue(ret, ("Unexpected error and IO successfull"
                                      " on read-only filesystem"))
                g.log.info("EXPECTED: "
                           "Read-only file system in IO while creating file")

            # check for no errors for all the rest volumes
            else:
                # create new file taken from vols_file_list
                g.log.info("Start creating new file on all mounts...")
                all_mounts_procs = []
                cmd = ("touch %s/%s" % (mountpoint, filename))

                proc = g.run_async(self.client, cmd)
                all_mounts_procs.append(proc)

                # Validate IO
                g.log.info("Validating IO on mounts")
                ret = validate_io_procs(all_mounts_procs, self.mounts)
                self.assertTrue(ret, "IO failed on some of the clients")
                g.log.info("IO is successful on all mounts")

    def test_nfs_ganesha_export_with_multiple_volumes(self):
        """
        Testcase to verify that multiple volumes get exported while IO is in
        progress.
        """

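        # Create and export five small distributed volumes while IO started
        # earlier keeps running on the existing mounts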
        for i in range(5):
            self.volume['name'] = "nfsvol" + str(i)
            self.volume['voltype']['type'] = 'distributed'
            self.volume['voltype']['replica_count'] = 1
            self.volume['voltype']['dist_count'] = 2

            # Create volume
            ret = setup_volume(mnode=self.mnode,
                               all_servers_info=self.all_servers_info,
                               volume_config=self.volume,
                               force=True)
            self.assertTrue(ret, "Setup volume %s failed"
                            % self.volume['name'])
            time.sleep(5)

            # Export volume with nfs ganesha, if it is not exported already
            vol_option = get_volume_options(self.mnode,
                                            self.volume['name'],
                                            option='ganesha.enable')
            self.assertIsNotNone(
                vol_option, "Failed to get ganesha.enable "
                "volume option for %s" % self.volume['name'])
            if vol_option['ganesha.enable'] != 'on':
                ret, _, _ = export_nfs_ganesha_volume(
                    mnode=self.mnode, volname=self.volume['name'])
                self.assertEqual(
                    ret, 0, "Failed to export volume %s as NFS "
                    "export" % self.volume['name'])
                time.sleep(5)
            else:
                g.log.info("Volume %s is exported already",
                           self.volume['name'])

            # Wait for the volume to get exported. Max wait time is
            # 120 seconds.
            ret = wait_for_nfs_ganesha_volume_to_get_exported(
                self.mnode, (self.volume['name']))
            self.assertTrue(ret, ("Failed to export volume %s after "
                                  "starting volume when IO is running on "
                                  "another volume" % self.volume['name']))

            # Log Volume Info and Status
            ret = log_volume_info_and_status(self.mnode, self.volume['name'])
            self.assertTrue(
                ret, "Logging volume %s info and status failed" %
                self.volume['name'])

        # Validate IO
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.io_validation_complete = True
        self.assertTrue(ret, "IO failed on some of the clients")

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")