def test_nfs_ganesha_export_after_vol_restart(self):
        """
        Tests script to check nfs-ganesha volume gets exported after
        multiple volume restarts.
        """
        for i in range(1, 6):
            g.log.info(
                "Testing nfs ganesha export after volume stop/start."
                "Count : %s", str(i))

            # Stopping volume
            ret = volume_stop(self.mnode, self.volname)
            self.assertTrue(ret, ("Failed to stop volume %s" % self.volname))

            # Waiting for few seconds for volume unexport. Max wait time is
            # 120 seconds.
            ret = wait_for_nfs_ganesha_volume_to_get_unexported(
                self.mnode, self.volname)
            self.assertTrue(ret, ("Failed to unexport volume %s after "
                                  "stopping volume" % self.volname))

            # Starting volume
            ret = volume_start(self.mnode, self.volname)
            self.assertTrue(ret, ("Failed to start volume %s" % self.volname))

            # Waiting for few seconds for volume export. Max wait time is
            # 120 seconds.
            ret = wait_for_nfs_ganesha_volume_to_get_exported(
                self.mnode, self.volname)
            self.assertTrue(ret, ("Failed to export volume %s after "
                                  "starting volume" % self.volname))
    def test_nfs_ganesha_exportID_after_vol_restart(self):
        """
        Tests script to check nfs-ganesha volume gets exported with same
        Export ID after multiple volume restarts.

        Steps:
        1. Create and Export the Volume
        2. Stop and Start the volume multiple times
        3. Check for export ID
           Export ID should not change
        """
        for i in range(1, 4):
            g.log.info(
                "Testing nfs ganesha exportID after volume stop and "
                "start.\n Count : %s", str(i))

            # Stopping volume
            ret = volume_stop(self.mnode, self.volname)
            self.assertTrue(ret, ("Failed to stop volume %s" % self.volname))
            g.log.info("Volume is stopped")

            # Waiting for few seconds for volume unexport. Max wait time is
            # 120 seconds.
            ret = wait_for_nfs_ganesha_volume_to_get_unexported(
                self.mnode, self.volname)
            self.assertTrue(ret, ("Failed to unexport volume %s after "
                                  "stopping volume" % self.volname))
            g.log.info("Volume is unexported via ganesha")

            # Starting volume
            ret = volume_start(self.mnode, self.volname)
            self.assertTrue(ret, ("Failed to start volume %s" % self.volname))
            g.log.info("Volume is started")

            # Waiting for few seconds for volume export. Max wait time is
            # 120 seconds.
            ret = wait_for_nfs_ganesha_volume_to_get_exported(
                self.mnode, self.volname)
            self.assertTrue(ret, ("Failed to export volume %s after "
                                  "starting volume" % self.volname))
            g.log.info("Volume is exported via ganesha")

            # Check for Export ID. Use '[0-9]+' so a multi-digit
            # Export_Id is extracted as one number; the previous
            # '[0-9]' pattern emitted one digit per line and would
            # corrupt any ID >= 10.
            cmd = ("cat /run/gluster/shared_storage/nfs-ganesha/exports/"
                   "export.*.conf | grep Export_Id | grep -Eo '[0-9]+'")
            ret, out, _ = g.run(self.mnode, cmd)
            self.assertEqual(
                ret, 0,
                "Unable to get export ID of the volume %s" % self.volname)
            g.log.info("Successful in getting volume export ID: %s ", out)
            # NOTE(review): assumes this volume's Export_Id is 2 (the
            # first dynamically exported volume) — confirm for setups
            # with pre-existing exports.
            self.assertEqual(
                out.strip("\n"), "2",
                "Export ID changed after export and unexport "
                "of volume: %s" % out)
            g.log.info("Export ID of volume is same after export "
                       "and export: %s", out)
    def test_nfs_ganesha_enable_disable_cluster(self):
        """
        Tests script to check nfs-ganesha volume gets exported after
        multiple enable/disable of cluster.

        Each iteration disables the cluster (which must unexport the
        volume), re-enables it (which must NOT implicitly re-export),
        then exports the volume explicitly and waits for it.
        """
        for i in range(1, 6):
            g.log.info(
                "Executing multiple enable/disable of nfs ganesha "
                "cluster. Count : %s ", str(i))

            ret, _, _ = disable_nfs_ganesha(self.mnode)
            self.assertEqual(ret, 0, "Failed to disable nfs-ganesha cluster")

            sleep(2)
            vol_option = get_volume_options(self.mnode,
                                            self.volname,
                                            option='ganesha.enable')
            # Assert on the lookup result itself. The original guard
            # asserted on 'ret' (already known to be 0), so a None result
            # slipped through and crashed on the .get() below.
            self.assertIsNotNone(
                vol_option, "Failed to get ganesha.enable volume"
                " option for %s " % self.volname)

            # Disabling the cluster must unexport the volume.
            self.assertEqual(
                vol_option.get('ganesha.enable'), 'off', "Failed "
                "to unexport volume by default after disabling "
                "cluster")

            ret, _, _ = enable_nfs_ganesha(self.mnode)
            self.assertEqual(ret, 0, "Failed to enable nfs-ganesha cluster")

            # Poll cluster health up to 5 times with 3s pauses. Fail only
            # when the LAST attempt is still unhealthy; the original
            # assertEqual(itr, 4) aborted on the first unhealthy check,
            # so the retry loop never actually retried.
            for itr in range(5):
                if is_nfs_ganesha_cluster_in_healthy_state(self.mnode):
                    g.log.info("nfs-ganesha cluster is healthy")
                    break
                g.log.warning(
                    "nfs-ganesha cluster is not healthy. "
                    "Iteration: %s", str(itr))
                self.assertNotEqual(
                    itr, 4, "Wait timeout: nfs-ganesha cluster "
                    "is not healthy")
                sleep(3)

            vol_option = get_volume_options(self.mnode,
                                            self.volname,
                                            option='ganesha.enable')
            self.assertIsNotNone(
                vol_option, "Failed to get ganesha.enable volume"
                " option for %s " % self.volname)

            # Re-enabling the cluster must not implicitly re-export the
            # volume; ganesha.enable stays 'off' until set explicitly.
            self.assertEqual(
                vol_option.get('ganesha.enable'), 'off', "Volume "
                "%s is exported by default after disable and "
                "enable of cluster which is unexpected." % self.volname)

            # Export volume after disable and enable of cluster
            ret, _, _ = export_nfs_ganesha_volume(mnode=self.mnode,
                                                  volname=self.volname)
            self.assertEqual(
                ret, 0, ("Failed to export volume %s "
                         "after disable and enable of cluster" % self.volname))

            # Wait for volume to get exported
            ret = wait_for_nfs_ganesha_volume_to_get_exported(
                self.mnode, self.volname)
            self.assertTrue(
                ret, "Volume %s is not exported after setting "
                "ganesha.enable 'on'" % self.volname)
            g.log.info("Exported volume after enabling nfs-ganesha cluster")
    def test_nfs_ganesha_subdirectory_mount_from_server_side(self):
        """
        Tests script to verify nfs ganesha subdirectory mount from server
        side succeeds and able to write IOs.

        The volume's ganesha export config file is rewritten in place with
        sed (Path, volpath and Pseudo entries) so that ganesha serves only
        the subdirectory, then the volume is restarted to apply it.
        """
        # Strip the mountpoint prefix to get the subdir path relative to
        # the volume root, and make sure it keeps a leading '/'.
        subdir_to_mount = self.subdir_path.replace(self.mounts[0].mountpoint,
                                                   '')
        if not subdir_to_mount.startswith(os.path.sep):
            subdir_to_mount = os.path.sep + subdir_to_mount

        # Export name clients will mount: "<volname>/<subdir>".
        subdir = self.volname + subdir_to_mount

        # Retarget the prepared mount objects at the subdirectory export.
        for mount_obj in self.sub_dir_mounts:
            mount_obj.volname = subdir

        # Ganesha export config for this volume on shared storage.
        # NOTE(review): other tests in this file read /run/gluster/... —
        # the two paths are typically symlinked; confirm on the target OS.
        export_file = ("/var/run/gluster/shared_storage/nfs-ganesha/exports/"
                       "export.%s.conf" % self.volname)
        # Rewrite the 'Path = ...' entry to point at the subdirectory.
        cmd = (r"sed -i  s/'Path = .*'/'Path = \"\/%s\";'/g %s" %
               (re.escape(subdir), export_file))
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0, ("Unable to change Path info to %s in %s" %
                                  ("/" + subdir, export_file)))

        # Append a 'volpath' entry right after the 'volume=' line.
        cmd = ("sed -i  's/volume=.*/& \\n volpath=\"%s\";/g' %s" %
               (re.escape(subdir_to_mount), export_file))
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0, ("Unable to add volpath info to %s in %s" %
                                  ("/" + subdir, export_file)))

        # Rewrite the 'Pseudo=...' entry (the client-visible pseudo path).
        cmd = (r"sed -i  s/'Pseudo=.*'/'Pseudo=\"\/%s\";'/g %s" %
               (re.escape(subdir), export_file))
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0, ("Unable to change pseudo Path info to "
                                  "%s in %s" % ("/" + subdir, export_file)))

        # Stop and start volume to take the modified export file to effect.
        # Stopping volume
        ret = volume_stop(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to stop volume %s" % self.volname))

        # Waiting for few seconds for volume unexport. Max wait time is
        # 120 seconds.
        ret = wait_for_nfs_ganesha_volume_to_get_unexported(
            self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to unexport volume %s after "
                              "stopping volume" % self.volname))

        # Starting volume
        ret = volume_start(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to start volume %s" % self.volname))

        # Waiting for few seconds for volume export. Max wait time is
        # 120 seconds. The exported entry is now the subdirectory.
        ret = wait_for_nfs_ganesha_volume_to_get_exported(self.mnode, subdir)
        self.assertTrue(ret, ("Failed to export sub directory %s after "
                              "starting volume" % subdir))

        # Mount the subdirectory export on any client not already mounted.
        for mount_obj in self.sub_dir_mounts:
            if not mount_obj.is_mounted():
                ret = mount_obj.mount()
                self.assertTrue(
                    ret, ("Unable to mount volume '%s:%s' "
                          "on '%s:%s'" %
                          (mount_obj.server_system, mount_obj.volname,
                           mount_obj.client_system, mount_obj.mountpoint)))

        # Run IO on the subdirectory mounts and wait for completion.
        ret = self.start_and_wait_for_io_to_complete(self.sub_dir_mounts)
        self.assertTrue(ret, ("Failed to write IOs when sub directory is"
                              " mounted from server side"))
        g.log.info("IO successful on clients")
    def test_nfs_ganesha_export_with_multiple_volumes(self):
        """
        Test case to verify multiple volumes gets exported when IO is in
        progress.

        Starts deep-directory IO on the existing mounts, then creates and
        exports up to five new distributed volumes while that IO runs, and
        finally validates the IO.
        """
        # Starting IO on the mounts
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            # Distinct start numbers keep directory names unique per mount.
            count = count + 10

        # Create and export five new volumes
        for i in range(5):
            # Check availability of bricks to create new volume
            num_of_unused_bricks = 0

            servers_unused_bricks_dict = get_servers_unused_bricks_dict(
                self.mnode, self.all_servers, self.all_servers_info)
            for each_server_unused_bricks_list in list(
                    servers_unused_bricks_dict.values()):
                num_of_unused_bricks = (num_of_unused_bricks +
                                        len(each_server_unused_bricks_list))

            if num_of_unused_bricks < 2:
                # Creating fewer than five volumes is acceptable as long
                # as at least one was created (i > 0).
                self.assertNotEqual(
                    i, 0, "New volume cannot be created due "
                    "to unavailability of bricks.")
                g.log.warning(
                    "Tried to create five new volumes. But could "
                    "create only %s volume due to unavailability "
                    "of bricks.", str(i))
                break

            self.volume['name'] = "nfsvol" + str(i)
            self.volume['voltype']['type'] = 'distributed'
            self.volume['voltype']['replica_count'] = 1
            self.volume['voltype']['dist_count'] = 2

            new_vol = self.volume['name']

            # Create volume. Assert unconditionally: the original wrapped
            # this in 'if not ret', which was redundant since assertTrue
            # is a no-op on truthy values.
            ret = setup_volume(mnode=self.mnode,
                               all_servers_info=self.all_servers_info,
                               volume_config=self.volume,
                               force=True)
            self.assertTrue(ret, "Setup volume [%s] failed" % self.volume)

            g.log.info("Wait for volume processes to be online")
            ret = wait_for_volume_process_to_be_online(self.mnode, new_vol)
            self.assertTrue(
                ret, "Volume %s process not online despite "
                "waiting for 300 seconds" % new_vol)

            # Export volume with nfs ganesha
            ret, _, _ = export_nfs_ganesha_volume(mnode=self.mnode,
                                                  volname=new_vol)
            self.assertEqual(ret, 0, ("Failed to export volume %s "
                                      "using nfs-ganesha" % new_vol))

            # Wait for volume to get exported
            ret = wait_for_nfs_ganesha_volume_to_get_exported(
                self.mnode, new_vol)
            self.assertTrue(
                ret, "Volume %s is not exported after setting "
                "ganesha.enable 'on'" % new_vol)
            g.log.info("Exported nfs-ganesha volume %s", new_vol)

            # Log Volume Info and Status
            ret = log_volume_info_and_status(self.mnode, new_vol)
            self.assertTrue(
                ret, "Logging volume %s info and status failed" % new_vol)

        # Validate IO
        g.log.info("Validating IO")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all IO")
# NOTE(review): non-Python scraper artifact ("Exemple #6" / "0") replaced
# with this comment — the original lines broke the file's syntax.
    def test_new_volume_while_io_in_progress(self):
        """
        Create, export and mount new volume while IO running on mount of
        another volume
        Steps:
        1. Start IO on mount points
        2. Create another volume 'volume_new'
        3. Export volume_new through nfs-ganesha
        4. Mount the volume on clients
        """
        # pylint: disable=too-many-statements, too-many-locals
        # Start IO on all mount points
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            # Distinct start numbers keep directory names unique per mount.
            count = count + 10

        self.volname_new = '%s_new' % self.volname
        kwargs = {}
        # Round-robin index into the servers dict: one brick is taken from
        # each successive server while building the replica set below.
        dict_index = 0

        # Creating mounts list for mounting new volume: clone the existing
        # mount objects and retarget them at the new volume / mountpoint.
        self.mounts_new = []
        for mount_obj in self.mounts:
            self.mounts_new.append(deepcopy(mount_obj))
        for mount_obj in self.mounts_new:
            mount_obj.volname = self.volname_new
            mount_obj.mountpoint = '%s_new' % mount_obj.mountpoint

        # Fetch details for creating a replicate volume.
        replica_count = (
            self.default_volume_type_config['replicated']['replica_count'])
        servers_bricks_dict = get_servers_bricks_dict(self.all_servers,
                                                      self.all_servers_info)
        bricks_list = []
        kwargs['replica_count'] = replica_count
        kwargs['transport_type'] = (
            self.default_volume_type_config['replicated']['transport'])

        # Pick one unused brick per replica, rotating across servers.
        # NOTE(review): relies on servers_bricks_dict preserving insertion
        # order and on mutating the lists in place via list(...)[i].pop(0).
        for num in range(0, replica_count):
            # Current_server is the server on which brick path will be created
            current_server = list(servers_bricks_dict.keys())[dict_index]
            current_server_unused_bricks_list = (list(
                servers_bricks_dict.values())[dict_index])
            if current_server_unused_bricks_list:
                brick_path = (
                    "%s:%s/%s_brick%s" %
                    (current_server, current_server_unused_bricks_list[0],
                     self.volname_new, num))
                bricks_list.append(brick_path)

                # Remove the added brick from the list
                list(servers_bricks_dict.values())[dict_index].pop(0)

            # Advance to the next server, wrapping around at the end.
            if dict_index < len(servers_bricks_dict) - 1:
                dict_index = dict_index + 1
            else:
                dict_index = 0

        # Create volume 'volume_new'
        ret, _, _ = volume_create(mnode=self.mnode,
                                  volname=self.volname_new,
                                  bricks_list=bricks_list,
                                  force=False,
                                  **kwargs)
        self.assertEqual(ret, 0,
                         "Unable to create volume %s" % self.volname_new)
        g.log.info("Successfully created volume %s", self.volname_new)

        ret, _, _ = volume_start(self.mnode, self.volname_new)
        self.assertEqual(ret, 0,
                         "Unable to start volume %s" % self.volname_new)

        # Wait for volume processes to be online
        g.log.info("Wait for volume %s processes to be online",
                   self.volname_new)
        ret = wait_for_volume_process_to_be_online(self.mnode,
                                                   self.volname_new)
        self.assertTrue(
            ret, "Wait timeout: Processes of volume %s are "
            "not online." % self.volname_new)
        g.log.info("Volume processes of volume %s are now online",
                   self.volname_new)

        # Export volume as nfs-ganesha export
        ret, _, _ = export_nfs_ganesha_volume(self.mnode, self.volname_new)
        self.assertEqual(
            ret, 0, "Failed to set ganesha.enable 'on' on "
            "volume %s" % self.volname_new)
        g.log.info(
            "Successful in setting ganesha.enable to 'on' on "
            "volume %s", self.volname_new)

        # Verify volume export (waits up to 120 seconds).
        ret = wait_for_nfs_ganesha_volume_to_get_exported(
            self.mnode, self.volname_new)
        self.assertTrue(
            ret, "Failed to export volume %s as nfs-ganesha "
            "export" % self.volname_new)
        g.log.info("Successfully exported volume %s", self.volname_new)

        # Mount the new volume
        for mount_obj in self.mounts_new:
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted %s on client %s",
                       mount_obj.volname, mount_obj.client_system)

        # Verify mounts
        for mount_obj in self.mounts_new:
            ret = mount_obj.is_mounted()
            self.assertTrue(
                ret, ("Volume %s is not mounted on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Verified: Volume %s is mounted on client %s",
                       mount_obj.volname, mount_obj.client_system)
        g.log.info("Export and mount of new volume %s is success.",
                   self.volname_new)

        # Validate IO started at the top of the test on the OLD volume.
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all IO")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")
# NOTE(review): non-Python scraper artifact ("Exemple #7" / "0") replaced
# with this comment — the original lines broke the file's syntax.
    def test_root_squash_enable(self):
        """
        Verify nfs-ganesha rootsquash behaviour survives a volume restart.

        Steps:
        1. Create some files and dirs inside mount point
        2. Set permission as 777 for mount point
        3. Enable root-squash on volume
        4. Create some more files and dirs
        5. Restart volume
        6. Try to edit file created in step 1
           It should not allow to edit the file
        7. Try to edit the file created in step 5
           It should allow to edit the file
        """
        client = self.mounts[0].client_system
        mountpoint = self.mounts[0].mountpoint
        mount_user = self.mounts[0].user

        # Create ten files while root is still unsquashed.
        cmd = ("for i in {1..10}; do touch %s/file$i; done" % mountpoint)
        ret, _, err = g.run(client, cmd, user=mount_user)
        self.assertEqual(ret, 0, err)

        # Stat everything just created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successful in getting stats of files/dirs "
                   "from mount point")

        # Open up the mount point so squashed users can write to it.
        ret = set_file_permissions(client, mountpoint, 777)
        self.assertTrue(ret, "Failed to set permission for directory")
        g.log.info("Successfully set permissions for directory")

        # Turn on root-squash for the volume.
        ret = set_root_squash(self.servers[0], self.volname)
        self.assertTrue(ret, "Failed to enable root-squash on volume")
        g.log.info("root-squash is enable on the volume")

        # Create ten more files; these are owned by the squashed user.
        cmd = ("for i in {1..10}; do touch %s/Squashfile$i; done" %
               mountpoint)
        ret, _, err = g.run(client, cmd, user=mount_user)
        self.assertEqual(ret, 0, err)

        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successful in getting stats of files/dirs "
                   "from mount point")

        # Restart the volume: stop, wait for unexport, start, wait for
        # export (each wait allows up to 120 seconds).
        ret = volume_stop(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to stop volume %s" % self.volname))
        g.log.info("Successful in stopping volume %s", self.volname)

        ret = wait_for_nfs_ganesha_volume_to_get_unexported(
            self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to unexport volume %s after "
                              "stopping volume" % self.volname))
        g.log.info("Volume is unexported successfully")

        ret = volume_start(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to start volume %s" % self.volname))
        g.log.info("Successful in starting volume %s", self.volname)

        ret = wait_for_nfs_ganesha_volume_to_get_exported(
            self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to export volume %s after "
                              "starting volume" % self.volname))
        g.log.info("Volume is exported successfully")

        # Root-owned files must remain read-only for the squashed user.
        for mount in self.mounts:
            ret = append_string_to_file(mount.client_system,
                                        "%s/file10" % mount.mountpoint,
                                        'hello')
            self.assertFalse(
                ret, "Unexpected:nfsnobody user editing file "
                "created by root user should FAIL")
            g.log.info("Successful:nfsnobody user failed to edit file "
                       "created by root user")

        # Files created after squashing belong to nfsnobody and must stay
        # writable for it.
        for mount in self.mounts:
            ret = append_string_to_file(
                mount.client_system,
                "%s/Squashfile5" % mount.mountpoint, 'hello')
            self.assertTrue(
                ret, "Unexpected:nfsnobody user failed to edit "
                "the file created by nfsnobody user")
            g.log.info("Successful:nfsnobody user successfully edited the "
                       "file created by nfsnobody user")
    def test_nfs_ganesha_export_with_multiple_volumes(self):
        """
        Testcase to verify multiple volumes gets exported when IO is in
        progress.

        Creates five distributed volumes in a loop, exports each via
        nfs-ganesha (unless already exported), then validates the IO
        previously started on the original mounts.
        """

        for i in range(5):
            self.volume['name'] = "nfsvol" + str(i)
            self.volume['voltype']['type'] = 'distributed'
            self.volume['voltype']['replica_count'] = 1
            self.volume['voltype']['dist_count'] = 2

            # Create volume. Assert unconditionally: the original wrapped
            # this in 'if not ret', which was redundant since assertTrue
            # is a no-op on truthy values.
            ret = setup_volume(mnode=self.mnode,
                               all_servers_info=self.all_servers_info,
                               volume_config=self.volume,
                               force=True)
            self.assertTrue(ret, ("Setup volume %s failed" % self.volume))
            time.sleep(5)

            # Export volume with nfs ganesha, if it is not exported already
            vol_option = get_volume_options(self.mnode,
                                            self.volume['name'],
                                            option='ganesha.enable')
            self.assertIsNotNone(
                vol_option, "Failed to get ganesha.enable "
                "volume option for %s" % self.volume['name'])
            if vol_option['ganesha.enable'] != 'on':
                ret, _, _ = export_nfs_ganesha_volume(
                    mnode=self.mnode, volname=self.volume['name'])
                self.assertEqual(
                    ret, 0, "Failed to export volume %s as NFS "
                    "export" % self.volume['name'])
                time.sleep(5)
            else:
                g.log.info("Volume %s is exported already",
                           self.volume['name'])

            # Waiting for few seconds for volume export. Max wait time is
            # 120 seconds.
            ret = wait_for_nfs_ganesha_volume_to_get_exported(
                self.mnode, (self.volume['name']))
            self.assertTrue(ret, ("Failed to export volume %s after "
                                  "starting volume when IO is running on "
                                  "another volume" % self.volume['name']))

            # Log Volume Info and Status
            ret = log_volume_info_and_status(self.mnode, self.volume['name'])
            self.assertTrue(
                ret, "Logging volume %s info and status failed" %
                self.volume['name'])

        # Validate IO
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.io_validation_complete = True
        self.assertTrue(ret, "IO failed on some of the clients")

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")