def test_mount_remove_client_logs_dir_remount(self):

        # pylint: disable=too-many-statements
        """
        1. Create all types of volumes and start them.
        2. Mount all volumes on clients.
        3. Delete /var/log/glusterfs folder on client.
        4. Run IO on all the mount points.
        5. Unmount and remount all volumes.
        6. Check if logs are regenerated or not.
        """

        # Mounting the volume.
        ret, _, _ = mount_volume(self.volname, mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume %s is not mounted.") % self.volname)
        g.log.info("Volume mounted successfully : %s", self.volname)

        # Removing dir /var/log/glusterfs on client by moving it aside.
        cmd = 'mv /var/log/glusterfs /root/'
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Unable to remove /var/log/glusterfs dir.")
        g.log.info("Successfully removed /var/log/glusterfs on client: %s",
                   self.mounts[0])

        # Running IO on the mount point.
        # Creating a dir on the mount point.
        ret = mkdir(self.mounts[0].client_system,
                    self.mounts[0].mountpoint+"/dir")
        self.assertTrue(ret, "Failed to create dir.")
        g.log.info("dir created successfully for %s", self.mounts[0])

        # Creating a file on the mount point.
        cmd = ('touch  %s/file' % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to create file.")
        g.log.info("file created successfully for %s", self.mounts[0])

        # Unmounting and remounting volume.
        ret, _, _ = umount_volume(mclient=self.mounts[0].client_system,
                                  mpoint=self.mounts[0].mountpoint)
        self.assertEqual(ret, 0,
                         ("Volume %s is not unmounted.") % self.volname)
        g.log.info("Volume unmounted successfully : %s", self.volname)

        ret, _, _ = mount_volume(self.volname, mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume %s is not mounted.") % self.volname)
        g.log.info("Volume mounted successfully : %s", self.volname)

        # Checking if logs are regenerated or not.
        ret = get_dir_contents(self.mounts[0].client_system,
                               '/var/log/glusterfs/')
        self.assertIsNotNone(ret, 'Log files were not regenerated.')
        g.log.info("Log files were properly regenearted.")
Example #2
    def _disable_io_encryption(self):
        """ Disables IO encryption """
        # UnMount Volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret, _, _ = umount_volume(self.mounts[0].client_system,
                                  self.mounts[0].mountpoint,
                                  mtype=self.mount_type)
        self.assertEqual(ret, 0, "Failed to Unmount volume")

        # Stop Volume
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to Stop volume")

        # Disable server and client SSL usage
        options = {"server.ssl": "off",
                   "client.ssl": "off"}
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, "Failed to set volume options")

        # Start Volume
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to Start volume")

        # Mount Volume
        ret, _, _ = mount_volume(self.volname, mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, "Failed to mount the volume back")
Example #3
        def mount_clone_and_io(clone, mpoint):
            # define mounts
            self.mount_points = []
            self.mounts_dict_list = []
            for client in self.all_clients_info:
                mount = {
                    'protocol': self.mount_type,
                    'server': self.mnode,
                    'volname': clone,
                    'client': self.all_clients_info[client],
                    'mountpoint': (os.path.join("%s" % mpoint)),
                    'options': ''
                }
                self.mounts_dict_list.append(mount)
            self.mounts1 = create_mount_objs(self.mounts_dict_list)
            g.log.info("Successfully made entry in self.mounts")
            # Mounting a volume
            g.log.info("Starting to mount volume")
            ret = mount_volume(clone, self.mount_type, mpoint, self.mnode,
                               self.clients[0])
            self.assertTrue(ret, "Volume mount failed for clone1")
            g.log.info("%s mounted Successfully", clone)

            # Checking volume mounted or not
            ret = is_mounted(clone, mpoint, self.mnode, self.clients[0],
                             self.mount_type)
            self.assertTrue(ret,
                            "Volume not mounted on mount point: %s" % mpoint)
            g.log.info("Volume %s mounted on %s", clone, mpoint)
            return 0
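        # Hedged usage sketch (illustrative, not from the source test): the
        # nested helper above would typically be invoked from the enclosing
        # test roughly like this. `self.clone` and `self.mpoint` are assumed
        # attribute names for the cloned volume and its mount point.
        ret = mount_clone_and_io(self.clone, self.mpoint)
        self.assertEqual(ret, 0, "Failed to mount the clone and run IO")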
    def _try_mounting_volume(self):
        """ Try mounting a volume; the mount is expected to fail """
        ret, _, _ = mount_volume(self.volname,
                                 mtype="glusterfs",
                                 mpoint="/mnt/distribute-vol",
                                 mserver=self.mnode,
                                 mclient=self.servers[1])
        self.assertNotEqual(ret, 0,
                            "Unexpected: Volume %s is mounted" % self.volname)
Example #5
    def test_stat_prefetch(self):

        # pylint: disable=ungrouped-imports
        self.vips = (g.config['gluster']['cluster_config']['smb']['ctdb_vips'])
        # Virtual Ip of first node to mount
        self.vips_mnode = self.vips[0]['vip']
        g.log.info("CTDB Virtual Ip %s", self.vips_mnode)
        # run IOs
        self.counter = 1
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            ret, _, _ = mount_volume(self.volname,
                                     'cifs',
                                     mount_obj.mountpoint,
                                     self.vips_mnode,
                                     mount_obj.client_system,
                                     smbuser='******',
                                     smbpasswd='foobar')
            self.assertEqual(ret, 0, "Cifs Mount Failed")
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_files -f 10000"
                   " --base-file-name ctdb-cifs "
                   " --fixed-file-size 10k %s/samba/" %
                   (self.script_upload_path, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        self.io_validation_complete = False
        # Switch off and switch on stat-prefetch
        options = {"stat-prefetch": "off"}
        ret = set_volume_options(self.mnode, self.volname, options)
        if not ret:
            raise ExecutionError("Failed to execute volume set"
                                 "option command")
        ret = get_volume_options(self.mnode, self.volname)
        if ret['performance.stat-prefetch'] != "off":
            raise ExecutionError("Failed to set stat-prefetch off")
        options = {"stat-prefetch": "on"}
        ret = set_volume_options(self.mnode, self.volname, options)
        if not ret:
            raise ExecutionError("Failed to execute volume set"
                                 "option command")
        ret = get_volume_options(self.mnode, self.volname)
        if ret['performance.stat-prefetch'] != "on":
            raise ExecutionError("Failed to set stat-prefetch on")
        # Validate IO
        g.log.info("Wait for IO to complete and validate IO ...")
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.io_validation_complete = True
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Creation of 10000 files Success")
        g.log.info("test__samba_ctdb_cifs_io_rename PASSED")
    def test_ec_read_from_hardlink(self):
        """
        Test steps:
        1. Enable metadata-cache(md-cache) options on the volume
        2. Touch a file and create a hardlink for it
        3. Read data from the hardlink.
        4. Read data from the actual file.
        """
        options = {'group': 'metadata-cache'}
        # Set metadata-cache options as group
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, "Unable to set the volume options {}".
                        format(options))
        g.log.info("Able to set the %s options", options)

        # Mounting the volume on one client
        ret, _, _ = mount_volume(self.volname, mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume {} is not mounted").
                         format(self.volname))
        g.log.info("Volume mounted successfully : %s", self.volname)

        file_name = self.mounts[0].mountpoint + "/test1"
        content = "testfile"
        hard_link = self.mounts[0].mountpoint + "/test1_hlink"
        cmd = 'echo "{content}" > {file}'.format(file=file_name,
                                                 content=content)

        # Creating a file with data
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Sucessful in creating a file with data")
        g.log.info("file created successfully on %s",
                   self.mounts[0].mountpoint)

        # Creating a hardlink for the file created
        ret = create_link_file(self.mounts[0].client_system,
                               file_name, hard_link)
        self.assertTrue(ret, "Link file creation failed")
        g.log.info("Link file creation for %s is successful", file_name)

        # Reading from the file as well as the hardlink
        for each in (file_name, hard_link):
            ret, out, _ = g.run(self.mounts[0].client_system,
                                "cat {}".format(each))
            self.assertEqual(ret, 0, "Unable to read the {}".format(each))
            self.assertEqual(content, out.strip('\n'), "The content {} and"
                             " data in file {} is not same".
                             format(content, each))
            g.log.info("Read of %s file is successful", each)
Example #7
    def _set_option_and_mount_and_unmount_volumes(self,
                                                  option="",
                                                  is_allowed=True):
        """
        Setting volume option and then mounting and unmounting the volume
        """
        # Check if an option is passed
        if option:
            # Setting the option passed as an argument
            ret = set_volume_options(self.mnode, self.volname,
                                     {option: self.mounts[0].client_system})
            self.assertTrue(
                ret, "Failed to set %s option in volume: %s" %
                (option, self.volname))
            g.log.info("Successfully set %s option in volume: %s", option,
                       self.volname)

        # Mounting a volume
        ret, _, _ = mount_volume(self.volname,
                                 mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)

        # Checking if volume was successfully mounted or not
        ret = is_mounted(self.volname,
                         mtype=self.mount_type,
                         mpoint=self.mounts[0].mountpoint,
                         mserver=self.mnode,
                         mclient=self.mounts[0].client_system)
        if is_allowed:
            self.assertTrue(ret,
                            "Failed to mount the volume: %s" % self.volname)
        else:
            self.assertFalse(
                ret, "Unexpected: Mounting"
                " the volume %s was successful" % self.volname)

        # Unmount only if the volume is supposed to be mounted
        if is_allowed:
            ret, _, _ = umount_volume(self.mounts[0].client_system,
                                      self.mounts[0].mountpoint,
                                      mtype=self.mount_type)
            self.assertEqual(ret, 0,
                             "Failed to unmount the volume: %s" % self.volname)
Example #8
def get_pathinfo(mnode, filename, volname):
    """This module gets filepath of the given file in gluster server.

    Example:
        get_pathinfo(mnode, "file1", "testvol")

    Args:
        mnode (str): Node on which cmd has to be executed.
        filename (str): relative path of file
        volname (str): volume name

    Returns:
        NoneType: None if command execution fails or the output cannot
            be parsed.
        list: backend brick paths of the given file on the gluster servers.
    """

    mount_point = tempfile.mkdtemp()

    # Perform a glusterfs mount because the file location on the gluster
    # server can only be identified through a glusterfs mount
    ret, _, _ = mount_volume(volname,
                             mtype='glusterfs',
                             mpoint=mount_point,
                             mserver=mnode,
                             mclient=mnode)
    if ret != 0:
        g.log.error("Failed to do gluster mount on volume %s to fetch"
                    "pathinfo from server %s" % (volname, mnode))
        return None

    filename = mount_point + '/' + filename
    attr_name = 'trusted.glusterfs.pathinfo'
    output = get_extended_attributes_info(mnode, [filename],
                                          attr_name=attr_name)
    if output is None:
        g.log.error("Failed to get path info for %s" % filename)
        return None

    pathinfo = output[filename][attr_name]

    umount_volume(mnode, mount_point)
    g.run(mnode, "rm -rf " + mount_point)

    return re.findall(r".*?POSIX.*?:(\S+)\>", pathinfo)
Example #9
    def trigger_heal_from_mount_point(self):
        """
        Trigger heal from mount point using read.
        """
        # Unmouting and remounting volume to update the volume graph
        # in client.
        ret, _, _ = umount_volume(
            self.mounts[0].client_system, self.mounts[0].mountpoint)
        self.assertFalse(ret, "Failed to unmount volume.")

        ret, _, _ = mount_volume(
            self.volname, 'glusterfs', self.mounts[0].mountpoint,
            self.mnode, self.mounts[0].client_system)
        self.assertFalse(ret, "Failed to remount volume.")
        g.log.info('Successfully umounted and remounted volume.')

        # Trigger heal from client side
        cmd = ("/usr/bin/env python {0} read {1}/{2}".format(
            self.script_upload_path, self.mounts[0].mountpoint,
            self.test_meta_data_self_heal_folder))
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertFalse(ret, 'Failed to trigger heal on %s'
                         % self.mounts[0].client_system)
        g.log.info("Successfully triggered heal from mount point.")
    def setUp(self):

        self.get_super_method(self, 'setUp')()

        # Setup Volume
        if not self.setup_volume():
            raise ExecutionError("Failed to Setup volume")

        self.first_client = self.mounts[0].client_system
        self.mount_point = self.mounts[0].mountpoint

        # Mount volume with -o acl option
        ret, _, _ = mount_volume(self.volname,
                                 self.mount_type,
                                 self.mount_point,
                                 self.mnode,
                                 self.first_client,
                                 options='acl')
        if ret:
            raise ExecutionError("Failed to mount volume")

        # Create a non-root user
        if not add_user(self.first_client, 'joker'):
            raise ExecutionError("Failed to create user joker")
    def test_snap_clone_snapd(self):
        """
        Steps:

        1. Create a volume
        2. Create a snapshot and activate it
        3. Clone the snapshot and mount it
        4. Check for the snapd daemon
        5. Enable USS and validate snapd
        6. Stop the cloned volume
        7. Validate snapd
        8. Start the cloned volume
        9. Validate snapd
        10. Create 5 more snapshots
        11. Validate the total number of
            snapshots created
        12. Activate the 5 snapshots
        13. Enable USS
        14. Validate snapd
        15. Kill snapd on all nodes
        16. Validate that snapd is not running
        17. Force start the clone volume
        18. Validate snaps inside the .snaps directory
        """
        # pylint: disable=too-many-statements, too-many-locals

        # Starting I/O
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s" % (
                       self.script_upload_path,
                       mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate I/O
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # Creating snapshot
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(ret, 0, ("Failed to create snapshot for volume %s"
                                  % self.volname))
        g.log.info("Snapshot %s created successfully for "
                   "volume %s", self.snap, self.volname)

        # Activating created snapshots
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to activate snapshot %s"
                                  % self.snap))
        g.log.info("Snapshot snap%s activated successfully", self.snap)

        # Snapshot list
        self.assertIsNotNone(
            get_snap_list(self.mnode), "Failed to list snapshot")
        g.log.info("Snapshot list command Successful")

        # Creating and starting a Clone of snapshot:
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to clone %s" % self.clone_vol1)
        g.log.info("Clone volume %s created successfully", self.clone_vol1)

        # Start the clone volumes
        ret, _, _ = volume_start(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to start %s" % self.clone_vol1)
        g.log.info("%s started successfully", self.clone_vol1)

        # Form server list
        brick_list = get_all_bricks(self.mnode, self.clone_vol1)
        for bricks in brick_list:
            self.server_lists.append(bricks.split(":")[0])
        self.server_list = list(set(self.server_lists))

        # Get volume info
        vol_info = get_volume_info(self.mnode, self.clone_vol1)
        self.assertIsNotNone(vol_info, "Failed to get vol info")
        g.log.info("Successfully in getting vol info")

        # Redefining mounts for cloned volume
        self.mount_points, self.mounts_dict_list = [], []
        for client in self.all_clients_info:
            mount = {
                'protocol': self.mount_type,
                'server': self.mnode,
                'volname': self.volname,
                'client': self.all_clients_info[client],
                'mountpoint': (path.join(
                    "%s" % self.mpoint)),
                'options': ''
            }
            self.mounts_dict_list.append(mount)
        self.mount1 = create_mount_objs(self.mounts_dict_list)
        self.mount_points.append(self.mpoint)
        g.log.info("Successfully made entry in self.mount1")

        # FUSE mount clone1 volume
        for mount_obj in self.mounts:
            ret, _, _ = mount_volume(self.clone_vol1, self.mount_type,
                                     self.mpoint,
                                     self.mnode, mount_obj.client_system)
            self.assertEqual(ret, 0, "Volume mount failed for clone1")
            g.log.info("%s mounted Successfully", self.clone_vol1)

            # Validate clone volume is mounted or not
            ret = is_mounted(self.clone_vol1, self.mpoint, self.mnode,
                             mount_obj.client_system, self.mount_type)
            self.assertTrue(ret, "Volume not mounted on mount point: "
                            "%s" % self.mpoint)
            g.log.info("Volume %s mounted on %s", self.clone_vol1, self.mpoint)

        # Log Cloned Volume information
        ret = log_volume_info_and_status(self.mnode, self.clone_vol1)
        self.assertTrue("Failed to Log Info and Status of Volume "
                        "%s" % self.clone_vol1)
        g.log.info("Successfully Logged Info and Status")

        # Validate snapd running on all nodes
        self.validate_snapd(check_condition=False)

        # Enable USS
        ret, _, _ = enable_uss(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to enable USS on cloned volume")
        g.log.info("Successfully enabled USS on Cloned volume")

        # Validate USS running
        self.validate_uss()

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Stop cloned volume
        ret, _, _ = volume_stop(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to stop cloned volume "
                         "%s" % self.clone_vol1)
        g.log.info("Successfully Stopped Cloned volume %s", self.clone_vol1)

        # Validate snapd running on all nodes
        self.validate_snapd(check_condition=False)

        # Start cloned volume
        ret, _, _ = volume_start(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to start cloned volume"
                         " %s" % self.clone_vol1)
        g.log.info("Successfully started cloned volume"
                   " %s", self.clone_vol1)

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Create 5 snapshots
        self.snaps_list = [('test_snap_clone_snapd-snap%s' % i)
                           for i in range(0, 5)]
        for snapname in self.snaps_list:
            ret, _, _ = snap_create(self.mnode, self.clone_vol1,
                                    snapname)
            self.assertEqual(ret, 0, ("Failed to create snapshot for volume"
                                      " %s" % self.clone_vol1))
            g.log.info("Snapshot %s created successfully for volume "
                       "%s", snapname, self.clone_vol1)

        # Validate USS running
        self.validate_uss()

        # Check snapshot under .snaps directory
        self.check_snaps()

        # Activate Snapshots
        for snapname in self.snaps_list:
            ret, _, _ = snap_activate(self.mnode, snapname)
            self.assertEqual(ret, 0, ("Failed to activate snapshot %s"
                                      % snapname))
            g.log.info("Snapshot %s activated "
                       "successfully", snapname)

        # Validate USS running
        self.validate_uss()

        # Validate snapshots under .snaps folder
        self.validate_snaps()

        # Kill snapd on node and validate snapd except management node
        for server in self.servers[1:]:
            ret, _, _ = terminate_snapd_on_node(server)
            self.assertEqual(ret, 0, "Failed to Kill snapd on node %s"
                             % server)
            g.log.info("snapd Killed Successfully on node %s", server)

            # Check snapd running
            ret = is_snapd_running(server, self.clone_vol1)
            self.assertTrue(ret, "Unexpected: Snapd running on node: "
                            "%s" % server)
            g.log.info("Expected: Snapd is not running on node:%s", server)

            # Check snapshots under .snaps folder
            g.log.info("Validating snapshots under .snaps")
            ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint)
            self.assertEqual(ret, 0, "Target endpoint not connected")
            g.log.info("Successfully listed snapshots under .snaps")

        # Kill snapd in management node
        ret, _, _ = terminate_snapd_on_node(self.servers[0])
        self.assertEqual(ret, 0, "Failed to Kill snapd on node %s"
                         % self.servers[0])
        g.log.info("snapd Killed Successfully on node %s", self.servers[0])

        # Validate snapd running on all nodes
        self.validate_snapd(check_condition=False)

        # Validating snapshots under .snaps
        ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint)
        self.assertNotEqual(ret, 0, "Unexpected: Successfully listed "
                            "snapshots under .snaps")
        g.log.info("Expected: Target endpoint not connected")

        # Start the Cloned volume(force start)
        ret, _, _ = volume_start(self.mnode, self.clone_vol1, force=True)
        self.assertEqual(ret, 0, "Failed to start cloned volume "
                         "%s" % self.clone_vol1)
        g.log.info("Successfully Started Cloned volume %s", self.clone_vol1)

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Validate snapshots under .snaps folder
        self.validate_snaps()
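    # Hedged sketch (the real helper is defined elsewhere in the test class and
    # is not shown in this excerpt): `validate_snapd` can be implemented by
    # checking `is_snapd_running` on every node, expecting snapd to be up when
    # check_condition is True and down otherwise.
    def validate_snapd(self, check_condition=True):
        """ Illustrative validation of the snapd state on all servers """
        for server in self.servers:
            ret = is_snapd_running(server, self.clone_vol1)
            if check_condition:
                self.assertTrue(
                    ret,
                    "Unexpected: snapd is not running on node %s" % server)
            else:
                self.assertFalse(
                    ret,
                    "Unexpected: snapd is running on node %s" % server)
        g.log.info("Validated snapd state on all nodes")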
Example #12
    def test_server_side_healing_happens_only_when_glustershd_running(self):
        """
        Test script which verifies that server side healing happens
        only if the heal daemon is running on the node where the source
        brick resides.

         * Create and start the Replicate volume
         * Check the glustershd processes - only one glustershd should be
           listed
         * Bring down the bricks without affecting the cluster
         * Create files on the volume
         * Kill the glustershd on the nodes where the bricks are running
         * Bring up the bricks which were brought down in the previous steps
         * Check the heal info - it must show pending heals, and healing
           shouldn't happen since glustershd is down on the source node
         * Issue heal
         * Trigger client side heal
         * Heal should complete successfully
        """
        # pylint: disable=too-many-locals,too-many-statements,too-many-lines
        # Setting Volume options
        options = {
            "metadata-self-heal": "on",
            "entry-self-heal": "on",
            "data-self-heal": "on"
        }
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, 'Failed to set options %s' % options)
        g.log.info("Successfully set %s for volume %s", options, self.volname)

        # Check the self-heal daemon process
        ret, pids = get_self_heal_daemon_pid(self.servers)
        self.assertTrue(ret, ("Either No self heal daemon process found or "
                              "more than One self heal daemon process "
                              "found : %s" % pids))
        g.log.info(
            "Successful in verifying self heal daemon process"
            " on all nodes %s", self.servers)

        # Select the bricks to bring offline
        bricks_to_bring_offline = (select_volume_bricks_to_bring_offline(
            self.mnode, self.volname))
        g.log.info("Brick List to bring offline : %s", bricks_to_bring_offline)

        # Bring down the selected bricks
        ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
        self.assertTrue(ret, "Failed to bring down the bricks")
        g.log.info("Brought down the brick process "
                   "for %s", bricks_to_bring_offline)

        # Write files on all mounts
        all_mounts_procs, num_files_to_write = [], 100
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f %s --base-file-name file %s" %
                   (self.script_upload_path, num_files_to_write,
                    mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate IO
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # Get online bricks list
        online_bricks = get_online_bricks_list(self.mnode, self.volname)
        g.log.info("Online Bricks for volume %s : %s", self.volname,
                   online_bricks)

        # Get the nodes where bricks are running
        bring_offline_glustershd_nodes = []
        for brick in online_bricks:
            bring_offline_glustershd_nodes.append(brick.split(":")[0])
        g.log.info("self heal deamon on nodes %s to be killed",
                   bring_offline_glustershd_nodes)

        # Kill the self heal daemon process on nodes
        ret = bring_self_heal_daemon_process_offline(
            bring_offline_glustershd_nodes)
        self.assertTrue(
            ret, ("Unable to bring self heal daemon process"
                  " offline for nodes %s" % bring_offline_glustershd_nodes))
        g.log.info(
            "Sucessfully brought down self heal process for "
            "nodes %s", bring_offline_glustershd_nodes)

        # Check the heal info
        heal_info = get_heal_info_summary(self.mnode, self.volname)
        g.log.info("Successfully got heal info %s for the volume %s",
                   heal_info, self.volname)

        # Bring bricks online
        ret = bring_bricks_online(self.mnode, self.volname,
                                  bricks_to_bring_offline, 'glusterd_restart')
        self.assertTrue(
            ret,
            ("Failed to bring bricks: %s online" % bricks_to_bring_offline))

        # Issue heal
        ret = trigger_heal_full(self.mnode, self.volname)
        self.assertFalse(ret,
                         ("Able to trigger heal on volume %s where "
                          "self heal daemon is not running" % self.volname))
        g.log.info(
            "Expected : Unable to trigger heal on volume %s where "
            "self heal daemon is not running", self.volname)

        # Wait for 130 sec to heal
        ret = monitor_heal_completion(self.mnode, self.volname, 130)
        self.assertFalse(ret, ("Heal Completed on volume %s" % self.volname))
        g.log.info("Expected : Heal pending on volume %s", self.volname)

        # Check the heal info
        heal_info_after_triggering_heal = get_heal_info_summary(
            self.mnode, self.volname)
        g.log.info("Successfully got heal info for the volume %s",
                   self.volname)

        # Compare with heal pending with the files wrote
        for node in online_bricks:
            self.assertGreaterEqual(
                int(heal_info_after_triggering_heal[node]['numberOfEntries']),
                num_files_to_write,
                ("Some of the files are healed from source bricks %s where "
                 "self heal daemon is not running" % node))
        g.log.info("EXPECTED: No files are healed from source bricks where "
                   "self heal daemon is not running")

        # Unmount and Mount volume again as volume options were set
        # after mounting the volume
        for mount_obj in self.mounts:
            ret, _, _ = umount_volume(mount_obj.client_system,
                                      mount_obj.mountpoint)
            self.assertEqual(ret, 0,
                             "Failed to unmount %s" % mount_obj.client_system)
            ret, _, _ = mount_volume(self.volname,
                                     mtype='glusterfs',
                                     mpoint=mount_obj.mountpoint,
                                     mserver=self.mnode,
                                     mclient=mount_obj.client_system)
            self.assertEqual(ret, 0,
                             "Failed to mount %s" % mount_obj.client_system)

        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s read %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate IO
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "Reads failed on some of the clients")
        g.log.info("Reads successful on all mounts")

        # Wait for heal to complete
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, "Unable to heal the pending entries")
        g.log.info("Successfully healed the pending entries for volume %s",
                   self.volname)
Example #13
    def test_validate_authreject_vol(self):
        """
        -Set Authentication Reject for client1
        -Check if bricks are online
        -Mounting the vol on client1
        -Check if bricks are online
        -Mounting the vol on client2
        -Reset the Volume
        -Check if bricks are online
        -Mounting the vol on client1
        """
        # pylint: disable=too-many-statements

        # Obtain hostname of clients
        ret, hostname_client1, _ = g.run(self.clients[0], "hostname")
        self.assertEqual(
            ret, 0,
            ("Failed to obtain hostname of client %s" % self.clients[0]))
        g.log.info("Obtained hostname of client. IP- %s, hostname- %s",
                   self.clients[0], hostname_client1.strip())

        # Set Authentication
        option = {"auth.reject": hostname_client1.strip()}
        ret = set_volume_options(self.mnode, self.volname, option)
        self.assertTrue(
            ret, ("Failed to set authentication with option: %s" % option))
        g.log.info("Authentication Set successfully with option: %s", option)

        # Fetching all the bricks
        self.mountpoint = "/mnt/testvol"
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Brick list is empty")
        g.log.info("Brick List : %s", bricks_list)

        # Check are bricks online
        ret = are_bricks_online(self.mnode, self.volname, bricks_list)
        self.assertTrue(ret, "All bricks are not online")

        # Using this way to check because of bug 1586036
        # Mounting volume
        ret, _, _ = mount_volume(self.volname, self.mount_type,
                                 self.mountpoint, self.mnode, self.clients[0])

        # Checking if volume is mounted
        out = is_mounted(self.volname,
                         self.mountpoint,
                         self.mnode,
                         self.clients[0],
                         self.mount_type,
                         user='******')
        if (ret == 0) & (not out):
            g.log.error("Mount executed successfully due to bug 1586036")
        elif (ret == 1) & (not out):
            g.log.info("Expected:Mounting has failed successfully")
        else:
            raise ExecutionError(
                "Unexpected Mounting of Volume %s successful" % self.volname)

        # Checking client logs for authentication error
        cmd = ("grep AUTH_FAILED /var/log/glusterfs/mnt-" "testvol.log")
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(
            ret, 0, "Mounting has not failed due to "
            "authentication error")
        g.log.info("Mounting has failed due to authentication error")

        # Mounting the vol on client2
        # Check bricks are online
        ret = are_bricks_online(self.mnode, self.volname, bricks_list)
        self.assertTrue(ret, "All bricks are not online")

        # Mounting Volume
        ret, _, _ = mount_volume(self.volname, self.mount_type,
                                 self.mountpoint, self.mnode, self.clients[1])
        self.assertEqual(ret, 0, "Failed to mount volume")
        g.log.info("Mounted Successfully")

        # Checking if volume is mounted
        out = is_mounted(self.volname,
                         self.mountpoint,
                         self.mnode,
                         self.clients[1],
                         self.mount_type,
                         user='******')
        self.assertTrue(out, "Volume %s has failed to mount" % self.volname)

        # Reset Volume
        ret, _, _ = volume_reset(mnode=self.mnode, volname=self.volname)
        self.assertEqual(ret, 0, "Failed to reset volume")
        g.log.info("Volume %s reset operation is successful", self.volname)

        # Checking if bricks are online
        ret = are_bricks_online(self.mnode, self.volname, bricks_list)
        self.assertTrue(ret, "All bricks are not online")

        # Mounting Volume
        ret, _, _ = mount_volume(self.volname, self.mount_type,
                                 self.mountpoint, self.mnode, self.clients[0])
        self.assertEqual(ret, 0, "Failed to mount volume")
        g.log.info("Mounted Successfully")

        # Checking if Volume is mounted
        out = is_mounted(self.volname,
                         self.mountpoint,
                         self.servers[0],
                         self.clients[0],
                         self.mount_type,
                         user='******')
        self.assertTrue(out, "Volume %s has failed to mount" % self.volname)
        g.log.info("Volume is mounted successfully %s", self.volname)
    def test_snap_rebalance(self):
        # pylint: disable=too-many-statements, too-many-locals
        """

        Snapshot rebalance contains tests which verify snapshot clone,
        creating snapshots and performing I/O on mountpoints

        Steps:

        1. Create snapshot of a volume
        2. Activate snapshot
        3. Clone snapshot and Activate
        4. Mount Cloned volume
        5. Perform I/O on mount point
        6. Calculate arequal for bricks and mountpoints
        7. Add more bricks to the cloned volume
        8. Initiate rebalance
        9. Validate arequal of bricks and mountpoints
        """

        # Creating snapshot:
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(
            ret, 0, ("Failed to create snapshot for volume %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume %s", self.snap,
                   self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0,
                         ("Failed to Activate snapshot %s" % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # Creating a Clone of snapshot:
        g.log.info("creating Clone Snapshot")
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone)
        self.assertEqual(ret, 0, ("Failed to clone volume %s" % self.clone))
        g.log.info("clone volume %s created successfully", self.clone)

        # Starting clone volume
        g.log.info("starting clone volume")
        ret, _, _ = volume_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to start %s" % self.clone)
        g.log.info("clone volume %s started successfully", self.clone)

        # Mounting a clone volume
        g.log.info("Mounting created clone volume")
        ret, _, _ = mount_volume(self.clone, self.mount_type, self.mount1,
                                 self.mnode, self.clients[0])
        self.assertEqual(ret, 0,
                         "clone Volume mount failed for %s" % self.clone)
        g.log.info("cloned volume %s mounted Successfully", self.clone)

        # Validate clone volume mounted or not
        g.log.info("Validate clone volume mounted or not")
        ret = is_mounted(self.clone, self.mount1, self.mnode, self.clients[0],
                         self.mount_type)
        self.assertTrue(
            ret, "Cloned Volume not mounted on mount point: %s" % self.mount1)
        g.log.info("Cloned Volume %s mounted on %s", self.clone, self.mount1)

        # write files to mountpoint
        g.log.info("Starting IO on %s mountpoint...", self.mount1)
        all_mounts_procs = []
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name file %s" %
               (self.script_upload_path, self.mount1))
        proc = g.run(self.clients[0], cmd)
        all_mounts_procs.append(proc)

        self.check_arequal()

        # expanding volume
        g.log.info("Starting to expand volume")
        ret = expand_volume(self.mnode, self.clone, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, "Failed to expand volume %s" % self.clone)
        g.log.info("Expand volume successful")

        ret, _, _ = rebalance_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to start rebalance")
        g.log.info("Successfully started rebalance on the "
                   "volume %s", self.clone)

        # Log Rebalance status
        g.log.info("Log Rebalance status")
        _, _, _ = rebalance_status(self.mnode, self.clone)

        # Wait for rebalance to complete
        g.log.info("Waiting for rebalance to complete")
        ret = wait_for_rebalance_to_complete(self.mnode, self.clone)
        self.assertTrue(ret, ("Rebalance is not yet complete "
                              "on the volume %s", self.clone))
        g.log.info("Rebalance is successfully complete on "
                   "the volume %s", self.clone)

        # Check Rebalance status after rebalance is complete
        g.log.info("Checking Rebalance status")
        ret, _, _ = rebalance_status(self.mnode, self.clone)
        self.assertEqual(ret, 0, ("Failed to get rebalance status for "
                                  "the volume %s", self.clone))
        g.log.info("Successfully got rebalance status of the "
                   "volume %s", self.clone)

        self.check_arequal()
    def test_spurious_rebalance(self):
        """
        In this test case:
        1. Trusted storage Pool of 3 nodes
        2. Create a distributed volumes with 3 bricks
        3. Start the volume
        4. FUSE mount the gluster volume on a node outside the trusted pool
        5. Remove a brick from the volume
        6. Check remove-brick status
        7. Stop the remove brick process
        8. Perform fix-layout on the volume
        9. Get the rebalance fix-layout status
       10. Create a directory from mount point
       11. Check trusted.glusterfs.dht extended attribute for the newly
           created directory on the removed brick
        """

        # pylint: disable=too-many-statements
        my_servers = self.servers[0:3]
        my_server_info = {}
        for server in self.servers[0:3]:
            my_server_info[server] = self.all_servers_info[server]
        for index in range(1, 3):
            ret, _, _ = peer_probe(self.servers[0], self.servers[index])
            self.assertEqual(ret, 0, ("peer probe from %s to %s is failed",
                                      self.servers[0], self.servers[index]))
            g.log.info("peer probe is success from %s to "
                       "%s", self.servers[0], self.servers[index])

        self.volname = "testvol"
        bricks_list = form_bricks_list(self.mnode, self.volname, 3, my_servers,
                                       my_server_info)
        g.log.info("Creating a volume %s ", self.volname)
        ret, _, _ = volume_create(self.mnode,
                                  self.volname,
                                  bricks_list,
                                  force=False)
        self.assertEqual(ret, 0, ("Unable"
                                  "to create volume %s" % self.volname))
        g.log.info("Volume created successfully %s", self.volname)

        ret, _, _ = volume_start(self.mnode, self.volname, False)
        self.assertEqual(ret, 0, ("Failed to start the "
                                  "volume %s", self.volname))
        g.log.info("Get all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        # Mounting a volume
        ret, _, _ = mount_volume(self.volname,
                                 mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
        g.log.info("Volume mounted successfully : %s", self.volname)
        remove_brick_list = []
        remove_brick_list.append(bricks_list[2])
        ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
                                 'start')
        self.assertEqual(ret, 0, "Failed to start remove brick operation")
        g.log.info("Remove bricks operation started successfully")

        ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
                                 'stop')
        self.assertEqual(ret, 0, "Failed to stop remove brick operation")
        g.log.info("Remove bricks operation stopped successfully")

        g.log.info("Starting Fix-layoyt on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname, True)
        self.assertEqual(ret, 0, ("Failed to start rebalance for fix-layout"
                                  "on the volume %s", self.volname))
        g.log.info("Successfully started fix-layout on the volume %s",
                   self.volname)

        # Wait for fix-layout to complete
        g.log.info("Waiting for fix-layout to complete")
        ret = wait_for_fix_layout_to_complete(self.mnode, self.volname)
        self.assertTrue(ret, ("Fix-layout is not yet complete on the volume "
                              "%s", self.volname))
        g.log.info("Fix-layout is successfully complete on the volume %s",
                   self.volname)
        ret = mkdir(self.mounts[0].client_system,
                    "%s/dir1" % self.mounts[0].mountpoint)
        self.assertTrue(ret, ("Failed to create directory dir1"))
        g.log.info("directory dir1 is created successfully")

        brick_server, brick_dir = remove_brick_list[0].split(':')
        folder_name = brick_dir + "/dir1"
        g.log.info("Check trusted.glusterfs.dht on host  %s for directory %s",
                   brick_server, folder_name)

        ret = get_fattr(brick_server, folder_name, 'trusted.glusterfs.dht')
        self.assertTrue(
            ret, ("Failed to get trusted.glusterfs.dht for %s" % folder_name))
        g.log.info("get trusted.glusterfs.dht xattr for %s successfully",
                   folder_name)
    def test_rebalance_hang(self):
        """
        In this test case:
        1. Trusted storage Pool of 2 nodes
        2. Create a distributed volumes with 2 bricks
        3. Start the volume
        4. Mount the volume
        5. Add some data file on mount
        6. Start rebalance with force
        7. kill glusterd on 2nd node
        8. Issue volume related command
        """

        # pylint: disable=too-many-statements
        my_server_info = {
            self.servers[0]: self.all_servers_info[self.servers[0]]
        }
        my_servers = self.servers[0:2]
        index = 1
        ret, _, _ = peer_probe(self.servers[0], self.servers[index])
        self.assertEqual(ret, 0, ("peer probe from %s to %s is failed",
                                  self.servers[0], self.servers[index]))
        g.log.info("peer probe is success from %s to "
                   "%s", self.servers[0], self.servers[index])
        key = self.servers[index]
        my_server_info[key] = self.all_servers_info[key]

        self.volname = "testvol"
        bricks_list = form_bricks_list(self.mnode, self.volname, 2, my_servers,
                                       my_server_info)
        g.log.info("Creating a volume %s ", self.volname)
        ret, _, _ = volume_create(self.mnode,
                                  self.volname,
                                  bricks_list,
                                  force=False)
        self.assertEqual(ret, 0, ("Unable"
                                  "to create volume %s" % self.volname))
        g.log.info("Volume created successfully %s", self.volname)

        ret, _, _ = volume_start(self.mnode, self.volname, False)
        self.assertEqual(ret, 0, ("Failed to start the "
                                  "volume %s", self.volname))
        g.log.info("Get all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        # Mounting a volume
        ret, _, _ = mount_volume(self.volname,
                                 mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
        g.log.info("Volume mounted successfully : %s", self.volname)

        self.all_mounts_procs = []
        # Creating files
        command = ("cd %s/ ; "
                   "for i in `seq 1 10` ; "
                   "do mkdir l1_dir.$i ; "
                   "for j in `seq 1 5` ; "
                   "do mkdir l1_dir.$i/l2_dir.$j ; "
                   "for k in `seq 1 10` ; "
                   "do dd if=/dev/urandom of=l1_dir.$i/l2_dir.$j/test.$k "
                   "bs=128k count=$k ; "
                   "done ; "
                   "done ; "
                   "done ; " % (self.mounts[0].mountpoint))

        proc = g.run_async(self.mounts[0].client_system,
                           command,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)
        self.io_validation_complete = False
        # Validate IO
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.io_validation_complete = True
        self.assertTrue(ret, "IO failed on some of the clients")

        g.log.info("Starting rebalance with force on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname, False, True)
        self.assertEqual(
            ret, 0, ("Failed to start rebalance for volume %s", self.volname))
        g.log.info("Successfully rebalance on the volume %s", self.volname)

        ret = stop_glusterd(self.servers[1])
        self.assertTrue(ret, "Failed to stop glusterd on one of the node")
        ret = is_glusterd_running(self.servers[1])
        self.assertNotEqual(
            ret, 0, ("Glusterd is not stopped on servers %s", self.servers[1]))
        g.log.info("Glusterd stop on the nodes : %s succeeded",
                   self.servers[1])

        # Wait for rebalance to complete
        g.log.info("Waiting for rebalance to complete")
        ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
        self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
                              "%s", self.volname))
        g.log.info("Rebalance is successfully complete on the volume %s",
                   self.volname)

        vol_status = get_volume_status(self.mnode, self.volname)
        self.assertIsNotNone(
            vol_status, "Failed to get volume "
            "status for %s" % self.volname)

        # Start glusterd on the node where it is stopped
        ret = start_glusterd(self.servers[1])
        self.assertTrue(ret, "glusterd start on the node failed")
        count = 0
        while count < 60:
            ret = is_glusterd_running(self.servers[1])
            if not ret:
                break
            sleep(2)
            count += 1
        self.assertEqual(ret, 0,
                         "glusterd is not running on %s" % self.servers[1])
        g.log.info("Glusterd start on the nodes : %s "
                   "succeeded", self.servers[1])
    def test_subdir_when_renamed(self):

        # pylint: disable=too-many-statements
        """
        Mount the volume
        Create 1 subdir on mountpoint "d1"
        Auth allow - Client1(d1), Client2(full volume)
        Mount the subdir "d1" on client1 and the volume on client2
        Start IO on all the mount points
        Perform a rename operation from client2: rename the subdir
        "d1" to "d1_renamed"
        Unmount the volume and subdir from the clients
        Try mounting "d1" on client1. This should fail.
        Try mounting "d1_renamed" on client1. This should fail.
        Again set authentication. Auth allow -
        Client1(d1_renamed), Client2(full volume)
        Mount "d1_renamed" on client1 and the volume on client2
        """

        # Create directory d1 on mount point
        ret = mkdir(self.mounts[0].client_system,
                    "%s/d1" % self.mounts[0].mountpoint)
        self.assertTrue(
            ret, ("Failed to create directory 'd1' on "
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        # unmount volume
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Volumes Unmount failed")
        g.log.info("Volumes Unmounted successfully")

        # Set authentication on the subdirectoy "d1" to access by client1
        # and volume to access by client2
        g.log.info(
            'Setting authentication on subdirectory d1 to access'
            'by client %s and on volume to access by client %s',
            self.clients[0], self.clients[1])
        ret = set_auth_allow(self.volname, self.mnode, {
            '/d1': [self.clients[0]],
            '/': [self.clients[1]]
        })
        self.assertTrue(
            ret, 'Failed to set Authentication on volume %s' % self.volname)

        # Creating mount list for mounting subdir mount and volume
        self.subdir_mounts = [
            copy.deepcopy(self.mounts[0]),
            copy.deepcopy(self.mounts[1])
        ]
        self.subdir_mounts[0].volname = "%s/d1" % self.volname
        self.subdir_mounts[0].client_system = self.clients[0]
        self.subdir_mounts[1].client_system = self.clients[1]

        # Mount Subdirectory d1 on client 1 and volume on client 2
        for mount_obj in self.subdir_mounts:
            mountpoint = mount_obj.mountpoint
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted %s on client %s",
                       mount_obj.volname, mount_obj.client_system)
        g.log.info("Successfully mounted sub directory and volume to"
                   "authenticated clients")

        # Start IO on all the mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.subdir_mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.subdir_mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Rename the subdir "d1" to "d1_renamed" from client2
        source_fpath = "%s/d1" % mountpoint
        dest_fpath = "%s/d1_renamed" % mountpoint
        ret = move_file(self.clients[1], source_fpath, dest_fpath)
        self.assertTrue(ret, "Rename subdirectory failed")
        g.log.info('Renamed directory %s to %s', source_fpath, dest_fpath)

        # unmount volume and subdir from client
        ret = self.unmount_volume(self.subdir_mounts)
        self.assertTrue(ret, "Volumes UnMount failed")
        g.log.info("Volumes Unmounted successfully")

        # Try mounting subdir "d1" on client1
        _, _, _ = mount_volume("%s/d1" % self.volname, self.mount_type,
                               mountpoint, self.mnode, self.clients[0])

        ret = is_mounted(self.volname, mountpoint, self.mnode, self.clients[0],
                         self.mount_type)
        self.assertEqual(
            ret, 0, "d1 mount should have failed. But d1 is "
            "successfully mounted on mount point: %s" % mountpoint)
        g.log.info("subdir %s/d1 is not mounted as expected on %s",
                   self.volname, mountpoint)

        # Try mounting subdir "d1_renamed" on client1
        _, _, _ = mount_volume("%s/d1_renamed" % self.volname, self.mount_type,
                               mountpoint, self.mnode, self.clients[0])

        ret = is_mounted("%s/d1_renamed" % self.volname, mountpoint,
                         self.mnode, self.clients[0], self.mount_type)
        self.assertEqual(
            ret, 0, "d1_renamed mount should have failed. But "
            "d1_renamed is successfully mounted on: %s" % mountpoint)
        g.log.info("subdir %s/d1_renamed is not mounted as expected on %s",
                   self.volname, mountpoint)

        # Set authentication on the subdirectory "d1_renamed" to access
        # by client1 and volume to access by client2
        g.log.info(
            'Setting authentication on subdirectory d1_renamed to '
            'access by client %s and on volume to access by client %s',
            self.clients[0], self.clients[1])
        ret = set_auth_allow(self.volname, self.mnode, {
            '/d1_renamed': [self.clients[0]],
            '/': [self.clients[1]]
        })
        self.assertTrue(
            ret, 'Failed to set Authentication on volume %s' % self.volname)

        # Overwriting the list of subdir mount, directory d1 to d1_renamed
        self.subdir_mounts[0].volname = "%s/d1_renamed" % self.volname

        # Mount Subdirectory d1_renamed on client 1 and volume on client 2
        for mount_obj in self.subdir_mounts:
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount  %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted %s on client %s",
                       mount_obj.volname, mount_obj.client_system)

        g.log.info("Successfully mounted sub directory and volume to"
                   "authenticated clients")

        # Get stat of all the files/dirs created from both clients.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")
    def test_create_vol_used_bricks(self):
        '''
        -> Create distributed-replica Volume
        -> Add 6 bricks to the volume
        -> Mount the volume
        -> Perform some I/O's on mount point
        -> unmount the volume
        -> Stop and delete the volume
        -> Create another volume using bricks of deleted volume
        '''

        # Create and start a volume
        self.volume['name'] = "test_create_vol_with_fresh_bricks"
        self.volname = "test_create_vol_with_fresh_bricks"
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(ret, "Failed to create and start volume")

        # Forming brick list
        brick_list = form_bricks_list(self.mnode, self.volname, 6,
                                      self.servers, self.all_servers_info)
        # Adding bricks to the volume
        ret, _, _ = add_brick(self.mnode, self.volname, brick_list)
        self.assertEqual(
            ret, 0, "Failed to add bricks to the volume %s" % self.volname)
        g.log.info("Bricks added successfully to the volume %s", self.volname)

        # Mounting the volume.
        for mount_obj in self.mounts:
            ret, _, _ = mount_volume(self.volname,
                                     mtype=self.mount_type,
                                     mpoint=mount_obj.mountpoint,
                                     mserver=self.mnode,
                                     mclient=mount_obj.client_system)
            self.assertEqual(ret, 0,
                             ("Volume %s is not mounted") % (self.volname))
            g.log.info("Volume mounted successfully : %s", self.volname)

        # run IOs
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "/usr/bin/env python %s create_deep_dirs_with_files "
                "--dirname-start-num %d --dir-depth 2 "
                "--dir-length 5 --max-num-of-dirs 3 "
                "--num-of-files 10 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
            self.counter = self.counter + 10

        # Validate IO
        self.assertTrue(validate_io_procs(self.all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")

        # Unmounting the volume.
        for mount_obj in self.mounts:
            ret, _, _ = umount_volume(mclient=mount_obj.client_system,
                                      mpoint=mount_obj.mountpoint)
            self.assertEqual(ret, 0,
                             "Volume %s is not unmounted" % (self.volname))
            g.log.info("Volume unmounted successfully : %s", self.volname)

        # Getting brick list
        self.brick_list = get_all_bricks(self.mnode, self.volname)
        if not self.brick_list:
            raise ExecutionError("Failed to get the brick list of %s" %
                                 self.volname)

        # Stop volume
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to stop the volume %s" % self.volname)
        g.log.info("Volume %s stopped successfully", self.volname)

        # Delete Volume
        ret, _, _ = g.run(
            self.mnode,
            "gluster volume delete %s --mode=script" % self.volname)
        self.assertEqual(ret, 0, "Failed to delete volume %s" % self.volname)
        g.log.info("Volume deleted successfully %s", self.volname)

        # Create another volume by using bricks of deleted volume
        self.volname = "test_create_vol_used_bricks"
        ret, _, err = volume_create(self.mnode,
                                    self.volname,
                                    brick_list[0:6],
                                    replica_count=3)
        self.assertNotEqual(
            ret, 0, "Volume creation should fail with used "
            "bricks but volume creation success")
        g.log.info("Failed to create volume with used bricks")

        # Checking failed message of volume creation
        msg = ' '.join([
            'volume create: test_create_vol_used_bricks: failed:',
            brick_list[0].split(':')[1], 'is already part of a volume'
        ])
        self.assertIn(
            msg, err, "Incorrect error message for volume creation "
            "with used bricks")
        g.log.info("correct error message for volume creation with "
                   "used bricks")
    def test_detach_node_used_to_mount(self):
        # pylint: disable=too-many-statements
        """
        Test case:
        1.Create a 1X3 volume with only 3 nodes from the cluster.
        2.Mount volume on client node using the ip of the fourth node.
        3.Write IOs to the volume.
        4.Detach node N4 from cluster.
        5.Create a new directory on the mount point.
        6.Create a few files using the same command used in step 3.
        7.Add three more bricks to make the volume
          2x3 using add-brick command.
        8.Do a gluster volume rebalance on the volume.
        9.Create more files from the client on the mount point.
        10.Check for files on bricks from both replica sets.
        11.Create a new directory from the client on the mount point.
        12.Check for directory in both replica sets.
        """

        # Create and start a volume
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(ret, "Failed to create and start volume")
        g.log.info("Volume %s created successfully", self.volname)

        # Mounting the volume.
        ret, _, _ = mount_volume(self.volname,
                                 mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.servers[4],
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
        g.log.info("Volume mounted successfully using %s", self.servers[4])

        # Creating 100 files.
        command = ('for number in `seq 1 100`;do touch ' +
                   self.mounts[0].mountpoint + '/file$number; done')
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "File creation failed.")
        g.log.info("Files create on mount point.")

        # Detach N4 from the list.
        ret, _, _ = peer_detach(self.mnode, self.servers[4])
        self.assertEqual(ret, 0, "Failed to detach %s" % self.servers[4])
        g.log.info("Peer detach successful %s", self.servers[4])

        # Creating a dir.
        ret = mkdir(self.mounts[0].client_system,
                    self.mounts[0].mountpoint + "/dir1",
                    parents=True)
        self.assertTrue(ret, ("Failed to create directory dir1."))
        g.log.info("Directory dir1 created successfully.")

        # Creating 100 files.
        command = ('for number in `seq 101 200`;do touch ' +
                   self.mounts[0].mountpoint + '/file$number; done')
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "File creation failed.")
        g.log.info("Files create on mount point.")

        # Forming brick list
        brick_list = form_bricks_list_to_add_brick(self.mnode, self.volname,
                                                   self.servers,
                                                   self.all_servers_info)

        # Adding bricks
        ret, _, _ = add_brick(self.mnode, self.volname, brick_list)
        self.assertEqual(ret, 0,
                         "Failed to add brick to the volume %s" % self.volname)
        g.log.info("Brick added successfully to the volume %s", self.volname)

        # Start rebalance for volume.
        g.log.info("Starting rebalance on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to start rebalance "
                                  "on the volume %s", self.volname))
        g.log.info("Successfully started rebalance on the volume %s",
                   self.volname)

        # Creating 100 files.
        command = ('for number in `seq 201 300`;do touch ' +
                   self.mounts[0].mountpoint + '/file$number; done')
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "File creation failed.")
        g.log.info("Files create on mount point.")

        # Check for files on bricks.
        attempts = 10
        while attempts:
            number = str(randint(1, 300))
            for brick in brick_list:
                brick_server, brick_dir = brick.split(':')
                file_name = brick_dir + "/file" + number
                if file_exists(brick_server, file_name):
                    g.log.info("Check xattr"
                               " on host %s for file %s", brick_server,
                               file_name)
                    ret = get_fattr_list(brick_server, file_name)
                    self.assertTrue(ret,
                                    ("Failed to get xattr for %s" % file_name))
                    g.log.info("Got xattr for %s successfully", file_name)
            attempts -= 1

        # Creating a dir.
        ret = mkdir(self.mounts[0].client_system,
                    self.mounts[0].mountpoint + "/dir2")
        if not ret:
            attempts = 5
            while attempts:
                ret = mkdir(self.mounts[0].client_system,
                            self.mounts[0].mountpoint + "/dir2")
                if ret:
                    break
                attempts -= 1
        self.assertTrue(ret, ("Failed to create directory dir2."))
        g.log.info("Directory dir2 created successfully.")

        # Check for directory in both replica sets.
        for brick in brick_list:
            brick_server, brick_dir = brick.split(':')
            folder_name = brick_dir + "/dir2"
            if file_exists(brick_server, folder_name):
                g.log.info(
                    "Check trusted.glusterfs.dht"
                    " on host %s for directory %s", brick_server, folder_name)
                ret = get_fattr(brick_server, folder_name,
                                'trusted.glusterfs.dht')
                self.assertTrue(ret, ("Failed to get trusted.glusterfs.dht"
                                      " xattr for %s" % folder_name))
                g.log.info(
                    "Get trusted.glusterfs.dht xattr"
                    " for %s successfully", folder_name)
    def test_induce_holes_then_lookup(self):

        # pylint: disable=too-many-statements
        m_point = self.mounts[0].mountpoint
        command = 'mkdir -p ' + m_point + '/testdir'
        ret, _, _ = g.run(self.clients[0], command)
        self.assertEqual(ret, 0, "mkdir failed")
        g.log.info("mkdir is successful")

        # DHT Layout validation
        g.log.debug("Verifying hash layout values %s:%s", self.clients[0],
                    self.mounts[0].mountpoint)
        ret = validate_files_in_dir(self.clients[0],
                                    self.mounts[0].mountpoint,
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "LAYOUT_IS_COMPLETE: FAILED")
        g.log.info("LAYOUT_IS_COMPLETE: PASS")

        # Log Volume Info and Status before shrinking the volume.
        g.log.info("Logging volume info and Status before shrinking volume")
        log_volume_info_and_status(self.mnode, self.volname)

        # Shrinking volume by removing bricks
        g.log.info("Start removing bricks from volume")
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 self.remove_brick_list, "force")
        self.assertFalse(ret, "Remove-brick with force: FAIL")
        g.log.info("Remove-brick with force: PASS")

        # Check the layout
        dirpath = '/testdir'
        ret = is_layout_complete(self.mnode, self.volname, dirpath)
        self.assertFalse(ret, "Volume %s: Layout is complete")
        g.log.info("Volume %s: Layout has some holes")

        # Mount the volume on a new mount point
        mount_point = tempfile.mkdtemp()
        ret, _, _ = mount_volume(self.volname,
                                 mtype='glusterfs',
                                 mpoint=mount_point,
                                 mserver=self.mnode,
                                 mclient=self.mnode)
        self.assertEqual(
            ret, 0, ("Failed to do gluster mount on volume %s", self.volname))
        g.log.info("Volume %s: mount success", self.mnode)

        # Send a look up on the directory
        cmd = 'ls %s%s' % (mount_point, dirpath)
        ret, _, err = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0,
                         ("Lookup failed on %s with error %s", (dirpath, err)))
        g.log.info("Lookup sent successfully on %s", dirpath)

        # DHT Layout validation
        g.log.debug("Verifying hash layout values %s:%s", self.clients[0],
                    self.mounts[0].mountpoint)
        ret = validate_files_in_dir(self.clients[0],
                                    self.mounts[0].mountpoint,
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "LAYOUT_IS_COMPLETE: FAILED")
        g.log.info("LAYOUT_IS_COMPLETE: PASS")
Example #21
    def test_clone_delete_snap(self):
        """
        Clone from snapshots of one volume
        * Create and mount the volume
        * Enable some volume options
        * Create 2 snapshots and activate them
        * Reset the volume
        * Create a clone of each snapshot created
        * Mount both the clones
        * Perform I/O on the mount points
        * Check volume options of the cloned volumes
        * Create a snapshot of each cloned volume
        * Cleanup snapshots and volumes
        """

        # pylint: disable=too-many-statements, too-many-locals
        # Enabling Volume options on the volume and validating
        g.log.info("Enabling volume options for volume %s ", self.volname)
        options = {" features.uss": "enable"}
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(
            ret, ("Failed to set volume options for volume %s" % self.volname))
        g.log.info("Successfully set volume options"
                   "for volume %s", self.volname)

        # Validate feature.uss enabled or not
        g.log.info("Validating feature.uss is enabled")
        option = "features.uss"
        vol_option = get_volume_options(self.mnode, self.volname, option)
        self.assertEqual(vol_option['features.uss'], 'enable', "Failed"
                         " to validate "
                         "volume options")
        g.log.info("Successfully validated volume options"
                   "for volume %s", self.volname)

        # Creating snapshot
        g.log.info("Starting to Create snapshot")
        for snap_count in range(0, 2):
            ret, _, _ = snap_create(self.mnode, self.volname,
                                    "snap%s" % snap_count)
            self.assertEqual(
                ret, 0,
                ("Failed to create snapshot for volume %s" % self.volname))
            g.log.info("Snapshot snap%s created successfully"
                       "for volume %s", snap_count, self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        for snap_count in range(0, 2):
            ret, _, _ = snap_activate(self.mnode, "snap%s" % snap_count)
            self.assertEqual(
                ret, 0, ("Failed to Activate snapshot snap%s" % snap_count))
            g.log.info("Snapshot snap%s activated successfully", snap_count)

        # Reset volume:
        g.log.info("Starting to Reset Volume")
        ret, _, _ = volume_reset(self.mnode, self.volname, force=False)
        self.assertEqual(ret, 0, ("Failed to reset volume %s" % self.volname))
        g.log.info("Reset Volume on volume %s is Successful", self.volname)

        # Validate feature.uss enabled or not
        g.log.info("Validating feature.uss is enabled")
        option = "features.uss"
        vol_option = get_volume_options(self.mnode, self.volname, option)
        self.assertEqual(vol_option['features.uss'], 'off', "Failed"
                         " to validate "
                         "volume options")
        g.log.info("Successfully validated volume options"
                   "for volume %s", self.volname)

        # Verify volume's all process are online
        g.log.info("Starting to Verify volume's all process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(ret, ("Volume %s : All process are"
                              "not online" % self.volname))
        g.log.info("Volume %s : All process are online", self.volname)

        # Creating and starting a Clone of snapshot
        g.log.info("Starting to Clone Snapshot")
        for clone_count in range(0, 2):
            ret, _, _ = snap_clone(self.mnode, "snap%s" % clone_count,
                                   "clone%s" % clone_count)
            self.assertEqual(ret, 0,
                             ("Failed to clone clone%s volume" % clone_count))
            g.log.info("clone%s volume created successfully", clone_count)

        # Start Cloned volume
        g.log.info("starting to Validate clone volumes are started")
        for clone_count in range(0, 2):
            ret, _, _ = volume_start(self.mnode, "clone%s" % clone_count)
            self.assertEqual(ret, 0, ("Failed to start clone%s" % clone_count))
            g.log.info("clone%s started successfully", clone_count)
        g.log.info("All the clone volumes are started Successfully")

        # Validate Volume start of cloned volume
        g.log.info("Starting to Validate Volume start")
        for clone_count in range(0, 2):
            vol_info = get_volume_info(self.mnode, "clone%s" % clone_count)
            if vol_info["clone%s" % clone_count]['statusStr'] != 'Started':
                raise ExecutionError("Failed to get volume info for clone%s" %
                                     clone_count)
            g.log.info("Volume clone%s is in Started state", clone_count)

        # Validate feature.uss enabled or not
        g.log.info("Validating feature.uss is enabled")
        option = "features.uss"
        for clone_count in range(0, 2):
            vol_option = get_volume_options(self.mnode,
                                            "clone%s" % clone_count, option)
            self.assertEqual(vol_option['features.uss'], 'enable', "Failed"
                             " to validate"
                             "volume options")
            g.log.info(
                "Successfully validated volume options"
                "for volume clone%s", clone_count)

        # Mount both the cloned volumes
        g.log.info("Mounting Cloned Volumes")
        for mount_obj in range(0, 2):
            self.mpoint = "/mnt/clone%s" % mount_obj
            cmd = "mkdir -p  %s" % self.mpoint
            ret, _, _ = g.run(self.clients[0], cmd)
            self.assertEqual(ret, 0, ("Creation of directory %s"
                                      "for mounting"
                                      "volume %s failed: Directory already"
                                      "present" %
                                      (self.mpoint, "clone%s" % mount_obj)))
            g.log.info(
                "Creation of directory %s for mounting volume %s "
                "success", self.mpoint, ("clone%s" % mount_obj))
            ret, _, _ = mount_volume("clone%s" % mount_obj, self.mount_type,
                                     self.mpoint, self.mnode, self.clients[0])
            self.assertEqual(ret, 0, ("clone%s is not mounted" % mount_obj))
            g.log.info("clone%s is mounted Successfully", mount_obj)

        # Perform I/O on mount
        # Start I/O on all mounts
        g.log.info("Starting to Perform I/O on Mountpoint")
        all_mounts_procs = []
        for mount_obj in range(0, 2):
            cmd = ("cd /mnt/clone%s/; for i in {1..10};"
                   "do touch file$i; done; cd;") % mount_obj
            proc = g.run(self.clients[0], cmd)
            all_mounts_procs.append(proc)
        g.log.info("I/O on mountpoint is successful")

        # create snapshot
        g.log.info("Starting to Create snapshot of clone volume")
        ret0, _, _ = snap_create(self.mnode, "clone0", "snap2")
        self.assertEqual(ret0, 0, "Failed to create the snapshot"
                         "snap2 from clone0")
        g.log.info("Snapshots snap2 created successfully from clone0")
        ret1, _, _ = snap_create(self.mnode, "clone1", "snap3")
        self.assertEqual(ret1, 0, "Failed to create the snapshot snap3"
                         "from clone1")
        g.log.info("Snapshots snap3 created successfully from clone1")

        # Listing all Snapshots present
        g.log.info("Starting to list all snapshots")
        ret, _, _ = snap_list(self.mnode)
        self.assertEqual(ret, 0, ("Failed to list snapshots present"))
        g.log.info("Snapshots successfully listed")
    def test_snap_self_heal(self):
        """
        Steps:

        1. create a volume
        2. mount volume
        3. create snapshot of that volume
        4. Activate snapshot
        5. Clone snapshot and Mount
        6. Perform I/O
        7. Bring down a few bricks from the volume without
           affecting the volume or cluster.
        8. Perform I/O
        9. Bring the offline bricks back online
        10. Validate heal is complete with arequal

        """
        # pylint: disable=too-many-statements, too-many-locals
        # Creating snapshot:
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(
            ret, 0, ("Failed to create snapshot for volume %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume %s", self.snap,
                   self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0,
                         ("Failed to Activate snapshot %s" % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # snapshot list
        ret, _, _ = snap_list(self.mnode)
        self.assertEqual(ret, 0, ("Failed to list all the snapshot"))
        g.log.info("Snapshot list command was successful")

        # Creating a Clone volume from snapshot:
        g.log.info("Starting to Clone volume from Snapshot")
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone)
        self.assertEqual(ret, 0, ("Failed to clone %s from snapshot %s" %
                                  (self.clone, self.snap)))
        g.log.info("%s created successfully", self.clone)

        # Start the cloned volume
        g.log.info("Starting the cloned volume")
        ret, _, _ = volume_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to start clone %s" % self.clone)
        g.log.info("clone volume %s started successfully", self.clone)

        # Mounting a clone volume
        g.log.info("Mounting a clone volume")
        ret, _, _ = mount_volume(self.clone, self.mount_type, self.mount1,
                                 self.mnode, self.clients[0])
        self.assertEqual(ret, 0,
                         "Failed to mount clone Volume %s" % self.clone)
        g.log.info("Clone volume %s mounted Successfully", self.clone)

        # Checking cloned volume mounted or not
        ret = is_mounted(self.clone, self.mount1, self.mnode, self.clients[0],
                         self.mount_type)
        self.assertTrue(
            ret,
            "Failed to mount clone volume on mount point: %s" % self.mount1)
        g.log.info("clone Volume %s mounted on %s", self.clone, self.mount1)

        # write files on all mounts
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mount1)
        all_mounts_procs = []
        cmd = ("python %s create_files "
               "-f 10 --base-file-name file %s" %
               (self.script_upload_path, self.mount1))
        proc = g.run(self.clients[0], cmd)
        all_mounts_procs.append(proc)
        g.log.info("Successful in creating I/O on mounts")

        # get the bricks from the volume
        g.log.info("Fetching bricks for the volume : %s", self.clone)
        bricks_list = get_all_bricks(self.mnode, self.clone)
        g.log.info("Brick List : %s", bricks_list)

        # Select bricks to bring offline
        g.log.info("Starting to bring bricks to offline")
        bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
            self.mnode, self.volname))
        bricks_to_bring_offline = list(filter(
            None, (bricks_to_bring_offline_dict['hot_tier_bricks'] +
                   bricks_to_bring_offline_dict['cold_tier_bricks'] +
                   bricks_to_bring_offline_dict['volume_bricks'])))
        g.log.info("Brick to bring offline: %s ", bricks_to_bring_offline)
        ret = bring_bricks_offline(self.clone, bricks_to_bring_offline)
        self.assertTrue(ret, "Failed to bring the bricks offline")
        g.log.info("Successful in bringing bricks: %s offline",
                   bricks_to_bring_offline)

        # Offline Bricks list
        offline_bricks = get_offline_bricks_list(self.mnode, self.clone)
        self.assertIsNotNone(
            offline_bricks, "Failed to get offline bricklist"
            "for volume %s" % self.clone)
        for bricks in offline_bricks:
            self.assertIn(bricks, bricks_to_bring_offline,
                          "Failed to validate "
                          "Bricks offline")
        g.log.info("Bricks Offline: %s", offline_bricks)

        # Online Bricks list
        online_bricks = get_online_bricks_list(self.mnode, self.clone)
        self.assertIsNotNone(
            online_bricks, "Failed to get online bricks"
            " for volume %s" % self.clone)
        g.log.info("Bricks Online: %s", online_bricks)

        # write files mountpoint
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mount1)
        all_mounts_procs = []
        cmd = ("python %s create_files "
               "-f 10 --base-file-name file %s" %
               (self.script_upload_path, self.mount1))
        proc = g.run(self.clients[0], cmd)
        all_mounts_procs.append(proc)
        g.log.info("Successful in creating I/O on mounts")

        # Bring all bricks online
        g.log.info("bring all bricks online")
        ret = bring_bricks_online(self.mnode, self.clone,
                                  bricks_to_bring_offline)
        self.assertTrue(ret, "Failed to bring bricks online")
        g.log.info("Successful in bringing all bricks online")

        # Validate Bricks are online
        g.log.info("Validating all bricks are online")
        ret = are_bricks_online(self.mnode, self.clone, bricks_list)
        self.assertTrue(ret, "Failed to bring all the bricks online")
        g.log.info("bricks online: %s", bricks_list)

        # Wait for volume processes to be online
        g.log.info("Wait for volume processes to be online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.clone)
        self.assertTrue(ret, ("Failed to wait for volume %s processes to "
                              "be online" % self.clone))
        g.log.info(
            "Successful in waiting for volume %s processes to be "
            "online", self.clone)

        # Verify volume's all process are online
        g.log.info("Verifying volume's all process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.clone)
        self.assertTrue(
            ret, ("Volume %s : All process are not online" % self.clone))
        g.log.info("Volume %s : All process are online", self.clone)

        # wait for the heal process to complete
        g.log.info("waiting for heal process to complete")
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, "Failed to complete the heal process")
        g.log.info("Successfully completed heal process")

        # Check arequal
        # get the subvolumes
        g.log.info("Starting to get sub-volumes for volume %s", self.clone)
        subvols = get_subvols(self.mnode, self.clone)
        num_subvols = len(subvols['volume_subvols'])
        g.log.info("Number of subvolumes in volume %s:", num_subvols)

        # Get arequals and compare
        g.log.info("Starting to Compare arequals")
        for i in range(0, num_subvols):
            # Get arequal for first brick
            subvol_brick_list = subvols['volume_subvols'][i]
            node, brick_path = subvol_brick_list[0].split(':')
            command = ('arequal-checksum -p %s '
                       '-i .glusterfs -i .landfill -i .trashcan' % brick_path)
            ret, arequal, _ = g.run(node, command)
            self.assertFalse(ret, 'Failed to get arequal on brick %s'
                             % subvol_brick_list[0])
            first_brick_total = arequal.splitlines()[-1].split(':')[-1]

            # Get arequal for every brick and compare with first brick
            for brick in subvol_brick_list:
                node, brick_path = brick.split(':')
                command = ('arequal-checksum -p %s '
                           '-i .glusterfs -i .landfill -i .trashcan'
                           % brick_path)
                ret, brick_arequal, _ = g.run(node, command)
                self.assertFalse(ret,
                                 'Failed to get arequal on brick %s' % brick)
                g.log.info('Getting arequal for %s is successful', brick)
                brick_total = brick_arequal.splitlines()[-1].split(':')[-1]
                self.assertEqual(
                    first_brick_total, brick_total,
                    'Arequals for subvol and %s are not equal' % brick)
                g.log.info('Arequals for subvol and %s are equal', brick)
        g.log.info('All arequals are equal for distributed-replicated')
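
# Standalone sketch (not part of the test above): the per-brick arequal
# collection used in the comparison loop above, factored into a helper. It
# assumes the arequal-checksum binary is installed on the brick nodes, exactly
# as the inline commands above already do; the helper name is hypothetical.
from glusto.core import Glusto as g


def _brick_arequal_total(node, brick_path):
    """Return the checksum from the last line of arequal output for one
    brick, or None if the command fails."""
    cmd = ('arequal-checksum -p %s '
           '-i .glusterfs -i .landfill -i .trashcan' % brick_path)
    ret, out, _ = g.run(node, cmd)
    if ret != 0:
        return None
    return out.splitlines()[-1].split(':')[-1]
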
Example #23
    def test_afr_dir_entry_creation_with_subvol_down(self):
        """
        1. Create a distributed-replicated(3X3)/distributed-arbiter(3X(2+1))
           and mount it on one client
        2. Kill 3 bricks corresponding to the 1st subvol
        3. Unmount and remount the volume on the same client
        4. Create deep dir from mount point
           mkdir -p dir1/subdir1/deepdir1
        5. Create files under dir1/subdir1/deepdir1; touch <filename>
        6. Now bring all sub-vols up by volume start force
        7. Validate backend bricks for dir creation, the subvol which is
           offline will have no dirs created, whereas other subvols will have
           dirs created from step 4
        8. Trigger heal from client by "#find . | xargs stat"
        9. Verify that the directory entries are created on all back-end bricks
        10. Create new dir (dir2) on location dir1/subdir1/deepdir1
        11. Trigger rebalance and wait for the completion
        12. Check backend bricks for all entries of dirs
        13. Check if files are getting created on the subvol which was offline
        """
        # Bring down first subvol of bricks offline
        self.subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
        first_subvol = self.subvols[0]
        ret = bring_bricks_offline(self.volname, first_subvol)
        self.assertTrue(
            ret, "Unable to bring {} bricks offline".format(first_subvol))

        # Check bricks are offline or not
        ret = are_bricks_offline(self.mnode, self.volname, first_subvol)
        self.assertTrue(ret, "Bricks {} are still online".format(first_subvol))

        # Unmount and remount the volume
        ret, _, _ = umount_volume(self.mounts[0].client_system,
                                  self.mounts[0].mountpoint)
        self.assertFalse(ret, "Failed to unmount volume.")
        ret, _, _ = mount_volume(self.volname, self.mount_type,
                                 self.mounts[0].mountpoint, self.mnode,
                                 self.mounts[0].client_system)
        self.assertFalse(ret, "Failed to remount volume.")
        g.log.info('Successfully umounted and remounted volume.')

        # At this step, a short sleep is required, otherwise file creation
        # will fail
        sleep(2)

        # Create dir `dir1/subdir1/deepdir1` on mount point
        directory1 = "dir1/subdir1/deepdir1"
        path = self.mounts[0].mountpoint + "/" + directory1
        ret = mkdir(self.mounts[0].client_system, path, parents=True)
        self.assertTrue(ret, "Directory {} creation failed".format(path))

        # Create files on the 2nd and 3rd subvols which are online
        brickobject = create_brickobjectlist(self.subvols, directory1)
        self.assertIsNotNone(brickobject, "Failed to get brick object list")
        self._create_number_of_files_on_the_subvol(brickobject[1],
                                                   directory1,
                                                   5,
                                                   mountpath=path)
        self._create_number_of_files_on_the_subvol(brickobject[2],
                                                   directory1,
                                                   5,
                                                   mountpath=path)

        # Bring bricks online using volume start force
        ret, _, err = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, err)
        g.log.info("Volume: %s started successfully", self.volname)

        # Check all bricks are online
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(
            ret, "Few process after volume start are offline for "
            "volume: {}".format(self.volname))

        # Validate Directory is not created on the bricks of the subvol which
        # is offline
        for subvol in self.subvols:
            self._check_file_exists(subvol,
                                    "/" + directory1,
                                    exists=(subvol != first_subvol))

        # Trigger heal from the client
        cmd = "cd {}; find . | xargs stat".format(self.mounts[0].mountpoint)
        ret, _, err = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, err)

        # Validate the directory1 is present on all the bricks
        for subvol in self.subvols:
            self._check_file_exists(subvol, "/" + directory1, exists=True)

        # Create new dir (dir2) on location dir1/subdir1/deepdir1
        directory2 = "/" + directory1 + '/dir2'
        path = self.mounts[0].mountpoint + directory2
        ret = mkdir(self.mounts[0].client_system, path, parents=True)
        self.assertTrue(ret, "Directory {} creation failed".format(path))

        # Trigger rebalance and validate the completion
        ret, _, err = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, err)
        g.log.info("Rebalance on volume %s started successfully", self.volname)
        ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
        self.assertTrue(
            ret, "Rebalance didn't complete on the volume: "
            "{}".format(self.volname))

        # Validate all dirs are present on all bricks in each subvols
        for subvol in self.subvols:
            for each_dir in ("/" + directory1, directory2):
                self._check_file_exists(subvol, each_dir, exists=True)

        # Validate if files are getting created on the subvol which was
        # offline
        self._create_number_of_files_on_the_subvol(brickobject[0],
                                                   directory1,
                                                   5,
                                                   mountpath=path)
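
# Standalone sketch (not part of the test above): the client-side heal trigger
# used in step 8 above, factored into a helper. It simply stats every entry
# under the mount so that lookups reach all bricks; the helper name is
# hypothetical.
from glusto.core import Glusto as g


def _trigger_lookup_heal(client, mountpoint):
    """Run 'find . | xargs stat' from the mount root to trigger lookups."""
    cmd = "cd {}; find . | xargs stat > /dev/null".format(mountpoint)
    ret, _, err = g.run(client, cmd)
    return ret == 0, err
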
    def test_subdir_mounts(self):
        """
        Mount the volume on client
        Create 2 subdirs on the mount point: dir1, dir2
        Auth allow - Client1(dir1), Client2(dir2)
        Mount dir1 on client1. Try mounting dir1 on client2
        Mount dir2 on client2. Try mounting dir2 on client1
        """
        # Create 2 subdirectories
        cmd = ("mkdir %s/dir1 && mkdir %s/dir2" %
               (self.mounts[0].mountpoint, self.mounts[0].mountpoint))
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to create directories on mountpoint")
        g.log.info("Directories created successfully on mountpoint")

        # unmount volume
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Volumes UnMount failed")
        g.log.info("Volumes UnMounted successfully")

        # Set auth allow permission on subdirs
        cmd = ("gluster volume set %s auth.allow '/dir1(%s),/dir2(%s)'" %
               (self.volname, self.clients[0], self.clients[1]))
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0, "Failed to set auth.allow on volume %s"
                         % self.volname)

        # Sometimes the mount command is returning exit code as 0 in case of
        # mount failures as well
        # Hence not asserting while running mount command in test case.
        # Instead asserting on basis on performing grep on mount point
        # BZ 1590711

        self.mpoint = "/mnt/Mount_Point1"

        # Test Subdir2 mount on client 1
        _, _, _ = mount_volume("%s/dir2" % self.volname, self.mount_type,
                               self.mpoint, self.mnode, self.clients[0])
        cmd = ("mount | grep %s") % self.mpoint
        ret, _, _ = g.run(self.clients[0], cmd)
        if ret == 0:
            raise ExecutionError("%s/dir2 mount should fail,"
                                 "But dir2 mounted successfully on"
                                 "unauthorized client" % self.volname)
        g.log.info("%s/dir2 is not mounted on unauthorized client",
                   self.volname)
        # Test Subdir1 mount on client 1
        _, _, _ = mount_volume("%s/dir1" % self.volname, self.mount_type,
                               self.mpoint, self.mnode, self.clients[0])
        cmd = ("mount | grep %s") % self.mpoint
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, ("%s/dir1 mount failed" % self.volname))
        g.log.info("%s/dir1 is mounted Successfully", self.volname)
        # Test Subdir1 mount on client 2
        _, _, _ = mount_volume("%s/dir1" % self.volname, self.mount_type,
                               self.mpoint, self.mnode, self.clients[1])
        cmd = ("mount | grep %s") % self.mpoint
        ret, _, _ = g.run(self.clients[1], cmd)
        if ret == 0:
            raise ExecutionError("%s/dir1 mount should fail,"
                                 "But dir1 mounted successfully on"
                                 "unauthorized client" % self.volname)
        g.log.info("%s/dir1 is not mounted on unauthorized client",
                   self.volname)
        # Test Subdir2 mount on client 2
        _, _, _ = mount_volume("%s/dir2" % self.volname, self.mount_type,
                               self.mpoint, self.mnode, self.clients[1])
        cmd = ("mount | grep %s") % self.mpoint
        ret, _, _ = g.run(self.clients[1], cmd)
        self.assertEqual(ret, 0, ("%s/dir2 mount failed" % self.volname))
        g.log.info("%s/dir2 is mounted Successfully", self.volname)
Example #25
    def test_remove_brick(self):
        """
        In this test case:
        1. Create a trusted storage pool of 4 nodes
        2. Create a distributed-replicated volume with 4 bricks
        3. Start the volume
        4. Fuse mount the gluster volume on a node outside the trusted pool
        5. Create some data files
        6. Start remove-brick operation for one replica pair
        7. Restart glusterd on all nodes
        8. Try to commit the remove-brick operation while rebalance
           is in progress, it should fail
        """

        # pylint: disable=too-many-statements
        my_servers = self.servers[0:4]
        my_server_info = {}
        for server in self.servers[0:4]:
            my_server_info[server] = self.all_servers_info[server]
        for index in range(1, 4):
            ret, _, _ = peer_probe(self.servers[0], self.servers[index])
            self.assertEqual(ret, 0, ("peer probe from %s to %s is failed",
                                      self.servers[0], self.servers[index]))
            g.log.info("peer probe is success from %s to "
                       "%s", self.servers[0], self.servers[index])

        # Validating whether the peer are connected or not
        # In jenkins This case is failing saying peers are not in connected
        # state, that is reason adding a check whether peers are connected
        # or not
        count = 0
        while count < 30:
            ret = is_peer_connected(self.mnode, my_servers)
            if ret:
                g.log.info("Peers are in connected state")
                break
            sleep(3)
            count = count + 1
        self.assertTrue(ret, "Some peers are not in connected state")

        self.volname = "testvol"
        bricks_list = form_bricks_list(self.mnode, self.volname, 4, my_servers,
                                       my_server_info)
        g.log.info("Creating a volume %s ", self.volname)
        kwargs = {}
        kwargs['replica_count'] = 2
        ret = volume_create(self.mnode,
                            self.volname,
                            bricks_list,
                            force=False,
                            **kwargs)
        self.assertEqual(ret[0], 0, ("Unable"
                                     "to create volume %s" % self.volname))
        g.log.info("Volume created successfully %s", self.volname)

        ret, _, _ = volume_start(self.mnode, self.volname, False)
        self.assertEqual(ret, 0, ("Failed to start the "
                                  "volume %s", self.volname))
        g.log.info("Get all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        # Mounting a volume
        ret, _, _ = mount_volume(self.volname,
                                 mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
        g.log.info("Volume mounted successfully : %s", self.volname)

        self.all_mounts_procs = []
        # Creating files
        command = ("cd %s/ ; "
                   "for i in `seq 1 10` ; "
                   "do mkdir l1_dir.$i ; "
                   "for j in `seq 1 5` ; "
                   "do mkdir l1_dir.$i/l2_dir.$j ; "
                   "for k in `seq 1 10` ; "
                   "do dd if=/dev/urandom of=l1_dir.$i/l2_dir.$j/test.$k "
                   "bs=128k count=$k ; "
                   "done ; "
                   "done ; "
                   "done ; " % (self.mounts[0].mountpoint))

        proc = g.run_async(self.mounts[0].client_system,
                           command,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)
        self.io_validation_complete = False
        # Validate IO
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.io_validation_complete = True
        self.assertTrue(ret, "IO failed on some of the clients")

        remove_brick_list = bricks_list[2:4]
        ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
                                 'start')
        self.assertEqual(ret, 0, "Failed to start remove brick operation")
        g.log.info("Remove bricks operation started successfully")
        g.log.info("Restart glusterd on servers %s", self.servers)
        ret = restart_glusterd(self.servers)
        self.assertTrue(
            ret, ("Failed to restart glusterd on servers %s", self.servers))
        g.log.info("Successfully restarted glusterd on servers %s",
                   self.servers)

        ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
                                 'commit')
        self.assertNotEqual(ret, 0, "Remove brick commit ops should be fail")
        g.log.info("Remove bricks commit operation failure is expected")
Example #26
    def test_subdir_with_removebrick(self):

        # pylint: disable=too-many-statements
        """
        Mount the volume
        Create 2 subdirs on the mount point: subdir1 and subdir2
        Auth allow - Client1(subdir1,subdir2),Client2(subdir1,subdir2)
        Mount the subdirs on their respective clients
        Start IO on both subdirs
        Perform remove-brick
        Validate on the clients that the subdirs are still mounted after
        the remove-brick operation is performed
        """
        # Create  directories subdir1 and subdir2 on mount point
        ret = mkdir(self.mounts[0].client_system,
                    "%s/subdir1" % self.mounts[0].mountpoint)
        self.assertTrue(
            ret, ("Failed to create directory 'subdir1' in"
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        ret = mkdir(self.mounts[0].client_system,
                    "%s/subdir2" % self.mounts[0].mountpoint)
        self.assertTrue(
            ret, ("Failed to create directory 'subdir2' in"
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        # unmount volume
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Volumes UnMount failed")
        g.log.info("Volumes UnMounted successfully")

        # Set authentication on the subdirectory subdir1
        # and subdir2 to access by 2 clients
        g.log.info(
            'Setting authentication on subdir1 and subdir2 '
            'for clients %s and %s', self.clients[0], self.clients[1])
        ret = set_auth_allow(
            self.volname, self.mnode, {
                '/subdir1': [self.clients[0], self.clients[1]],
                '/subdir2': [self.clients[0], self.clients[1]]
            })
        self.assertTrue(
            ret, 'Failed to set Authentication on volume %s' % self.volname)

        self.mpoint = "/mnt/Mount_Point1"

        # Mount Subdir1 mount on client 1
        _, _, _ = mount_volume("%s/subdir1" % self.volname, self.mount_type,
                               self.mpoint, self.mnode, self.clients[0])

        # Checking subdir1 is mounted or not
        ret = is_mounted("%s/subdir1" % self.volname, self.mpoint, self.mnode,
                         self.clients[0], self.mount_type)
        self.assertTrue(ret,
                        "Volume not mounted on mount point: %s" % self.mpoint)
        g.log.info("Volume %s mounted on %s/subdir1", self.volname,
                   self.mpoint)

        # Mount Subdir2 mount on client 2
        _, _, _ = mount_volume("%s/subdir2" % self.volname, self.mount_type,
                               self.mpoint, self.mnode, self.clients[1])

        # Checking subdir2 is mounted or not
        ret = is_mounted("%s/subdir2" % self.volname, self.mpoint, self.mnode,
                         self.clients[1], self.mount_type)
        self.assertTrue(ret,
                        "Volume not mounted on mount point: %s" % self.mpoint)
        g.log.info("Volume %s mounted on %s/subdir2", self.volname,
                   self.mpoint)

        # Start IO on all the subdir mounts.
        self.subdir_mounts = [
            copy.deepcopy(self.mounts[0]),
            copy.deepcopy(self.mounts[1])
        ]
        self.subdir_mounts[0].volname = "%s/subdir1" % self.volname
        self.subdir_mounts[1].volname = "%s/subdir2" % self.volname
        all_mounts_procs = []
        count = 1
        for mount_obj in self.subdir_mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       self.mpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, self.mpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.subdir_mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Perform remove brick operation when subdir is mounted on client
        g.log.info("Start removing bricks from volume")
        ret = shrink_volume(self.mnode, self.volname, rebalance_timeout=600)
        self.assertTrue(ret, ("Remove brick operation failed on "
                              "%s", self.volname))
        g.log.info("Remove brick operation is successful on "
                   "volume %s", self.volname)

        # Wait for volume processes to be online
        g.log.info("Wait for volume processes to be online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("All volume %s processes failed to come up "
                              "online", self.volname))
        g.log.info("All volume %s processes came up "
                   "online successfully", self.volname)

        # Log Volume Info and Status after performing remove brick
        g.log.info("Logging volume info and Status after shrinking volume")
        ret = log_volume_info_and_status(self.mnode, self.volname)
        self.assertTrue(ret, ("Logging volume info and status failed on "
                              "volume %s", self.volname))
        g.log.info("Successful in logging volume info and status of volume %s",
                   self.volname)

        # Again Checking subdir1 is mounted or not on Client 1
        ret = is_mounted("%s/subdir1" % self.volname, self.mpoint, self.mnode,
                         self.clients[0], self.mount_type)
        self.assertTrue(ret,
                        "Volume not mounted on mount point: %s" % self.mpoint)
        g.log.info("Volume %s mounted on %s/subdir1", self.volname,
                   self.mpoint)

        # Again Checking subdir2 is mounted or not on Client 2
        ret = is_mounted("%s/subdir2" % self.volname, self.mpoint, self.mnode,
                         self.clients[1], self.mount_type)
        self.assertTrue(ret,
                        "Volume not mounted on mount point: %s" % self.mpoint)
        g.log.info("Volume %s mounted on %s/subdir2", self.volname,
                   self.mpoint)
Example #27
    def test_file_access(self):
        """
        Test file access.
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-locals
        # pylint: disable=too-many-statements
        mount_obj = self.mounts[0]
        mountpoint = mount_obj.mountpoint

        # get subvol list
        subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
        self.assertIsNotNone(subvols, "failed to get subvols")

        # create a file
        srcfile = mountpoint + '/testfile'
        ret, _, err = g.run(self.clients[0], ("touch %s" % srcfile))
        self.assertEqual(ret, 0, ("File creation failed for %s err %s",
                                  srcfile, err))
        g.log.info("testfile creation successful")

        # find hashed subvol
        srchashed, scount = find_hashed_subvol(subvols, "/", "testfile")
        self.assertIsNotNone(srchashed, "could not find srchashed")
        g.log.info("hashed subvol for srcfile %s subvol count %s",
                   srchashed._host, str(scount))

        # rename the file such that the new name hashes to a new subvol
        tmp = find_new_hashed(subvols, "/", "testfile")
        self.assertIsNotNone(tmp, "could not find new hashed for dstfile")
        g.log.info("dst file name : %s dst hashed_subvol : %s "
                   "subvol count : %s", tmp.newname,
                   tmp.hashedbrickobject._host, str(tmp.subvol_count))
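        # Note: find_new_hashed (a glustolibs DHT helper) picks a new file
        # name whose DHT hash falls on a different subvol, so the rename
        # below keeps the data on the original (cached) subvol and creates a
        # linkto file on the new hashed subvol.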

        dstname = str(tmp.newname)
        dstfile = mountpoint + "/" + dstname
        dsthashed = tmp.hashedbrickobject
        dcount = tmp.subvol_count
        ret, _, err = g.run(self.clients[0], ("mv %s %s" %
                                              (srcfile, dstfile)))
        self.assertEqual(ret, 0, ("rename failed for %s err %s",
                                  srcfile, err))
        g.log.info("cmd: mv srcfile dstfile successful")

        # check that on dsthash_subvol the file is a linkto file
        filepath = dsthashed._fqpath + "/" + dstname
        file_stat = get_file_stat(dsthashed._host, filepath)
        self.assertEqual(file_stat['access'], "1000", ("Expected file "
                                                       "permission to be 1000"
                                                       " on subvol %s",
                                                       dsthashed._host))
        g.log.info("dsthash_subvol has the expected linkto file")

        # check on srchashed the file is a data file
        filepath = srchashed._fqpath + "/" + dstname
        file_stat = get_file_stat(srchashed._host, filepath)
        self.assertNotEqual(file_stat['access'], "1000",
                            ("Expected file permission not to be 1000 "
                             "on subvol %s", srchashed._host))
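        g.log.info("srchashed subvol %s holds the data file as expected",
                   srchashed._host)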

        # Bring down the hashed subvol of dstfile(linkto file)
        ret = bring_bricks_offline(self.volname, subvols[dcount])
        self.assertTrue(ret, ('Error in bringing down subvolume %s',
                              subvols[dcount]))
        g.log.info('dst subvol %s is offline', subvols[dcount])

        # Access the file through a fresh lookup from a new mount.
        # Create a new mount dir (using the server itself as the mount client)
        ret, _, _ = g.run(self.mnode, ("mkdir -p /mnt"))
        self.assertEqual(ret, 0, ('mkdir of mount dir failed'))
        g.log.info("mkdir of mount dir succeeded")

        # do a temp mount
        ret, _, _ = mount_volume(self.volname, self.mount_type, "/mnt",
                                 self.mnode, self.mnode)
        self.assertEqual(ret, 0, "temporary mount failed")
        g.log.info("temporary mount succeeded")

        # check that file is accessible (stat)
        ret, _, _ = g.run(self.mnode, ("stat /mnt/%s" % dstname))
        self.assertEqual(ret, 0, ('stat failed for dst file %s', dstname))
        g.log.info("stat on /mnt/%s successful", dstname)

        # cleanup temporary mount
        ret, _, _ = umount_volume(self.mnode, "/mnt")
        self.assertEqual(ret, 0, "temporary umount failed")
        g.log.info("umount successful")

        # Bring up the hashed subvol
        ret = bring_bricks_online(self.mnode, self.volname, subvols[dcount],
                                  bring_bricks_online_methods=None)
        self.assertTrue(ret, "Error in bringing back subvol online")
        g.log.info('Subvol is back online')

        # now bring down the cached subvol
        ret = bring_bricks_offline(self.volname, subvols[scount])
        self.assertTrue(ret, ('Error in bringing down subvolume %s',
                              subvols[scount]))
        g.log.info('target subvol %s is offline', subvols[scount])

        # file access should fail
        ret, _, _ = g.run(self.clients[0], ("stat %s" % dstfile))
        self.assertEqual(ret, 1, ('Expected stat to fail for file %s',
                                  dstfile))
        g.log.info("dstfile access failed as expected")
Example #28
    def setUpClass(cls):
        # Calling GlusterBaseClass setUpClass
        GlusterBaseClass.setUpClass.im_func(cls)

        # Upload io scripts for running IO on mounts
        g.log.info("Upload io scripts to clients %s for running IO on mounts",
                   cls.clients)
        script_local_path = ("/usr/share/glustolibs/io/scripts/"
                             "file_dir_ops.py")
        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                  "file_dir_ops.py")
        ret = upload_scripts(cls.clients, [script_local_path])
        if not ret:
            raise ExecutionError("Failed to upload IO scripts to clients %s" %
                                 cls.clients)
        g.log.info("Successfully uploaded IO scripts to clients %s",
                   cls.clients)

        cls.counter = 1
        # int: The value of counter is used for the dirname-start-num argument
        # of file_dir_ops.py create_deep_dirs_with_files.

        # The --dir-length argument for file_dir_ops.py
        # create_deep_dirs_with_files is set to 10 (refer to the cmd in the
        # setUp method), which means every mount will create 10 top level
        # dirs. For every mountpoint/testcase to create a new set of dirs,
        # the counter is incremented by the --dir-length value, i.e. 10, in
        # this test suite.

        # If --dir-length is changed to a new value, ensure the counter is
        # also incremented by the same value so that a new set of files/dirs
        # is created.
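        # For example: with --dir-length 10, the first IO run uses
        # --dirname-start-num 1, the next 11, then 21, and so on, so every
        # run creates a distinct set of top level dirs.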

        # Setup Volumes
        if cls.volume_type == "distributed-replicated":
            cls.volume_configs = []

            # Define two 2x2 distributed-replicated volumes
            for i in range(1, 3):
                cls.volume['voltype'] = {
                    'type': 'distributed-replicated',
                    'replica_count': 2,
                    'dist_count': 2,
                    'transport': 'tcp',
                }
                cls.volume_configs.append({
                    'name':
                    'testvol_%s_%d' % (cls.volume['voltype']['type'], i),
                    'servers':
                    cls.servers,
                    'voltype':
                    cls.volume['voltype']
                })

            # Define two 2x3 distributed-replicated volumes
            for i in range(1, 3):
                cls.volume['voltype'] = {
                    'type': 'distributed-replicated',
                    'replica_count': 3,
                    'dist_count': 2,
                    'transport': 'tcp',
                }
                cls.volume_configs.append({
                    'name':
                    'testvol_%s_%d' % (cls.volume['voltype']['type'], i + 2),
                    'servers':
                    cls.servers,
                    'voltype':
                    cls.volume['voltype']
                })

            # Define distributed volume
            cls.volume['voltype'] = {
                'type': 'distributed',
                'dist_count': 3,
                'transport': 'tcp',
            }
            cls.volume_configs.append({
                'name':
                'testvol_%s' % cls.volume['voltype']['type'],
                'servers':
                cls.servers,
                'voltype':
                cls.volume['voltype']
            })
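            # In total, five volumes are defined above: two 2x2 and two 2x3
            # distributed-replicated volumes plus one 3-brick distribute
            # volume.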

            # Create and mount volumes
            cls.mount_points = []
            cls.mount_points_and_volnames = {}
            cls.client = cls.clients[0]
            for volume_config in cls.volume_configs:
                # Setup volume
                ret = setup_volume(mnode=cls.mnode,
                                   all_servers_info=cls.all_servers_info,
                                   volume_config=volume_config,
                                   force=False)
                if not ret:
                    raise ExecutionError("Failed to setup Volume"
                                         " %s" % volume_config['name'])
                g.log.info("Successful in setting volume %s",
                           volume_config['name'])

                # Mount volume
                mount_point = tempfile.mkdtemp()
                cls.mount_points.append(mount_point)
                cls.mount_points_and_volnames[volume_config['name']] = \
                    mount_point
                ret, _, _ = mount_volume(volume_config['name'], cls.mount_type,
                                         mount_point, cls.mnode, cls.client)
                if ret:
                    raise ExecutionError(
                        "Failed to do gluster mount on volume %s " %
                        volume_config['name'])
                g.log.info("Successfully mounted %s on client %s",
                           volume_config['name'], cls.client)
    def setUpClass(cls):
        # Calling GlusterBaseClass setUpClass
        GlusterBaseClass.setUpClass.im_func(cls)

        # Upload io scripts for running IO on mounts
        g.log.info("Upload io scripts to clients %s for running IO on mounts",
                   cls.clients)
        script_local_path = ("/usr/share/glustolibs/io/scripts/"
                             "file_dir_ops.py")
        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                  "file_dir_ops.py")
        ret = upload_scripts(cls.clients, [script_local_path])
        if not ret:
            raise ExecutionError("Failed to upload IO scripts to clients %s" %
                                 cls.clients)
        g.log.info("Successfully uploaded IO scripts to clients %s",
                   cls.clients)

        # Setup Volumes
        if cls.volume_type == "replicated":
            cls.volume_configs = []
            cls.mounts_dict_list = []
            # Define two replicated volumes
            for i in range(1, 3):
                cls.volume['voltype'] = {
                    'type': 'replicated',
                    'replica_count': 3,
                    'arbiter_count': 1,
                    'transport': 'tcp'
                }

                volume_config = {
                    'name':
                    'testvol_%s_%d' % (cls.volume['voltype']['type'], i),
                    'servers': cls.servers,
                    'voltype': cls.volume['voltype']
                }
                cls.volume_configs.append(volume_config)

                # redefine mounts
                for client in cls.all_clients_info.keys():
                    mount = {
                        'protocol':
                        cls.mount_type,
                        'server':
                        cls.mnode,
                        'volname':
                        volume_config['name'],
                        'client':
                        cls.all_clients_info[client],
                        'mountpoint': (os.path.join(
                            "/mnt",
                            '_'.join([volume_config['name'],
                                      cls.mount_type]))),
                        'options':
                        ''
                    }
                    cls.mounts_dict_list.append(mount)

            cls.mounts = create_mount_objs(cls.mounts_dict_list)
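            # cls.mounts now holds one mount object per client for each of
            # the volumes defined above.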

            # Create and mount volumes
            cls.mount_points = []
            cls.client = cls.clients[0]
            for volume_config in cls.volume_configs:
                # Setup volume
                ret = setup_volume(mnode=cls.mnode,
                                   all_servers_info=cls.all_servers_info,
                                   volume_config=volume_config,
                                   force=False)
                if not ret:
                    raise ExecutionError("Failed to setup Volume %s" %
                                         volume_config['name'])
                g.log.info("Successful in setting volume %s",
                           volume_config['name'])

                # Mount volume
                mount_point = (os.path.join(
                    "/mnt", '_'.join([volume_config['name'], cls.mount_type])))
                cls.mount_points.append(mount_point)
                ret, _, _ = mount_volume(volume_config['name'], cls.mount_type,
                                         mount_point, cls.mnode, cls.client)
                if ret:
                    raise ExecutionError(
                        "Failed to do gluster mount on volume %s " %
                        volume_config['name'])
                g.log.info("Successfully mounted %s on client %s",
                           volume_config['name'], cls.client)
Example #30
    def test_change_reserve_limit_to_lower_value(self):

        # pylint: disable=too-many-statements
        """
        Test Case:
        1) Create a distributed-replicated volume and start it.
        2) Enable storage.reserve option on the volume using below command:
            gluster volume set <volname> storage.reserve <value>
        3) Mount the volume on a client.
        4) Write some data on the mount points.
        5) Start remove-brick operation.
        6) While remove-brick is in-progress change the reserve limit to a
           lower value.
        """

        # Create and start a volume
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(ret, "Failed to create and start volume")

        # Setting storage.reserve to 50
        ret = set_volume_options(self.mnode, self.volname,
                                 {'storage.reserve': '50'})
        self.assertTrue(ret,
                        "Failed to set storage reserve on %s" % self.mnode)
        g.log.info("Able to set storage reserve successfully on %s",
                   self.mnode)
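        # Note: per the option's documented behaviour, storage.reserve keeps
        # the given percentage of each brick's space reserved; once free
        # space drops below it, regular writes start failing with ENOSPC.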

        # Mounting the volume.
        ret, _, _ = mount_volume(self.volname,
                                 mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
        g.log.info("Volume mounted successfully : %s", self.volname)

        # Run IOs
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "/usr/bin/env python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 2 "
                "--dir-length 5 "
                "--max-num-of-dirs 3 "
                "--num-of-files 10 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
            self.counter = self.counter + 10

        # Validate IO
        self.assertTrue(validate_io_procs(self.all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")

        # Getting a list of all the bricks.
        g.log.info("Get all the bricks of the volume")
        self.brick_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(self.brick_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        # Removing bricks from volume.
        remove_brick_list = self.brick_list[3:6]
        ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
                                 'start')
        self.assertEqual(ret, 0, "Failed to start remove brick operation.")
        g.log.info("Remove bricks operation started successfully.")

        # Setting storage.reserve to 33
        ret = set_volume_options(self.mnode, self.volname,
                                 {'storage.reserve': '33'})
        self.assertTrue(ret,
                        "Failed to set storage reserve on %s" % self.mnode)
        g.log.info("Able to set storage reserve successfully on %s",
                   self.mnode)

        # Stopping the remove brick operation.
        ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
                                 'stop')
        self.assertEqual(ret, 0, "Failed to stop remove brick operation")
        g.log.info("Remove bricks operation stop successfully")