def tearDown(self):
        # Calling GlusterBaseClass teardown
        self.get_super_method(self, 'tearDown')()

        # delete created snapshots
        g.log.info("starting to delete all created snapshots")
        ret, _, _ = snap_delete_all(self.mnode)
        if ret != 0:
            raise ExecutionError("Failed to delete all snapshots")
        g.log.info("Successfully deleted all snapshots")

        # Disable Activate on create
        option = {'activate-on-create': 'disable'}
        ret, _, _ = set_snap_config(self.mnode, option)
        if ret != 0:
            raise ExecutionError("Failed to set activateOnCreate"
                                 "config option")
        g.log.info("ActivateOnCreate config option Successfully set")

        # umount clone volume
        g.log.info("Unmounting clone volume")
        ret, _, _ = umount_volume(self.clients[0], self.mpoint1)
        if ret != 0:
            raise ExecutionError("Failed to unmount clone "
                                 "volume %s" % self.clone1)
        g.log.info("Successfully unmounted clone volume %s", self.clone1)

        ret, _, _ = umount_volume(self.clients[0], self.mpoint2)
        if ret != 0:
            raise ExecutionError("Failed to unmount clone "
                                 "volume %s" % self.clone2)
        g.log.info("Successfully unmounted clone volume %s", self.clone2)

        # cleanup volume
        g.log.info("starting to cleanup volume")
        ret1 = cleanup_volume(self.mnode, self.clone1)
        ret2 = cleanup_volume(self.mnode, self.clone2)
        if not ret1:
            raise ExecutionError("Failed to cleanup %s clone "
                                 "volume" % self.clone1)
        if not ret2:
            raise ExecutionError("Failed to cleanup %s clone "
                                 "volume" % self.clone2)
        g.log.info("Successfully cleanedup cloned volumes")

        # Unmount and cleanup-volume
        g.log.info("Starting to Unmount and cleanup-volume")
        ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to Unmount and Cleanup Volume")
        g.log.info("Successful in Unmount Volume and Cleanup Volume")
    def tearDown(self):

        # UnMount Volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret, _, _ = umount_volume(self.mounts[0].client_system,
                                  self.mounts[0].mountpoint,
                                  mtype=self.mount_type)
        if ret != 0:
            raise ExecutionError("Failed to Unmount Volume %s" % self.volname)
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Clean up all volumes and peer probe to form cluster
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                # check all bricks are online
                ret = wait_for_bricks_to_be_online(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to bring bricks online"
                                         "for volume %s" % volume)
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe peer "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        self.get_super_method(self, 'tearDown')()
Example #3
    def tearDown(self):

        # Cleanup and umount volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret, _, _ = umount_volume(self.mounts[0].client_system,
                                  self.mounts[0].mountpoint,
                                  mtype=self.mount_type)
        if ret != 0:
            raise ExecutionError("Failed to Unmount Volume %s" % self.volname)
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Clean up all volumes and peer probe to form cluster
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)
        g.log.info("Successful in umounting the volume and Cleanup")

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        self.get_super_method(self, 'tearDown')()
    def tearDown(self):

        # Unmount Cloned volume
        g.log.info("Starting to Unmount Cloned volume")
        for count in range(0, 2):
            self.mpoint = "/mnt/clone%s" % count
            ret, _, _ = umount_volume(self.clients[0], self.mpoint,
                                      self.mount_type)
            if ret != 0:
                raise ExecutionError("Unmounting the mount point %s failed" %
                                     self.mpoint)
            g.log.info("Mount point %s unmounted successfully", self.mpoint)
        g.log.info("Unmount Volume Successful")

        # Cleanup Cloned Volumes
        g.log.info("Starting to cleanup cloned volumes")
        for clone_count in range(0, 2):
            ret = cleanup_volume(self.mnode, "clone%s" % clone_count)
            if not ret:
                raise ExecutionError("Failed to cleanup clone%s volume" %
                                     clone_count)
            g.log.info("Successful in clone%s volume cleanup", clone_count)

        # Unmount and cleanup-volume
        g.log.info("Starting to Unmount and cleanup-volume")
        ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to Unmount and Cleanup Volume")
        g.log.info("Cleanup Volume Successfully")
    def tearDownClass(cls):
        """
        Clean up the volume and umount volume from client
        """
        # umount all volumes
        for mount_obj in cls.mounts:
            ret, _, _ = umount_volume(
                mount_obj.client_system, mount_obj.mountpoint)
            if ret:
                raise ExecutionError(
                    "Failed to umount on volume %s "
                    % cls.volname)
            g.log.info("Successfully umounted %s on client %s",
                       cls.volname, mount_obj.client_system)
            ret = rmdir(mount_obj.client_system, mount_obj.mountpoint)
            if not ret:
                raise ExecutionError("Failed to remove mount directory.")
            g.log.info("Mount directory is removed successfully")

        # stopping all volumes
        g.log.info("Starting to Cleanup all Volumes")
        volume_list = get_volume_list(cls.mnode)
        for volume in volume_list:
            ret = cleanup_volume(cls.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to cleanup Volume %s" % volume)
            g.log.info("Volume: %s cleanup is done", volume)
        g.log.info("Successfully Cleanedup all Volumes")

        # calling GlusterBaseClass tearDownClass
        cls.get_super_method(cls, 'tearDownClass')()
Example #6
    def _disable_io_encryption(self):
        """ Disables IO encryption """
        # UnMount Volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret, _, _ = umount_volume(self.mounts[0].client_system,
                                  self.mounts[0].mountpoint,
                                  mtype=self.mount_type)
        self.assertEqual(ret, 0, "Failed to Unmount volume")

        # Stop Volume
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to Stop volume")

        # Disable server and client SSL usage
        options = {"server.ssl": "off",
                   "client.ssl": "off"}
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, "Failed to set volume options")

        # Start Volume
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to Start volume")

        # Mount Volume
        ret, _, _ = mount_volume(self.volname, mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, "Failed to mount the volume back")
    def tearDownClass(cls):
        """
        Clean up the volume and umount volume from client
        """
        # stopping all volumes
        g.log.info("Starting to Cleanup all Volumes")
        volume_list = get_volume_list(cls.mnode)
        for volume in volume_list:
            ret = cleanup_volume(cls.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to cleanup Volume %s" % volume)
            g.log.info("Volume: %s cleanup is done", volume)
        g.log.info("Successfully Cleanedup all Volumes")

        # umount all volumes
        for mount_point in cls.mount_points:
            ret, _, _ = umount_volume(cls.client, mount_point)
            if ret:
                raise ExecutionError("Failed to umount on volume %s " %
                                     cls.volname)
            g.log.info("Successfully umounted %s on client %s", cls.volname,
                       cls.client)

        # calling GlusterBaseClass tearDownClass
        cls.get_super_method(cls, 'tearDownClass')()
Example #8
    def tearDown(self):

        # Unmounting the volume.
        ret, _, _ = umount_volume(mclient=self.mounts[0].client_system,
                                  mpoint=self.mounts[0].mountpoint)
        if ret:
            raise ExecutionError("Volume %s is not unmounted" % self.volname)
        g.log.info("Volume unmounted successfully : %s", self.volname)

        # clean up all volumes
        vol_list = get_volume_list(self.mnode)
        if not vol_list:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Unable to delete volume % s" % volume)
            g.log.info("Volume deleted successfully : %s", volume)

        # Cleaning the deleted volume bricks
        for brick in self.brick_list:
            node, brick_path = brick.split(':')
            cmd = "rm -rf " + brick_path
            ret, _, _ = g.run(node, cmd)
            if ret:
                raise ExecutionError("Failed to delete the brick "
                                     "dir's of deleted volume")

        self.get_super_method(self, 'tearDown')()
    def test_mount_remove_client_logs_dir_remount(self):

        # pylint: disable=too-many-statements
        """
        1. Create all types of volumes and start them.
        2. Mount all volumes on clients.
        3. Delete /var/log/glusterfs folder on client.
        4. Run IO on all the mount points.
        5. Unmount and remount all volumes.
        6. Check if logs are regenerated or not.
        """

        # Mounting the volume.
        ret, _, _ = mount_volume(self.volname, mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume %s is not mounted.") % self.volname)
        g.log.info("Volume mounted successfully : %s", self.volname)

        # Moving dir /var/log/glusterfs to /root on client.
        cmd = 'mv /var/log/glusterfs /root/'
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Unable to move /var/log/glusterfs dir.")
        g.log.info("Successfully moved /var/log/glusterfs on client: %s",
                   self.mounts[0])

        # Running IO on the mount point.
        # Creating a dir on the mount point.
        ret = mkdir(self.mounts[0].client_system,
                    self.mounts[0].mountpoint+"/dir")
        self.assertTrue(ret, "Failed to create dir.")
        g.log.info("dir created successfully for %s", self.mounts[0])

        # Creating a file on the mount point.
        cmd = 'touch %s/file' % self.mounts[0].mountpoint
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to create file.")
        g.log.info("file created successfully for %s", self.mounts[0])

        # Unmounting and remounting volume.
        ret, _, _ = umount_volume(mclient=self.mounts[0].client_system,
                                  mpoint=self.mounts[0].mountpoint)
        self.assertEqual(ret, 0,
                         ("Volume %s is not unmounted.") % self.volname)
        g.log.info("Volume unmounted successfully : %s", self.volname)

        ret, _, _ = mount_volume(self.volname, mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume %s is not mounted.") % self.volname)
        g.log.info("Volume mounted successfully : %s", self.volname)

        # Checking if logs are regenerated or not.
        ret = get_dir_contents(self.mounts[0].client_system,
                               '/var/log/glusterfs/')
        self.assertIsNotNone(ret, 'Log files were not regenerated.')
        g.log.info("Log files were properly regenearted.")
Example #10
    def tearDown(self):
        # Calling GlusterBaseClass teardown
        self.get_super_method(self, 'tearDown')()

        # Disable Activate on create
        option = {'activate-on-create': 'disable'}
        ret, _, _ = set_snap_config(self.mnode, option)
        if ret != 0:
            raise ExecutionError("Failed to set activateOnCreate"
                                 "config option")
        g.log.info("ActivateOnCreate config option Successfully set")

        # umount clone volume
        g.log.info("Unmounting clone volume")
        ret, _, _ = umount_volume(self.mounts[0].client_system, self.mpoint1)
        if ret != 0:
            raise ExecutionError("Failed to unmount clone "
                                 "volume %s" % self.clone1)
        g.log.info("Successfully unmounted clone volume %s", self.clone1)

        ret, _, _ = umount_volume(self.mounts[0].client_system, self.mpoint2)
        if ret != 0:
            raise ExecutionError("Failed to unmount clone "
                                 "volume %s" % self.clone2)
        g.log.info("Successfully unmounted clone volume %s", self.clone2)

        # cleanup volume
        g.log.info("starting to cleanup volume")
        ret1 = cleanup_volume(self.mnode, self.clone1)
        ret2 = cleanup_volume(self.mnode, self.clone2)
        if not ret1:
            raise ExecutionError("Failed to cleanup %s clone "
                                 "volume" % self.clone1)
        if not ret2:
            raise ExecutionError("Failed to cleanup %s clone "
                                 "volume" % self.clone2)
        g.log.info("Successfully cleanedup cloned volumes")

        # Unmount and cleanup-volume
        g.log.info("Starting to Unmount and cleanup-volume")
        ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to Unmount and Cleanup Volume")
        g.log.info("Successful in Unmount Volume and Cleanup Volume")
Example #11
def get_pathinfo(mnode, filename, volname):
    """This module gets filepath of the given file in gluster server.

    Example:
        get_pathinfo(mnode, "file1", "testvol")

    Args:
        mnode (str): Node on which cmd has to be executed.
        filename (str): relative path of file
        volname (str): volume name

    Returns:
        NoneType: None if command execution fails, parse errors.
        list: file path for the given file in gluster server
    """

    mount_point = tempfile.mkdtemp()

    # Performing glusterfs mount because only with glusterfs mount
    # the file location in gluster server can be identified
    ret, _, _ = mount_volume(volname,
                             mtype='glusterfs',
                             mpoint=mount_point,
                             mserver=mnode,
                             mclient=mnode)
    if ret != 0:
        g.log.error("Failed to do gluster mount on volume %s to fetch"
                    "pathinfo from server %s" % (volname, mnode))
        return None

    filename = mount_point + '/' + filename
    attr_name = 'trusted.glusterfs.pathinfo'
    output = get_extended_attributes_info(mnode, [filename],
                                          attr_name=attr_name)
    if output is None:
        g.log.error("Failed to get path info for %s" % filename)
        return None

    pathinfo = output[filename][attr_name]

    umount_volume(mnode, mount_point)
    g.run(mnode, "rm -rf " + mount_point)

    return re.findall(r".*?POSIX.*?:(\S+)\>", pathinfo)
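A minimal usage sketch, assuming a started volume named testvol that holds dir1/file1 (the host, file, and volume names below are placeholders):

paths = get_pathinfo('server1.example.com', 'dir1/file1', 'testvol')
if paths is None:
    g.log.error("Could not fetch pathinfo for dir1/file1")
else:
    g.log.info("Backend brick paths: %s", paths)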
Example #12
def teardown_samba_ctdb_cluster(servers, ctdb_volname):
    """
    Tear down samba ctdb setup

    Args:
        servers (list): Nodes in ctdb cluster to teardown entire
            cluster
        ctdb_volname (str): Name of ctdb volume

    Returns:
        bool: True if successfully tear downs ctdb cluster else false
    """

    node_file_path = "/etc/ctdb/nodes"
    publicip_file_path = "/etc/ctdb/public_addresses"
    g.log.info("Executing force cleanup...")
    # Stop ctdb service
    if stop_ctdb_service(servers):
        for mnode in servers:
            # check if nodes file is available and delete
            ret = check_file_availability(mnode, node_file_path, "nodes")
            if not ret:
                g.log.info("Failed to delete existing "
                           "nodes file in %s", mnode)
                return False
            g.log.info("Deleted existing nodes file in %s", mnode)

            # check if public_addresses file is available and delete
            ret = check_file_availability(mnode, publicip_file_path,
                                          "public_addresses")
            if not ret:
                g.log.error(
                    "Failed to delete existing public_addresses"
                    " file in %s", mnode)
                return False
            g.log.info("Deleted existing public_addresses" "file in %s", mnode)

            ctdb_mount = '/gluster/lock'
            ret, _, _ = umount_volume(mnode, ctdb_mount, 'glusterfs')
            if ret:
                g.log.error("Unable to unmount lock volume in %s", mnode)
                return False
            if not edit_hookscript_for_teardown(mnode, ctdb_volname):
                return False
        mnode = servers[0]
        ret = cleanup_volume(mnode, ctdb_volname)
        if not ret:
            g.log.error("Failed to delete ctdb volume - %s", ctdb_volname)
            return False
        return True
    return False
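A hedged usage sketch (the server names and the ctdb volume name below are placeholders):

ctdb_servers = ['server1.example.com', 'server2.example.com']
if not teardown_samba_ctdb_cluster(ctdb_servers, 'ctdb'):
    g.log.error("Failed to tear down the samba ctdb cluster")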
Example #13
    def tearDown(self):
        """
        If test method failed before validating IO, tearDown waits for the
        IO's to complete and checks for the IO exit status

        Cleanup and umount volume
        """
        if not self.io_validation_complete:
            g.log.info("Wait for IO to complete as IO validation did not "
                       "succeed in test method")
            ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
            if not ret:
                raise ExecutionError("IO failed on some of the clients")
            g.log.info("IO is successful on all mounts")

            # List all files and dirs created
            g.log.info("List all files and directories:")
            ret = list_all_files_and_dirs_mounts(self.mounts)
            if not ret:
                raise ExecutionError("Failed to list all files and dirs")
            g.log.info("Listing all files and directories is successful")

        # umount all volumes
        for mount_point in self.mount_points:
            ret, _, _ = umount_volume(self.client, mount_point)
            if ret:
                raise ExecutionError("Failed to umount on volume %s " %
                                     self.volname)
            g.log.info("Successfully umounted %s on client %s", self.volname,
                       self.client)
            ret = rmdir(self.client, mount_point)
            if not ret:
                raise ExecutionError(
                    "Failed to remove directory mount directory.")
            g.log.info("Mount directory is removed successfully")

        # stopping all volumes
        volume_list = get_volume_list(self.mnode)
        for volume in volume_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to cleanup Volume %s" % volume)
            g.log.info("Volume: %s cleanup is done", volume)
        g.log.info("Successfully Cleanedup all Volumes")

        # calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
Example #14
    def tearDown(self):
        """
        Unmount Volume and Volume Cleanup
        """
        for client in self.clients:
            ret, _, _ = umount_volume(client, self.mountpoint, self.mount_type)
            if ret != 0:
                raise ExecutionError("Failed to unmount volume from client"
                                     " %s" % client)
            g.log.info("Unmounted Volume from client %s successfully", client)
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed to Cleanup the "
                                 "Volume %s" % self.volname)

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
Example #15
    def _set_option_and_mount_and_unmount_volumes(self,
                                                  option="",
                                                  is_allowed=True):
        """
        Setting volume option and then mounting and unmounting the volume
        """
        # Check if an option is passed
        if option:
            # Setting the option passed as an argument
            ret = set_volume_options(self.mnode, self.volname,
                                     {option: self.mounts[0].client_system})
            self.assertTrue(
                ret, "Failed to set %s option in volume: %s" %
                (option, self.volname))
            g.log.info("Successfully set %s option in volume: %s", option,
                       self.volname)

        # Mounting a volume
        ret, _, _ = mount_volume(self.volname,
                                 mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)

        # Checking if volume was successfully mounted or not
        ret = is_mounted(self.volname,
                         mtype=self.mount_type,
                         mpoint=self.mounts[0].mountpoint,
                         mserver=self.mnode,
                         mclient=self.mounts[0].client_system)
        if is_allowed:
            self.assertTrue(ret,
                            "Failed to mount the volume: %s" % self.volname)
        else:
            self.assertFalse(
                ret, "Unexpected: Mounting"
                " the volume %s was successful" % self.volname)

        # Unmount only if the volume is supposed to be mounted
        if is_allowed:
            ret, _, _ = umount_volume(self.mounts[0].client_system,
                                      self.mounts[0].mountpoint,
                                      mtype=self.mount_type)
            self.assertEqual(ret, 0,
                             "Failed to unmount the volume: %s" % self.volname)
    def tearDown(self):
        # Reset firewall services to the zone
        if not self.firewall_added:
            ret = self._add_firewall_services(self.servers[:2])
            if not ret:
                raise ExecutionError("Failed to add firewall services")

        # Reload firewall services
        ret = self._reload_firewall_service(self.servers[:2])
        if not ret:
            raise ExecutionError("Failed to reload firewall services")

        # Cleanup the volumes and unmount it, if mounted
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = is_mounted(volume,
                                 mpoint="/mnt/distribute-vol",
                                 mserver=self.mnode,
                                 mclient=self.servers[1],
                                 mtype="glusterfs")
                if ret:
                    ret, _, _ = umount_volume(mclient=self.servers[1],
                                              mpoint="/mnt/distribute-vol")
                    if ret:
                        raise ExecutionError("Failed to unmount volume")

                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume cleaned up successfully : %s", volume)

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        self.get_super_method(self, 'tearDown')()
    def tearDown(self):

        # Setting storage.reserve to Default
        ret = set_volume_options(self.mnode, self.volname,
                                 {'storage.reserve': '0'})
        if not ret:
            raise ExecutionError("Failed to reset storage reserve on %s" %
                                 self.mnode)
        g.log.info("Able to reset storage reserve successfully on %s",
                   self.mnode)

        # Unmounting the volume.
        ret, _, _ = umount_volume(mclient=self.mounts[0].client_system,
                                  mpoint=self.mounts[0].mountpoint)
        if ret:
            raise ExecutionError("Volume %s is not unmounted" % self.volname)
        g.log.info("Volume unmounted successfully : %s", self.volname)
        ret = rmdir(self.mounts[0].client_system, self.mounts[0].mountpoint)
        if not ret:
            raise ExecutionError("Failed to remove directory mount directory.")
        g.log.info("Mount directory is removed successfully")

        # clean up all volumes
        vol_list = get_volume_list(self.mnode)
        if not vol_list:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Unable to delete volume % s" % volume)
            g.log.info("Volume deleted successfully : %s", volume)

        # Cleaning the deleted volume bricks
        for brick in self.brick_list:
            node, brick_path = brick.split(':')
            cmd = "rm -rf " + brick_path
            ret, _, _ = g.run(node, cmd)
            if ret:
                raise ExecutionError("Failed to delete the brick "
                                     "dir's of deleted volume")

        self.get_super_method(self, 'tearDown')()
    def tearDown(self):

        # UnMount Volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret, _, _ = umount_volume(self.mounts[0].client_system,
                                  self.mounts[0].mountpoint,
                                  mtype=self.mount_type)
        if ret != 0:
            raise ExecutionError("Failed to Unmount Volume %s" % self.volname)
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Clean up all volumes and peer probe to form cluster
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)

        bricks = get_servers_bricks_dict(self.servers, self.all_servers_info)

        # Checking brick dir and cleaning it.
        for server in self.servers:
            for brick in bricks[server]:
                if get_dir_contents(server, brick):
                    cmd = "rm -rf " + brick + "/*"
                    ret, _, _ = g.run(server, cmd)
                    if ret:
                        raise ExecutionError("Failed to delete the brick "
                                             "dirs of deleted volume.")

        self.get_super_method(self, 'tearDown')()
Example #19
    def tearDown(self):
        """
        Unmount Volume and Volume Cleanup
        """
        for client in self.clients:
            ret, _, _ = umount_volume(client, self.mountpoint, self.mount_type)
            if ret != 0:
                raise ExecutionError("Failed to unmount volume from client"
                                     " %s" % client)
            g.log.info("Unmounted Volume from client %s successfully", client)
        g.log.info("Cleaning up volume")
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed to Cleanup the "
                                 "Volume %s" % self.volname)
        g.log.info("Volume deleted successfully " ": %s", self.volname)

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
    def tearDown(self):

        # Cleanup and umount cloned volume
        g.log.info("Starting to umount Volume")
        ret, _, _ = umount_volume(self.clients[0], self.mount1)
        if ret != 0:
            raise ExecutionError("Failed to unmount the cloned volume")
        g.log.info("Successfully Unmounted the cloned volume")
        g.log.info("Starting to cleanup volume")
        ret = cleanup_volume(self.mnode, self.clone)
        if not ret:
            raise ExecutionError("Failed to cleanup the cloned volume")
        g.log.info("Successful in cleanup Cloned volume")

        # Unmount and cleanup original volume
        g.log.info("Starting to Unmount Volume and Cleanup Volume")
        ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
        if not ret:
            raise ExecutionError("Failed to umount the vol & cleanup Volume")
        g.log.info("Successful in umounting the volume and Cleanup")
    def tearDown(self):

        # Calling GlusterBaseClass teardown
        self.get_super_method(self, 'tearDown')()

        # Cleanup cloned volume
        g.log.info("Starting to delete cloned volume")
        ret = cleanup_volume(self.mnode, self.clone)
        if not ret:
            raise ExecutionError("Failed to delete the cloned volume")
        g.log.info("Successful in deleting Cloned volume")

        # Unmount Volume
        g.log.info("Starting to Unmount Volume")
        for mount_obj in self.mounts:
            ret, _, _ = umount_volume(mount_obj.client_system, self.mpoint,
                                      self.mount_type)
            if ret != 0:
                raise ExecutionError("Failed to umount the volume")
            g.log.info("Successful in umounting the volume")
    def tearDown(self):
        cmd = ("sed -i '/transport.socket.bind-address/d'"
               " /etc/glusterfs/glusterd.vol")
        ret, _, _ = g.run(self.mnode, cmd)
        if ret:
            raise ExecutionError("Failed to remove entry from 'glusterd.vol'")
        for mount_dir in self.mount:
            ret, _, _ = umount_volume(self.clients[0], mount_dir)
            if ret != 0:
                raise ExecutionError("Failed to unmount volume")

        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
    def tearDown(self):
        """
        Clean up the volume and umount volume from client
        """
        # Unmount Volume from client
        g.log.info("Starting to Unmount volume")
        for client in self.clients:
            ret, _, _ = umount_volume(client, self.mpoint, self.mount_type)
            if ret != 0:
                raise ExecutionError("Unmounting the mount point %s failed" %
                                     self.mpoint)
            g.log.info("Unmount Volume Successful")
            cmd = ("rm -rf %s") % self.mpoint
            ret, _, _ = g.run(client, cmd)
            g.log.info("Mount point %s deleted successfully", self.mpoint)

        # cleanup-volume
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed to Cleanup Volume")
        g.log.info("Cleanup volume %s Completed Successfully", self.volname)
    def tearDown(self):
        self.get_super_method(self, 'tearDown')()

        # Unmount the volume
        ret, _, _ = umount_volume(mclient=self.mounts[0].client_system,
                                  mpoint=self.mounts[0].mountpoint)
        if ret != 0:
            raise ExecutionError("Unable to umount the volume")
        g.log.info("Unmounting of the volume %s succeeded", self.volname)

        # The reason for volume reset is, metadata-cache is enabled
        # by group, can't disable the group in glusterfs.
        ret, _, _ = volume_reset(self.mnode, self.volname)
        if ret:
            raise ExecutionError("Unable to reset the volume {}".
                                 format(self.volname))
        g.log.info("Volume: %s reset successful ", self.volname)

        # Cleanup the volume
        if not self.cleanup_volume():
            raise ExecutionError("Unable to perform volume clenaup")
        g.log.info("Volume cleanup is successfull")
    def tearDown(self):
        """
        tearDown method for every test
        """
        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()

        ret, _, _ = umount_volume(mclient=self.mounts[0].client_system,
                                  mpoint=self.mountpoint)
        if ret != 0:
            raise ExecutionError("Unable to umount the volume")
        g.log.info("Unmounting of the volume %s succeeded", self.volname)

        # Resetting the volume option set in the setup
        ret = set_volume_options(self.mnode, self.volname,
                                 {'diagnostics.client-log-level': 'INFO'})
        if not ret:
            raise ExecutionError("Unable to set the client log level to INFO")
        g.log.info("Volume option is set successfully.")

        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Unable to perform volume clenaup")
        g.log.info("Volume cleanup is successfull")
Example #26
    def trigger_heal_from_mount_point(self):
        """
        Trigger heal from mount point using read.
        """
        # Unmounting and remounting volume to update the volume graph
        # in client.
        ret, _, _ = umount_volume(
            self.mounts[0].client_system, self.mounts[0].mountpoint)
        self.assertFalse(ret, "Failed to unmount volume.")

        ret, _, _ = mount_volume(
            self.volname, 'glusterfs', self.mounts[0].mountpoint,
            self.mnode, self.mounts[0].client_system)
        self.assertFalse(ret, "Failed to remount volume.")
        g.log.info('Successfully umounted and remounted volume.')

        # Trigger heal from client side
        cmd = ("/usr/bin/env python {0} read {1}/{2}".format(
            self.script_upload_path, self.mounts[0].mountpoint,
            self.test_meta_data_self_heal_folder))
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertFalse(ret, 'Failed to trigger heal on %s'
                         % self.mounts[0].client_system)
        g.log.info("Successfully triggered heal from mount point.")
    def tearDown(self):

        # Resetting the /var/log/glusterfs on client
        # and archiving the present one.
        cmd = ('for file in `ls /var/log/glusterfs/`; do '
               'mv /var/log/glusterfs/$file'
               ' /var/log/glusterfs/`date +%s`-$file; done')
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Renaming all files failed")
        g.log.info("Successfully renamed files in"
                   " /var/log/glusterfs on client: %s",
                   self.mounts[0].client_system)
        cmd = ('mv /root/glusterfs/* /var/log/glusterfs/;'
               'rm -rf /root/glusterfs')
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        if ret != 0:
            raise ExecutionError(
                "Failed to move old files back to /var/log/glusterfs")
        g.log.info("Successfully moved files in"
                   " /var/log/glusterfs on client: %s",
                   self.mounts[0])

        # Unmounting the volume.
        ret, _, _ = umount_volume(mclient=self.mounts[0].client_system,
                                  mpoint=self.mounts[0].mountpoint)
        if ret:
            raise ExecutionError("Volume %s is not unmounted" % self.volname)
        g.log.info("Volume unmounted successfully : %s", self.volname)

        # clean up all volumes
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Unable to delete volume % s"
                                 % self.volname)
        g.log.info("Volume deleted successfully : %s", self.volname)

        self.get_super_method(self, 'tearDown')()
    def tearDown(self):

        # Stop rebalance on the volume.
        g.log.info("Stopping rebalance on the volume")
        ret, _, _ = rebalance_stop(self.mnode, self.volname)
        if ret:
            raise ExecutionError("Failed to stop rebalance " "on the volume .")
        g.log.info("Successfully stopped rebalance on the volume %s",
                   self.volname)

        # Peer probe node which was detached
        ret, _, _ = peer_probe(self.mnode, self.servers[4])
        if ret:
            raise ExecutionError("Failed to probe %s" % self.servers[4])
        g.log.info("Peer probe successful %s", self.servers[4])

        # Wait till peers are in connected state
        count = 0
        while count < 60:
            ret = is_peer_connected(self.mnode, self.servers)
            if ret:
                break
            sleep(3)
            count += 1
        if not ret:
            raise ExecutionError("Peers are not in connected state")

        # Unmounting and cleaning volume
        ret, _, _ = umount_volume(mclient=self.mounts[0].client_system,
                                  mpoint=self.mounts[0].mountpoint)
        if ret:
            raise ExecutionError("Unable to unmount volume %s" % self.volname)
        g.log.info("Volume unmounted successfully  %s", self.volname)

        ret = cleanup_volume(self.mnode, self.volname)
        if not ret:
            raise ExecutionError("Unable to delete volume %s" % self.volname)
        g.log.info("Volume deleted successfully  %s", self.volname)
        self.get_super_method(self, 'tearDown')()
Example #29
    def test_server_side_healing_happens_only_when_glustershd_running(self):
        """
        Test Script which verifies that the server side healing must happen
        only if the heal daemon is running on the node where source brick
        resides.

         * Create and start the Replicate volume
         * Check the glustershd processes - Only 1 glustershd should be listed
         * Bring down the bricks without affecting the cluster
         * Create files on volume
         * kill the glustershd on the nodes where the bricks are running
         * bring up the bricks which were killed in the previous steps
         * check the heal info - heal info must show pending heal info, heal
           shouldn't happen since glustershd is down on source node
         * issue heal
         * trigger client side heal
         * heal should complete successfully
        """
        # pylint: disable=too-many-locals,too-many-statements,too-many-lines
        # Setting Volume options
        options = {
            "metadata-self-heal": "on",
            "entry-self-heal": "on",
            "data-self-heal": "on"
        }
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, 'Failed to set options %s' % options)
        g.log.info("Successfully set %s for volume %s", options, self.volname)

        # Check the self-heal daemon process
        ret, pids = get_self_heal_daemon_pid(self.servers)
        self.assertTrue(ret, ("Either No self heal daemon process found or "
                              "more than One self heal daemon process "
                              "found : %s" % pids))
        g.log.info(
            "Successful in verifying self heal daemon process"
            " on all nodes %s", self.servers)

        # Select the bricks to bring offline
        bricks_to_bring_offline = (select_volume_bricks_to_bring_offline(
            self.mnode, self.volname))
        g.log.info("Brick List to bring offline : %s", bricks_to_bring_offline)

        # Bring down the selected bricks
        ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
        self.assertTrue(ret, "Failed to bring down the bricks")
        g.log.info("Brought down the brick process "
                   "for %s", bricks_to_bring_offline)

        # Write files on all mounts
        all_mounts_procs, num_files_to_write = [], 100
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f %s --base-file-name file %s" %
                   (self.script_upload_path, num_files_to_write,
                    mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate IO
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # Get online bricks list
        online_bricks = get_online_bricks_list(self.mnode, self.volname)
        g.log.info("Online Bricks for volume %s : %s", self.volname,
                   online_bricks)

        # Get the nodes where bricks are running
        bring_offline_glustershd_nodes = []
        for brick in online_bricks:
            bring_offline_glustershd_nodes.append(brick.split(":")[0])
        g.log.info("self heal deamon on nodes %s to be killed",
                   bring_offline_glustershd_nodes)

        # Kill the self heal daemon process on nodes
        ret = bring_self_heal_daemon_process_offline(
            bring_offline_glustershd_nodes)
        self.assertTrue(
            ret, ("Unable to bring self heal daemon process"
                  " offline for nodes %s" % bring_offline_glustershd_nodes))
        g.log.info(
            "Sucessfully brought down self heal process for "
            "nodes %s", bring_offline_glustershd_nodes)

        # Check the heal info
        heal_info = get_heal_info_summary(self.mnode, self.volname)
        g.log.info("Successfully got heal info %s for the volume %s",
                   heal_info, self.volname)

        # Bring bricks online
        ret = bring_bricks_online(self.mnode, self.volname,
                                  bricks_to_bring_offline, 'glusterd_restart')
        self.assertTrue(
            ret,
            ("Failed to bring bricks: %s online" % bricks_to_bring_offline))

        # Issue heal
        ret = trigger_heal_full(self.mnode, self.volname)
        self.assertFalse(ret,
                         ("Able to trigger heal on volume %s where "
                          "self heal daemon is not running" % self.volname))
        g.log.info(
            "Expected : Unable to trigger heal on volume %s where "
            "self heal daemon is not running", self.volname)

        # Wait for 130 sec to heal
        ret = monitor_heal_completion(self.mnode, self.volname, 130)
        self.assertFalse(ret, ("Heal Completed on volume %s" % self.volname))
        g.log.info("Expected : Heal pending on volume %s", self.volname)

        # Check the heal info
        heal_info_after_triggering_heal = get_heal_info_summary(
            self.mnode, self.volname)
        g.log.info("Successfully got heal info for the volume %s",
                   self.volname)

        # Compare with heal pending with the files wrote
        for node in online_bricks:
            self.assertGreaterEqual(
                int(heal_info_after_triggering_heal[node]['numberOfEntries']),
                num_files_to_write,
                ("Some of the files are healed from source bricks %s where "
                 "self heal daemon is not running" % node))
        g.log.info("EXPECTED: No files are healed from source bricks where "
                   "self heal daemon is not running")

        # Unmount and Mount volume again as volume options were set
        # after mounting the volume
        for mount_obj in self.mounts:
            ret, _, _ = umount_volume(mount_obj.client_system,
                                      mount_obj.mountpoint)
            self.assertEqual(ret, 0,
                             "Failed to unmount %s" % mount_obj.client_system)
            ret, _, _ = mount_volume(self.volname,
                                     mtype='glusterfs',
                                     mpoint=mount_obj.mountpoint,
                                     mserver=self.mnode,
                                     mclient=mount_obj.client_system)
            self.assertEqual(ret, 0,
                             "Failed to mount %s" % mount_obj.client_system)

        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s read %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate IO
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "Reads failed on some of the clients")
        g.log.info("Reads successful on all mounts")

        # Wait for heal to complete
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, "Unable to heal the pending entries")
        g.log.info("Successfully healed the pending entries for volume %s",
                   self.volname)
    def test_create_vol_used_bricks(self):
        '''
        -> Create distributed-replica Volume
        -> Add 6 bricks to the volume
        -> Mount the volume
        -> Perform some I/O on the mount point
        -> unmount the volume
        -> Stop and delete the volume
        -> Create another volume using bricks of deleted volume
        '''

        # Create and start a volume
        self.volume['name'] = "test_create_vol_with_fresh_bricks"
        self.volname = "test_create_vol_with_fresh_bricks"
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(ret, "Failed to create and start volume")

        # Forming brick list
        brick_list = form_bricks_list(self.mnode, self.volname, 6,
                                      self.servers, self.all_servers_info)
        # Adding bricks to the volume
        ret, _, _ = add_brick(self.mnode, self.volname, brick_list)
        self.assertEqual(
            ret, 0, "Failed to add bricks to the volume %s" % self.volname)
        g.log.info("Bricks added successfully to the volume %s", self.volname)

        # Mounting the volume.
        for mount_obj in self.mounts:
            ret, _, _ = mount_volume(self.volname,
                                     mtype=self.mount_type,
                                     mpoint=mount_obj.mountpoint,
                                     mserver=self.mnode,
                                     mclient=mount_obj.client_system)
            self.assertEqual(ret, 0,
                             ("Volume %s is not mounted") % (self.volname))
            g.log.info("Volume mounted successfully : %s", self.volname)

        # run IOs
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "/usr/bin/env python %s create_deep_dirs_with_files "
                "--dirname-start-num %d --dir-depth 2 "
                "--dir-length 5 --max-num-of-dirs 3 "
                "--num-of-files 10 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
            self.counter = self.counter + 10

        # Validate IO
        self.assertTrue(validate_io_procs(self.all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")

        # Unmounting the volume.
        for mount_obj in self.mounts:
            ret, _, _ = umount_volume(mclient=mount_obj.client_system,
                                      mpoint=mount_obj.mountpoint)
            self.assertEqual(ret, 0,
                             "Volume %s is not unmounted" % (self.volname))
            g.log.info("Volume unmounted successfully : %s", self.volname)

        # Getting brick list
        self.brick_list = get_all_bricks(self.mnode, self.volname)
        if not self.brick_list:
            raise ExecutionError("Failed to get the brick list of %s" %
                                 self.volname)

        # Stop volume
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to stop the volume %s" % self.volname)
        g.log.info("Volume %s stopped successfully", self.volname)

        # Delete Volume
        ret, _, _ = g.run(
            self.mnode,
            "gluster volume delete %s --mode=script" % self.volname)
        self.assertEqual(ret, 0, "Failed to delete volume %s" % self.volname)
        g.log.info("Volume deleted successfully %s", self.volname)

        # Create another volume by using bricks of deleted volume
        self.volname = "test_create_vol_used_bricks"
        ret, _, err = volume_create(self.mnode,
                                    self.volname,
                                    brick_list[0:6],
                                    replica_count=3)
        self.assertNotEqual(
            ret, 0, "Volume creation should fail with used "
            "bricks but volume creation success")
        g.log.info("Failed to create volume with used bricks")

        # Checking failed message of volume creation
        msg = ' '.join([
            'volume create: test_create_vol_used_bricks: failed:',
            brick_list[0].split(':')[1], 'is already part of a volume'
        ])
        self.assertIn(
            msg, err, "Incorrect error message for volume creation "
            "with used bricks")
        g.log.info("correct error message for volume creation with "
                   "used bricks")