Example #1
        def mount_clone_and_io(clone, mpoint):
            # define mounts
            self.mount_points = []
            self.mounts_dict_list = []
            for client in self.all_clients_info:
                mount = {
                    'protocol': self.mount_type,
                    'server': self.mnode,
                    'volname': clone,
                    'client': self.all_clients_info[client],
                    'mountpoint': mpoint,
                    'options': ''
                }
                self.mounts_dict_list.append(mount)
            self.mounts1 = create_mount_objs(self.mounts_dict_list)
            g.log.info("Successfully made entry in self.mounts")
            # Mounting a volume
            g.log.info("Starting to mount volume")
            ret, _, _ = mount_volume(clone, self.mount_type, mpoint,
                                     self.mnode, self.clients[0])
            self.assertEqual(ret, 0, "Volume mount failed for %s" % clone)
            g.log.info("%s mounted successfully", clone)

            # Check whether the volume is mounted
            ret = is_mounted(clone, mpoint, self.mnode, self.clients[0],
                             self.mount_type)
            self.assertTrue(ret,
                            "Volume not mounted on mount point: %s" % mpoint)
            g.log.info("Volume %s mounted on %s", clone, mpoint)
            return 0
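The other examples on this page unpack the return value of mount_volume as a
(ret, out, err) triple with ret == 0 on success, so asserting on the raw
return value would always pass. A minimal, hedged sketch of that contract
(hostnames are placeholders):

    # Hedged sketch: mount_volume appears to return (ret, out, err),
    # with ret == 0 meaning success.
    ret, out, err = mount_volume('clone1', 'glusterfs', '/mnt/clone1',
                                 'server0.example.com', 'client0.example.com')
    if ret != 0:
        raise ExecutionError("Mount failed: %s" % err)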
Example #2
    def setUpClass(cls):
        """Initialize all the variables necessary for testing Gluster."""
        # Get all servers
        cls.all_servers = None
        if 'servers' in g.config and g.config['servers']:
            cls.all_servers = g.config['servers']
            cls.servers = cls.all_servers
        else:
            raise ConfigError("'servers' not defined in the global config")

        # Get all slaves
        cls.slaves = None
        if g.config.get('slaves'):
            cls.slaves = g.config['slaves']
            # Set mnode_slave : Node on which slave commands are executed
            cls.mnode_slave = cls.slaves[0]
            # Slave IP's
            cls.slaves_ip = cls.get_ip_from_hostname(cls.slaves)

        # Get all clients
        cls.all_clients = None
        if g.config.get('clients'):
            cls.all_clients = g.config['clients']
            cls.clients = cls.all_clients
        else:
            raise ConfigError("'clients' not defined in the global config")

        # Get all servers info
        cls.all_servers_info = None
        if g.config.get('servers_info'):
            cls.all_servers_info = g.config['servers_info']
        else:
            raise ConfigError("'servers_info' not defined in the global "
                              "config")
        # Get all slaves info
        cls.all_slaves_info = None
        if g.config.get('slaves_info'):
            cls.all_slaves_info = g.config['slaves_info']

        # All clients_info
        cls.all_clients_info = None
        if g.config.get('clients_info'):
            cls.all_clients_info = g.config['clients_info']
        else:
            raise ConfigError("'clients_info' not defined in the global "
                              "config")

        # get lv list
        cls.lv_list = cls.get_unique_lv_list_from_all_servers()

        # Set mnode : Node on which gluster commands are executed
        cls.mnode = cls.all_servers[0]

        # Server IP's
        cls.servers_ips = cls.get_ip_from_hostname(cls.servers)

        # SMB Cluster info
        try:
            cls.smb_users_info = (
                g.config['gluster']['cluster_config']['smb']['users_info'])
        except KeyError:
            cls.smb_users_info = {}
            cls.smb_users_info['root'] = {}
            cls.smb_users_info['root']['password'] = '******'
            cls.smb_users_info['root']['acl'] = 'rwx'

        # NFS-Ganesha Cluster info
        try:
            cls.enable_nfs_ganesha = (
                g.config['gluster']['cluster_config']['nfs_ganesha']['enable']
                in ('TRUE', 'True', 'true', 'YES', 'Yes', 'yes', '1', 1))
            cls.num_of_nfs_ganesha_nodes = g.config['gluster'][
                'cluster_config']['nfs_ganesha']['num_of_nfs_ganesha_nodes']
            cls.vips = (
                g.config['gluster']['cluster_config']['nfs_ganesha']['vips'])
        except KeyError:
            cls.enable_nfs_ganesha = False
            cls.num_of_nfs_ganesha_nodes = None
            cls.vips = []

        # Geo-rep Cluster information
        try:
            cls.geo_rep_info = (
                g.config['gluster']['geo_rep']['cluster_config'])
        except KeyError:
            cls.geo_rep_info = {}
            cls.geo_rep_info['root'] = {}
            cls.geo_rep_info['user'] = {}
            cls.geo_rep_info['root']['password'] = ''
            cls.geo_rep_info['user']['name'] = ''
            cls.geo_rep_info['user']['password'] = ''
            cls.geo_rep_info['user']['group'] = ''

        # Defining default volume_types configuration.
        cls.default_volume_type_config = {
            'replicated': {
                'type': 'replicated',
                'replica_count': 3,
                'transport': 'tcp',
            },
            'dispersed': {
                'type': 'dispersed',
                'disperse_count': 6,
                'redundancy_count': 2,
                'transport': 'tcp',
            },
            'distributed': {
                'type': 'distributed',
                'dist_count': 4,
                'transport': 'tcp',
            },
            'distributed-replicated': {
                'type': 'distributed-replicated',
                'dist_count': 2,
                'replica_count': 3,
                'transport': 'tcp',
            },
            'distributed-dispersed': {
                'type': 'distributed-dispersed',
                'dist_count': 2,
                'disperse_count': 6,
                'redundancy_count': 2,
                'transport': 'tcp',
            },
            'arbiter': {
                'type': 'arbiter',
                'replica_count': 3,
                'arbiter_count': 1,
                'transport': 'tcp',
            },
            'distributed-arbiter': {
                'type': 'distributed-arbiter',
                'dist_count': 2,
                'replica_count': 3,
                'arbiter_count': 1,
                'transport': 'tcp',
            }
        }

        # Check if default volume_type configuration is provided in config yml
        if g.config.get('gluster', {}).get('volume_types'):
            default_volume_type_from_config = (
                g.config['gluster']['volume_types'])
            for volume_type in default_volume_type_from_config.keys():
                if default_volume_type_from_config[volume_type]:
                    if volume_type in cls.default_volume_type_config:
                        cls.default_volume_type_config[volume_type] = (
                            default_volume_type_from_config[volume_type])

        # Create Volume with force option
        cls.volume_create_force = False
        if g.config.get('gluster', {}).get('volume_create_force'):
            cls.volume_create_force = (
                g.config['gluster']['volume_create_force'])

        # Default volume options which are applicable for all the volumes
        cls.volume_options = {}
        if g.config.get('gluster', {}).get('volume_options'):
            cls.volume_options = g.config['gluster']['volume_options']

        # If the volume is exported as SMB Share, then set the following
        # volume options on the share.
        cls.smb_share_options = {}
        if g.config.get('gluster', {}).get('smb_share_options'):
            cls.smb_share_options = g.config['gluster']['smb_share_options']

        # If the volume is exported as NFS-Ganesha export,
        # then set the following volume options on the export.
        cls.nfs_ganesha_export_options = {}
        if g.config.get('gluster', {}).get('nfs_ganesha_export_options'):
            cls.nfs_ganesha_export_options = (
                g.config['gluster']['nfs_ganesha_export_options'])

        # Get the volume configuration.
        cls.volume = {}
        if cls.volume_type:
            for volume in g.config.get('gluster', {}).get('volumes', []):
                if volume['voltype']['type'] == cls.volume_type:
                    cls.volume = deepcopy(volume)
                    if 'name' not in cls.volume:
                        cls.volume['name'] = 'testvol_%s' % cls.volume_type
                    if 'servers' not in cls.volume:
                        cls.volume['servers'] = cls.all_servers
                    break
            else:
                try:
                    if g.config['gluster']['volume_types'][cls.volume_type]:
                        cls.volume['voltype'] = (
                            g.config['gluster']['volume_types'][
                                cls.volume_type])
                except KeyError:
                    try:
                        cls.volume['voltype'] = (
                            cls.default_volume_type_config[cls.volume_type])
                    except KeyError:
                        raise ConfigError(
                            "Unable to get configs of volume "
                            "type: %s", cls.volume_type)
                cls.volume['name'] = 'testvol_%s' % cls.volume_type
                cls.volume['servers'] = cls.all_servers

            # Set volume options
            if 'options' not in cls.volume:
                cls.volume['options'] = cls.volume_options

            # Define Volume Useful Variables.
            cls.volname = cls.volume['name']
            cls.voltype = cls.volume['voltype']['type']
            cls.servers = cls.volume['servers']
            cls.mnode = cls.servers[0]
            cls.vol_options = cls.volume['options']

            # Define useful variable for geo-rep volumes.
            if cls.slaves:
                # For master volume
                cls.master_volume = cls.volume
                cls.master_volume['name'] = ('master_testvol_%s' %
                                             cls.volume_type)
                cls.master_volname = cls.master_volume['name']
                cls.master_voltype = (cls.master_volume['voltype']['type'])

                # For slave volume
                cls.slave_volume = deepcopy(cls.volume)
                cls.slave_volume['name'] = ('slave_testvol_%s' %
                                            cls.volume_type)
                cls.slave_volume['servers'] = cls.slaves
                cls.slave_volname = cls.slave_volume['name']
                cls.slave_voltype = (cls.slave_volume['voltype']['type'])

        # Get the mount configuration.
        cls.mounts = []
        if cls.mount_type:
            cls.mounts_dict_list = []
            for mount in g.config.get('gluster', {}).get('mounts', []):
                if mount['protocol'] != cls.mount_type:
                    continue
                temp_mount = {
                    'protocol': cls.mount_type,
                    'volname': cls.volname,
                }
                if mount.get('volname'):
                    if mount['volname'] == cls.volname:
                        temp_mount = deepcopy(mount)
                    else:
                        continue
                temp_mount.update({
                    'server': mount.get('server', cls.mnode),
                    'mountpoint': mount.get(
                        'mountpoint',
                        path_join("/mnt",
                                  '_'.join([cls.volname, cls.mount_type]))),
                    'client': mount.get(
                        'client', cls.all_clients_info[random_choice(
                            list(cls.all_clients_info.keys()))]),
                    'options': mount.get('options', ''),
                })
                cls.mounts_dict_list.append(temp_mount)

            if not cls.mounts_dict_list:
                for client in cls.all_clients_info.keys():
                    cls.mounts_dict_list.append({
                        'protocol': cls.mount_type,
                        'server': cls.mnode,
                        'volname': cls.volname,
                        'client': cls.all_clients_info[client],
                        'mountpoint': path_join(
                            "/mnt", '_'.join([cls.volname, cls.mount_type])),
                        'options': '',
                    })

            if cls.mount_type == 'cifs' or cls.mount_type == 'smb':
                for mount in cls.mounts_dict_list:
                    if 'smbuser' not in mount:
                        mount['smbuser'] = random_choice(
                            list(cls.smb_users_info.keys()))
                        mount['smbpasswd'] = (
                            cls.smb_users_info[mount['smbuser']]['password'])

            cls.mounts = create_mount_objs(cls.mounts_dict_list)

            # Setting mounts for geo-rep volumes.
            if cls.slaves:

                # For master volume mount
                cls.master_mounts = cls.mounts

                # For slave volume mount
                slave_mount_dict_list = deepcopy(cls.mounts_dict_list)
                for mount_dict in slave_mount_dict_list:
                    mount_dict['volname'] = cls.slave_volname
                    mount_dict['server'] = cls.mnode_slave
                    mount_dict['mountpoint'] = path_join(
                        "/mnt", '_'.join([cls.slave_volname, cls.mount_type]))
                cls.slave_mounts = create_mount_objs(slave_mount_dict_list)

            # Defining clients from mounts.
            cls.clients = []
            for mount in cls.mounts_dict_list:
                cls.clients.append(mount['client']['host'])
            cls.clients = list(set(cls.clients))

        # Gluster Logs info
        cls.server_gluster_logs_dirs = ["/var/log/glusterfs", "/var/log/samba"]
        cls.server_gluster_logs_files = [
            "/var/log/ganesha.log", "/var/log/ganesha-gfapi.log"
        ]
        if g.config.get('gluster', {}).get('server_gluster_logs_info'):
            server_gluster_logs_info = (
                g.config['gluster']['server_gluster_logs_info'])
            if server_gluster_logs_info.get('dirs'):
                cls.server_gluster_logs_dirs = server_gluster_logs_info['dirs']
            if server_gluster_logs_info.get('files'):
                cls.server_gluster_logs_files = (
                    server_gluster_logs_info['files'])

        cls.client_gluster_logs_dirs = ["/var/log/glusterfs"]
        cls.client_gluster_logs_files = []
        if g.config.get('gluster', {}).get('client_gluster_logs_info'):
            client_gluster_logs_info = (
                g.config['gluster']['client_gluster_logs_info'])
            if client_gluster_logs_info.get('dirs'):
                cls.client_gluster_logs_dirs = client_gluster_logs_info['dirs']
            if client_gluster_logs_info.get('files'):
                cls.client_gluster_logs_files = (
                    client_gluster_logs_info['files'])

        # Have a unique string to recognize the test run for logging in
        # gluster logs
        if 'glustotest_run_id' not in g.config:
            g.config['glustotest_run_id'] = (
                datetime.now().strftime('%H_%M_%d_%m_%Y'))
        cls.glustotest_run_id = g.config['glustotest_run_id']

        msg = "Setupclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
        g.log.info(msg)
        cls.inject_msg_in_gluster_logs(msg)

        # Log the baseclass variables for debugging purposes
        g.log.debug("GlusterBaseClass Variables:\n %s", cls.__dict__)
Example #3
    def setUpClass(cls):
        # Calling GlusterBaseClass setUpClass
        GlusterBaseClass.setUpClass.im_func(cls)

        # Upload io scripts for running IO on mounts
        g.log.info("Upload io scripts to clients %s for running IO on mounts",
                   cls.clients)
        script_local_path = ("/usr/share/glustolibs/io/scripts/"
                             "file_dir_ops.py")
        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                  "file_dir_ops.py")
        ret = upload_scripts(cls.clients, [script_local_path])
        if not ret:
            raise ExecutionError("Failed to upload IO scripts to clients %s" %
                                 cls.clients)
        g.log.info("Successfully uploaded IO scripts to clients %s",
                   cls.clients)

        # Setup Volumes
        if cls.volume_type == "replicated":
            cls.volume_configs = []
            cls.mounts_dict_list = []
            # Define two replicated volumes
            for i in range(1, 3):
                cls.volume['voltype'] = {
                    'type': 'replicated',
                    'replica_count': 3,
                    'arbiter_count': 1,
                    'transport': 'tcp'
                }

                volume_config = {
                    'name': 'testvol_%s_%d' % (
                        cls.volume['voltype']['type'], i),
                    'servers': cls.servers,
                    'voltype': cls.volume['voltype']
                }
                cls.volume_configs.append(volume_config)

                # redefine mounts
                for client in cls.all_clients_info.keys():
                    mount = {
                        'protocol': cls.mount_type,
                        'server': cls.mnode,
                        'volname': volume_config['name'],
                        'client': cls.all_clients_info[client],
                        'mountpoint': os.path.join(
                            "/mnt", '_'.join([volume_config['name'],
                                              cls.mount_type])),
                        'options': ''
                    }
                    cls.mounts_dict_list.append(mount)

            cls.mounts = create_mount_objs(cls.mounts_dict_list)

            # Create and mount volumes
            cls.mount_points = []
            cls.client = cls.clients[0]
            for volume_config in cls.volume_configs:
                # Setup volume
                ret = setup_volume(mnode=cls.mnode,
                                   all_servers_info=cls.all_servers_info,
                                   volume_config=volume_config,
                                   force=False)
                if not ret:
                    raise ExecutionError("Failed to setup Volume %s" %
                                         volume_config['name'])
                g.log.info("Successful in setting volume %s",
                           volume_config['name'])

                # Mount volume
                mount_point = (os.path.join(
                    "/mnt", '_'.join([volume_config['name'], cls.mount_type])))
                cls.mount_points.append(mount_point)
                ret, _, _ = mount_volume(volume_config['name'], cls.mount_type,
                                         mount_point, cls.mnode, cls.client)
                if ret:
                    raise ExecutionError(
                        "Failed to do gluster mount on volume %s" %
                        volume_config['name'])
                g.log.info("Successfully mounted %s on client %s",
                           volume_config['name'], cls.client)
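The setUpClass above creates and mounts two volumes but no matching teardown
is shown. A plausible tearDownClass might look like the sketch below;
umount_volume and cleanup_volume are assumed here from glustolibs
(glustolibs.gluster.mount_ops and glustolibs.gluster.volume_libs), and their
exact signatures should be treated as assumptions:

    # Hedged teardown sketch for the volumes created above.
    @classmethod
    def tearDownClass(cls):
        # Unmount every mount point created in setUpClass
        for mount_point in cls.mount_points:
            ret, _, _ = umount_volume(cls.client, mount_point)
            if ret:
                raise ExecutionError("Failed to unmount %s" % mount_point)
        # Delete the volumes themselves
        for volume_config in cls.volume_configs:
            if not cleanup_volume(cls.mnode, volume_config['name']):
                raise ExecutionError("Failed to cleanup volume %s" %
                                     volume_config['name'])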
Example #4
    def setUpClass(cls):
        """Setup volume exports volume with nfs-ganesha,
            mounts the volume.
        """
        NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)

        # Peer probe servers
        ret = peer_probe_servers(cls.mnode, cls.servers)
        if not ret:
            raise ExecutionError("Failed to peer probe servers")

        g.log.info("All peers are in connected state")

        # Peer Status from mnode
        peer_status(cls.mnode)

        for server in cls.servers:
            mount_info = [{
                'protocol': 'glusterfs',
                'mountpoint': '/run/gluster/shared_storage',
                'server': server,
                'client': {
                    'host': server
                },
                'volname': 'gluster_shared_storage',
                'options': ''
            }]

            mount_obj = create_mount_objs(mount_info)
            if not mount_obj[0].is_mounted():
                ret = mount_obj[0].mount()
                if not ret:
                    raise ExecutionError(
                        "Unable to mount volume '%s:%s' "
                        "on '%s:%s'" %
                        (mount_obj[0].server_system, mount_obj[0].volname,
                         mount_obj[0].client_system, mount_obj[0].mountpoint))

        # Setup Volume
        ret = setup_volume(mnode=cls.mnode,
                           all_servers_info=cls.all_servers_info,
                           volume_config=cls.volume,
                           force=True)
        if not ret:
            raise ExecutionError("Setup volume %s failed", cls.volume)
        time.sleep(10)

        # Export volume with nfs ganesha, if it is not exported already
        vol_option = get_volume_options(cls.mnode,
                                        cls.volname,
                                        option='ganesha.enable')
        if vol_option is None:
            raise ExecutionError("Failed to get ganesha.enable volume option "
                                 "for %s " % cls.volume)
        if vol_option['ganesha.enable'] != 'on':
            ret, out, err = export_nfs_ganesha_volume(mnode=cls.mnode,
                                                      volname=cls.volname)
            if ret != 0:
                raise ExecutionError(
                    "Failed to export volume %s "
                    "as NFS export", cls.volname)
            time.sleep(5)

        ret = wait_for_nfs_ganesha_volume_to_get_exported(
            cls.mnode, cls.volname)
        if not ret:
            raise ExecutionError("Failed to export volume %s. volume is "
                                 "not listed in showmount" % cls.volname)
        g.log.info("Volume %s is exported successfully", cls.volname)

        # Log Volume Info and Status
        ret = log_volume_info_and_status(cls.mnode, cls.volname)
        if not ret:
            raise ExecutionError("Logging volume %s info and status failed",
                                 cls.volname)

        # Create Mounts
        _rc = True
        for mount_obj in cls.mounts:
            ret = mount_obj.mount()
            if not ret:
                g.log.error("Unable to mount volume '%s:%s' on '%s:%s'",
                            mount_obj.server_system, mount_obj.volname,
                            mount_obj.client_system, mount_obj.mountpoint)
                _rc = False
        if not _rc:
            raise ExecutionError("Mounting volume %s on few clients failed",
                                 cls.volname)

        # Get info of mount before the IO
        log_mounts_info(cls.mounts)
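The wait_for_nfs_ganesha_volume_to_get_exported helper above is consumed as a
boolean wait; conceptually it amounts to polling showmount on the management
node until the export appears. A rough, hedged sketch of that idea (the
timeout and output parsing are assumptions, not the helper's actual
implementation):

    import time

    def wait_for_export(mnode, volname, timeout=120):
        # Poll showmount -e until the export for volname shows up,
        # or give up after the timeout.
        deadline = time.time() + timeout
        while time.time() < deadline:
            ret, out, _ = g.run(mnode, "showmount -e localhost")
            if ret == 0 and "/%s" % volname in out:
                return True
            time.sleep(5)
        return False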
Example #5
    def test_mount_snap_delete(self):
        """
        Mount the snap volume
        * Create volume, FUSE mount the volume
        * perform I/O on mount points
        * Creating snapshot and activate snapshot
        * FUSE mount the snapshot created
        * Perform I/O on mounted snapshot
        * I/O should fail
        """
        # pylint: disable=too-many-statements
        # starting I/O
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mounts)
        all_mounts_procs = []
        self.counter = 1
        for mount_obj in self.mounts:
            cmd = (
                "/usr/bin/env python %s create_files "
                "-f 10 --base-file-name file%d %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            self.counter += 100

        # Validate I/O
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")

        # Creating snapshot
        g.log.info("Starting to create snapshots")
        ret, _, _ = snap_create(self.mnode, self.volname, "snap1")
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot snap1 created successfully "
                   "for volume  %s", self.volname)

        # Activating snapshot
        g.log.info("Activating snapshot")
        ret, _, _ = snap_activate(self.mnode, "snap1")
        self.assertEqual(ret, 0, ("Failed to Activate snapshot snap1"))
        g.log.info("snap1 activated successfully")

        # redefine mounts
        self.mount_points = []
        self.mounts_dict_list = []
        for client in self.all_clients_info:
            mount = {
                'protocol': self.mount_type,
                'server': self.mnode,
                'volname': self.volname,
                'client': self.all_clients_info[client],
                'mountpoint': (os.path.join("/mnt/snap1")),
                'options': ''
            }
            self.mounts_dict_list.append(mount)
        self.mount1 = create_mount_objs(self.mounts_dict_list)
        g.log.info("Successfully made entry in self.mount1")

        # FUSE mount snap1 snapshot
        g.log.info("Mounting snapshot snap1")
        cmd = "mkdir -p  %s" % self.mpoint
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, ("Creation of directory %s"
                                  "for mounting"
                                  "volume snap1 failed" % (self.mpoint)))
        self.mount_points.append(self.mpoint)
        cmd = "mount -t glusterfs %s:/snaps/snap1/%s %s" % (
            self.mnode, self.volname, self.mpoint)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, ("Failed to mount snap1"))
        g.log.info("snap1 is mounted Successfully")

        # starting I/O
        g.log.info("Starting IO on all mounts...")
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate I/O
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")

        # start I/O
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mount1)
        all_mounts_procs = []
        for mount_obj in self.mount1:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # validate io should fail
        self.assertFalse(validate_io_procs(all_mounts_procs, self.mount1),
                         "Unexpected: IO Successful on all clients")
        g.log.info("Expected: IO failed on clients")
Example #6
    def test_snap_clone_snapd(self):
        """
        Steps:

        1. Create a volume
        2. Create a snapshot and activate it
        3. Clone the snapshot and mount it
        4. Check for the snapd daemon
        5. Enable USS and validate snapd
        6. Stop the cloned volume
        7. Validate snapd
        8. Start the cloned volume
        9. Validate snapd
        10. Create 5 more snapshots
        11. Validate the total number of snapshots created
        12. Activate the 5 snapshots
        13. Enable USS
        14. Validate snapd
        15. Kill snapd on all nodes
        16. Validate that snapd is not running
        17. Force start the cloned volume
        18. Validate snapshots inside the .snaps directory
        """
        # pylint: disable=too-many-statements, too-many-locals

        # Starting I/O
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s" % (
                       self.script_upload_path,
                       mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate I/O
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # Creating snapshot
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(ret, 0, ("Failed to create snapshot for volume %s"
                                  % self.volname))
        g.log.info("Snapshot %s created successfully for "
                   "volume %s", self.snap, self.volname)

        # Activating created snapshots
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to activate snapshot %s"
                                  % self.snap))
        g.log.info("Snapshot snap%s activated successfully", self.snap)

        # Snapshot list
        self.assertIsNotNone(
            get_snap_list(self.mnode), "Failed to list snapshot")
        g.log.info("Snapshot list command Successful")

        # Creating and starting a Clone of snapshot:
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to clone %s" % self.clone_vol1)
        g.log.info("Clone volume %s created successfully", self.clone_vol1)

        # Start the clone volumes
        ret, _, _ = volume_start(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to start %s" % self.clone_vol1)
        g.log.info("%s started successfully", self.clone_vol1)

        # Form server list
        brick_list = get_all_bricks(self.mnode, self.clone_vol1)
        for brick in brick_list:
            self.server_lists.append(brick.split(":")[0])
        self.server_list = list(set(self.server_lists))

        # Get volume info
        vol_info = get_volume_info(self.mnode, self.clone_vol1)
        self.assertIsNotNone(vol_info, "Failed to get vol info")
        g.log.info("Successfully in getting vol info")

        # Redefining mounts for cloned volume
        self.mount_points, self.mounts_dict_list = [], []
        for client in self.all_clients_info:
            mount = {
                'protocol': self.mount_type,
                'server': self.mnode,
                'volname': self.clone_vol1,
                'client': self.all_clients_info[client],
                'mountpoint': self.mpoint,
                'options': ''
            }
            self.mounts_dict_list.append(mount)
        self.mount1 = create_mount_objs(self.mounts_dict_list)
        self.mount_points.append(self.mpoint)
        g.log.info("Successfully made entry in self.mount1")

        # FUSE mount clone1 volume
        for mount_obj in self.mounts:
            ret, _, _ = mount_volume(self.clone_vol1, self.mount_type,
                                     self.mpoint,
                                     self.mnode, mount_obj.client_system)
            self.assertEqual(ret, 0, "Volume mount failed for clone1")
            g.log.info("%s mounted Successfully", self.clone_vol1)

            # Validate clone volume is mounted or not
            ret = is_mounted(self.clone_vol1, self.mpoint, self.mnode,
                             mount_obj.client_system, self.mount_type)
            self.assertTrue(ret, "Volume not mounted on mount point: "
                            "%s" % self.mpoint)
            g.log.info("Volume %s mounted on %s", self.clone_vol1, self.mpoint)

        # Log Cloned Volume information
        ret = log_volume_info_and_status(self.mnode, self.clone_vol1)
        self.assertTrue("Failed to Log Info and Status of Volume "
                        "%s" % self.clone_vol1)
        g.log.info("Successfully Logged Info and Status")

        # Validate snapd running on all nodes
        self.validate_snapd(check_condition=False)

        # Enable USS
        ret, _, _ = enable_uss(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to enable USS on cloned volume")
        g.log.info("Successfully enabled USS on Cloned volume")

        # Validate USS running
        self.validate_uss()

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Stop cloned volume
        ret, _, _ = volume_stop(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to stop cloned volume "
                         "%s" % self.clone_vol1)
        g.log.info("Successfully Stopped Cloned volume %s", self.clone_vol1)

        # Validate snapd running on all nodes
        self.validate_snapd(check_condition=False)

        # Start cloned volume
        ret, _, _ = volume_start(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to start cloned volume"
                         " %s" % self.clone_vol1)
        g.log.info("Successfully started cloned volume"
                   " %s", self.clone_vol1)

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Create 5 snapshots
        self.snaps_list = [('test_snap_clone_snapd-snap%s' % i)
                           for i in range(0, 5)]
        for snapname in self.snaps_list:
            ret, _, _ = snap_create(self.mnode, self.clone_vol1,
                                    snapname)
            self.assertEqual(ret, 0, ("Failed to create snapshot for volume"
                                      " %s" % self.clone_vol1))
            g.log.info("Snapshot %s created successfully for volume "
                       "%s", snapname, self.clone_vol1)

        # Validate USS running
        self.validate_uss()

        # Check snapshot under .snaps directory
        self.check_snaps()

        # Activate Snapshots
        for snapname in self.snaps_list:
            ret, _, _ = snap_activate(self.mnode, snapname)
            self.assertEqual(ret, 0, ("Failed to activate snapshot %s"
                                      % snapname))
            g.log.info("Snapshot %s activated "
                       "successfully", snapname)

        # Validate USS running
        self.validate_uss()

        # Validate snapshots under .snaps folder
        self.validate_snaps()

        # Kill snapd on node and validate snapd except management node
        for server in self.servers[1:]:
            ret, _, _ = terminate_snapd_on_node(server)
            self.assertEqual(ret, 0, "Failed to Kill snapd on node %s"
                             % server)
            g.log.info("snapd Killed Successfully on node %s", server)

            # Check snapd running
            ret = is_snapd_running(server, self.clone_vol1)
            self.assertFalse(ret, "Unexpected: Snapd running on node: "
                             "%s" % server)
            g.log.info("Expected: Snapd is not running on node:%s", server)

            # Check snapshots under .snaps folder
            g.log.info("Validating snapshots under .snaps")
            ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint)
            self.assertEqual(ret, 0, "Target endpoint not connected")
            g.log.info("Successfully listed snapshots under .snaps")

        # Kill snapd in management node
        ret, _, _ = terminate_snapd_on_node(self.servers[0])
        self.assertEqual(ret, 0, "Failed to Kill snapd on node %s"
                         % self.servers[0])
        g.log.info("snapd Killed Successfully on node %s", self.servers[0])

        # Validate snapd running on all nodes
        self.validate_snapd(check_condition=False)

        # Validating snapshots under .snaps
        ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint)
        self.assertNotEqual(ret, 0, "Unexpected: Successfully listed "
                            "snapshots under .snaps")
        g.log.info("Expected: Target endpoint not connected")

        # Start the Cloned volume(force start)
        ret, _, _ = volume_start(self.mnode, self.clone_vol1, force=True)
        self.assertEqual(ret, 0, "Failed to start cloned volume "
                         "%s" % self.clone_vol1)
        g.log.info("Successfully Started Cloned volume %s", self.clone_vol1)

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Validate snapshots under .snaps folder
        self.validate_snaps()
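The uss_list_snaps checks above rely on USS exposing activated snapshots
under the .snaps directory of the mount point. In shell terms the validation
is roughly the following (a hedged equivalent, not the helper's actual
implementation):

    # Hedged equivalent of listing snapshots through USS.
    ret, out, _ = g.run(self.clients[0], "ls %s/.snaps" % self.mpoint)
    if ret == 0:
        g.log.info("Snapshots visible under .snaps: %s", out.split())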
Example #7
    def setUpClass(cls):
        """Initialize all the variables necessary for testing Gluster
        """
        # Get all servers
        cls.all_servers = None
        if 'servers' in g.config and g.config['servers']:
            cls.all_servers = g.config['servers']
            cls.servers = cls.all_servers
        else:
            raise ConfigError("'servers' not defined in the global config")

        # Get all clients
        cls.all_clients = None
        if 'clients' in g.config and g.config['clients']:
            cls.all_clients = g.config['clients']
            cls.clients = cls.all_clients
        else:
            raise ConfigError("'clients' not defined in the global config")

        # Get all servers info
        cls.all_servers_info = None
        if 'servers_info' in g.config and g.config['servers_info']:
            cls.all_servers_info = g.config['servers_info']
        else:
            raise ConfigError("'servers_info' not defined in the global "
                              "config")

        # All clients_info
        cls.all_clients_info = None
        if 'clients_info' in g.config and g.config['clients_info']:
            cls.all_clients_info = g.config['clients_info']
        else:
            raise ConfigError("'clients_info' not defined in the global "
                              "config")

        # Set mnode : Node on which gluster commands are executed
        cls.mnode = cls.all_servers[0]

        # SMB Cluster info
        try:
            cls.smb_users_info = (
                g.config['gluster']['cluster_config']['smb']['users_info'])
        except KeyError:
            cls.smb_users_info = {}
            cls.smb_users_info['root'] = {}
            cls.smb_users_info['root']['password'] = '******'
            cls.smb_users_info['root']['acl'] = 'rwx'

        # NFS-Ganesha Cluster info
        try:
            cls.enable_nfs_ganesha = bool(g.config['gluster']['cluster_config']
                                          ['nfs_ganesha']['enable'])
            cls.num_of_nfs_ganesha_nodes = (g.config['gluster']
                                            ['cluster_config']['nfs_ganesha']
                                            ['num_of_nfs_ganesha_nodes'])
            cls.vips = (g.config['gluster']['cluster_config']['nfs_ganesha']
                        ['vips'])
        except KeyError:
            cls.enable_nfs_ganesha = False
            cls.num_of_nfs_ganesha_nodes = None
            cls.vips = []

        # Defining default volume_types configuration.
        default_volume_type_config = {
            'replicated': {
                'type': 'replicated',
                'replica_count': 3,
                'transport': 'tcp'
                },
            'dispersed': {
                'type': 'dispersed',
                'disperse_count': 6,
                'redundancy_count': 2,
                'transport': 'tcp'
                },
            'distributed': {
                'type': 'distributed',
                'dist_count': 4,
                'transport': 'tcp'
                },
            'distributed-replicated': {
                'type': 'distributed-replicated',
                'dist_count': 2,
                'replica_count': 3,
                'transport': 'tcp'
                },
            'distributed-dispersed': {
                'type': 'distributed-dispersed',
                'dist_count': 2,
                'disperse_count': 6,
                'redundancy_count': 2,
                'transport': 'tcp'
                }
            }

        # Default volume options which are applicable for all the volumes
        cls.volume_options = {}
        if (g.config.get('gluster') and
                g.config['gluster'].get('volume_options')):
            cls.volume_options = g.config['gluster']['volume_options']

        # If the volume is exported as SMB Share, then set the following
        # volume options on the share.
        cls.smb_share_options = {}
        if (g.config.get('gluster') and
                g.config['gluster'].get('smb_share_options')):
            cls.smb_share_options = (
                g.config['gluster']['smb_share_options'])

        # If the volume is exported as NFS-Ganesha export,
        # then set the following volume options on the export.
        cls.nfs_ganesha_export_options = {}
        if (g.config.get('gluster') and
                g.config['gluster'].get('nfs_ganesha_export_options')):
            cls.nfs_ganesha_export_options = (
                g.config['gluster']['nfs_ganesha_export_options'])

        # Get the volume configuration.
        cls.volume = {}
        if cls.volume_type:
            found_volume = False
            if 'gluster' in g.config:
                if 'volumes' in g.config['gluster']:
                    for volume in g.config['gluster']['volumes']:
                        if volume['voltype']['type'] == cls.volume_type:
                            cls.volume = copy.deepcopy(volume)
                            found_volume = True
                            break

            if found_volume:
                if 'name' not in cls.volume:
                    cls.volume['name'] = 'testvol_%s' % cls.volume_type

                if 'servers' not in cls.volume:
                    cls.volume['servers'] = cls.all_servers

            if not found_volume:
                try:
                    if g.config['gluster']['volume_types'][cls.volume_type]:
                        cls.volume['voltype'] = (g.config['gluster']
                                                 ['volume_types']
                                                 [cls.volume_type])
                except KeyError:
                    try:
                        cls.volume['voltype'] = (default_volume_type_config
                                                 [cls.volume_type])
                    except KeyError:
                        raise ConfigError("Unable to get configs of volume "
                                          "type: %s", cls.volume_type)
                cls.volume['name'] = 'testvol_%s' % cls.volume_type
                cls.volume['servers'] = cls.all_servers

            # Set volume options
            if 'options' not in cls.volume:
                cls.volume['options'] = cls.volume_options

            # Define Volume Useful Variables.
            cls.volname = cls.volume['name']
            cls.voltype = cls.volume['voltype']['type']
            cls.servers = cls.volume['servers']
            cls.mnode = cls.servers[0]
            cls.vol_options = cls.volume['options']

        # Get the mount configuration.
        cls.mounts = []
        if cls.mount_type:
            cls.mounts_dict_list = []
            found_mount = False
            if 'gluster' in g.config:
                if 'mounts' in g.config['gluster']:
                    for mount in g.config['gluster']['mounts']:
                        if mount['protocol'] == cls.mount_type:
                            temp_mount = {}
                            temp_mount['protocol'] = cls.mount_type
                            if 'volname' in mount and mount['volname']:
                                if mount['volname'] == cls.volname:
                                    temp_mount = copy.deepcopy(mount)
                                else:
                                    continue
                            else:
                                temp_mount['volname'] = cls.volname
                            if ('server' not in mount or
                                    (not mount['server'])):
                                temp_mount['server'] = cls.mnode
                            else:
                                temp_mount['server'] = mount['server']
                            if ('mountpoint' not in mount or
                                    (not mount['mountpoint'])):
                                temp_mount['mountpoint'] = (os.path.join(
                                    "/mnt", '_'.join([cls.volname,
                                                      cls.mount_type])))
                            else:
                                temp_mount['mountpoint'] = mount['mountpoint']
                            if ('client' not in mount or
                                    (not mount['client'])):
                                temp_mount['client'] = (
                                    cls.all_clients_info[random.choice(
                                        list(cls.all_clients_info.keys()))])
                            else:
                                temp_mount['client'] = mount['client']
                            if 'options' in mount and mount['options']:
                                temp_mount['options'] = mount['options']
                            else:
                                temp_mount['options'] = ''
                            cls.mounts_dict_list.append(temp_mount)
                            found_mount = True

            if not found_mount:
                for client in cls.all_clients_info.keys():
                    mount = {
                        'protocol': cls.mount_type,
                        'server': cls.mnode,
                        'volname': cls.volname,
                        'client': cls.all_clients_info[client],
                        'mountpoint': (os.path.join(
                            "/mnt", '_'.join([cls.volname, cls.mount_type]))),
                        'options': ''
                        }
                    cls.mounts_dict_list.append(mount)

            if cls.mount_type == 'cifs' or cls.mount_type == 'smb':
                for mount in cls.mounts_dict_list:
                    if 'smbuser' not in mount:
                        mount['smbuser'] = random.choice(
                            list(cls.smb_users_info.keys()))
                        mount['smbpasswd'] = (
                            cls.smb_users_info[mount['smbuser']]['password'])

            cls.mounts = create_mount_objs(cls.mounts_dict_list)

            # Defining clients from mounts.
            cls.clients = []
            for mount in cls.mounts_dict_list:
                cls.clients.append(mount['client']['host'])
            cls.clients = list(set(cls.clients))

        # Gluster Logs info
        cls.server_gluster_logs_dirs = ["/var/log/glusterfs",
                                        "/var/log/samba"]
        cls.server_gluster_logs_files = ["/var/log/ganesha.log",
                                         "/var/log/ganesha-gfapi.log"]
        if ('gluster' in g.config and
                'server_gluster_logs_info' in g.config['gluster']):
            server_gluster_logs_info = (
                g.config['gluster']['server_gluster_logs_info'])
            if ('dirs' in server_gluster_logs_info and
                    server_gluster_logs_info['dirs']):
                cls.server_gluster_logs_dirs = (
                    server_gluster_logs_info['dirs'])

            if ('files' in server_gluster_logs_info and
                    server_gluster_logs_info['files']):
                cls.server_gluster_logs_files = (
                    server_gluster_logs_info['files'])

        cls.client_gluster_logs_dirs = ["/var/log/glusterfs"]
        cls.client_gluster_logs_files = []
        if ('gluster' in g.config and
                'client_gluster_logs_info' in g.config['gluster']):
            client_gluster_logs_info = (
                g.config['gluster']['client_gluster_logs_info'])
            if ('dirs' in client_gluster_logs_info and
                    client_gluster_logs_info['dirs']):
                cls.client_gluster_logs_dirs = (
                    client_gluster_logs_info['dirs'])

            if ('files' in client_gluster_logs_info and
                    client_gluster_logs_info['files']):
                cls.client_gluster_logs_files = (
                    client_gluster_logs_info['files'])

        # Have a unique string to recognize the test run for logging in
        # gluster logs
        if 'glustotest_run_id' not in g.config:
            g.config['glustotest_run_id'] = (
                datetime.datetime.now().strftime('%H_%M_%d_%m_%Y'))
        cls.glustotest_run_id = g.config['glustotest_run_id']

        msg = "Setupclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
        g.log.info(msg)
        cls.inject_msg_in_gluster_logs(msg)

        # Log the baseclass variables for debugging purposes
        g.log.debug("GlusterBaseClass Variables:\n %s", cls.__dict__)
Example #8
    def setUpClass(cls):
        """Initialize all the variables necessary for testing Gluster
        """
        g.log.info("Setting up class: %s", cls.__name__)

        # Get all servers
        cls.all_servers = None
        if 'servers' in g.config and g.config['servers']:
            cls.all_servers = g.config['servers']
            cls.servers = cls.all_servers
        else:
            raise ConfigError("'servers' not defined in the global config")

        # Get all clients
        cls.all_clients = None
        if 'clients' in g.config and g.config['clients']:
            cls.all_clients = g.config['clients']
            cls.clients = cls.all_clients
        else:
            raise ConfigError("'clients' not defined in the global config")

        # Get all servers info
        cls.all_servers_info = None
        if 'servers_info' in g.config and g.config['servers_info']:
            cls.all_servers_info = g.config['servers_info']
        else:
            raise ConfigError("'servers_info' not defined in the global "
                              "config")

        # All clients_info
        cls.all_clients_info = None
        if 'clients_info' in g.config and g.config['clients_info']:
            cls.all_clients_info = g.config['clients_info']
        else:
            raise ConfigError("'clients_info' not defined in the global "
                              "config")

        # Set mnode : Node on which gluster commands are executed
        cls.mnode = cls.all_servers[0]

        # SMB Cluster info
        try:
            cls.smb_users_info = (
                g.config['gluster']['cluster_config']['smb']['users_info'])
        except KeyError:
            cls.smb_users_info = {}
            cls.smb_users_info['root'] = {}
            cls.smb_users_info['root']['password'] = '******'
            cls.smb_users_info['root']['acl'] = 'rwx'

        # NFS-Ganesha Cluster Info
        try:
            cls.enable_nfs_ganesha = bool(
                g.config['gluster']['cluster_config']['nfs_ganesha']['enable'])
        except KeyError:
            cls.enable_nfs_ganesha = False

        # Defining default volume_types configuration.
        default_volume_type_config = {
            'replicated': {
                'type': 'replicated',
                'replica_count': 3,
                'transport': 'tcp'
            },
            'dispersed': {
                'type': 'dispersed',
                'disperse_count': 6,
                'redundancy_count': 2,
                'transport': 'tcp'
            },
            'distributed': {
                'type': 'distributed',
                'dist_count': 4,
                'transport': 'tcp'
            },
            'distributed-replicated': {
                'type': 'distributed-replicated',
                'dist_count': 2,
                'replica_count': 3,
                'transport': 'tcp'
            },
            'distributed-dispersed': {
                'type': 'distributed-dispersed',
                'dist_count': 2,
                'disperse_count': 6,
                'redundancy_count': 2,
                'transport': 'tcp'
            }
        }

        # Get the volume configuration.
        cls.volume = {}
        if cls.volume_type:
            found_volume = False
            if 'gluster' in g.config:
                if 'volumes' in g.config['gluster']:
                    for volume in g.config['gluster']['volumes']:
                        if volume['voltype']['type'] == cls.volume_type:
                            cls.volume = copy.deepcopy(volume)
                            found_volume = True
                            break

            if found_volume:
                if 'name' not in cls.volume:
                    cls.volume['name'] = 'testvol_%s' % cls.volume_type

                if 'servers' not in cls.volume:
                    cls.volume['servers'] = cls.all_servers

            if not found_volume:
                try:
                    if g.config['gluster']['volume_types'][cls.volume_type]:
                        cls.volume['voltype'] = (
                            g.config['gluster']['volume_types'][
                                cls.volume_type])
                except KeyError:
                    try:
                        cls.volume['voltype'] = (
                            default_volume_type_config[cls.volume_type])
                    except KeyError:
                        raise ConfigError(
                            "Unable to get configs of volume "
                            "type: %s", cls.volume_type)
                cls.volume['name'] = 'testvol_%s' % cls.volume_type
                cls.volume['servers'] = cls.all_servers

            # Set volume options
            if 'options' not in cls.volume:
                cls.volume['options'] = {}

            # Define Volume Useful Variables.
            cls.volname = cls.volume['name']
            cls.voltype = cls.volume['voltype']['type']
            cls.servers = cls.volume['servers']
            cls.mnode = cls.servers[0]
            cls.vol_options = cls.volume['options']

        # Get the mount configuration.
        cls.mounts = []
        if cls.mount_type:
            cls.mounts_dict_list = []
            found_mount = False
            if 'gluster' in g.config:
                if 'mounts' in g.config['gluster']:
                    for mount in g.config['gluster']['mounts']:
                        if mount['protocol'] == cls.mount_type:
                            temp_mount = {}
                            temp_mount['protocol'] = cls.mount_type
                            if 'volname' in mount and mount['volname']:
                                if mount['volname'] == cls.volname:
                                    temp_mount = copy.deepcopy(mount)
                                else:
                                    continue
                            else:
                                temp_mount['volname'] = cls.volname
                            if ('server' not in temp_mount
                                    or (not temp_mount['server'])):
                                temp_mount['server'] = cls.mnode
                            if ('mountpoint' not in temp_mount
                                    or (not temp_mount['mountpoint'])):
                                temp_mount['mountpoint'] = (os.path.join(
                                    "/mnt",
                                    '_'.join([cls.volname, cls.mount_type])))
                            if ('client' not in temp_mount
                                    or (not temp_mount['client'])):
                                temp_mount['client'] = (
                                    cls.all_clients_info[random.choice(
                                        list(cls.all_clients_info.keys()))])
                            cls.mounts_dict_list.append(temp_mount)
                            found_mount = True
            if not found_mount:
                for client in cls.all_clients_info.keys():
                    mount = {
                        'protocol': cls.mount_type,
                        'server': cls.mnode,
                        'volname': cls.volname,
                        'client': cls.all_clients_info[client],
                        'mountpoint': os.path.join(
                            "/mnt", '_'.join([cls.volname, cls.mount_type])),
                        'options': ''
                    }
                    cls.mounts_dict_list.append(mount)

            if cls.mount_type == 'cifs' or cls.mount_type == 'smb':
                for mount in cls.mounts_dict_list:
                    if 'smbuser' not in mount:
                        mount['smbuser'] = random.choice(
                            list(cls.smb_users_info.keys()))
                        mount['smbpasswd'] = (
                            cls.smb_users_info[mount['smbuser']]['password'])

            cls.mounts = create_mount_objs(cls.mounts_dict_list)

            # Defining clients from mounts.
            cls.clients = []
            for mount in cls.mounts_dict_list:
                cls.clients.append(mount['client']['host'])
            cls.clients = list(set(cls.clients))

        # Log the baseclass variables for debugging purposes
        g.log.debug("GlusterBaseClass Variables:\n %s", cls.__dict__)
Example #9
def configure_mounts(mnode, volname, mount_type, all_clients_info):
    """Defines the mount configurations.

    Args:
        mnode(str): Node on which the volume should be mounted
        volname(str): Name of the volume
        mount_type(str): Defines the mount protocol
        all_clients_info(dict): Dict of clients information

    Returns:
        clients(list): List of client hosts used by the mounts
        mounts_dict_list(list): List of the mount configurations
        mounts(list): List of GlusterMount instances
    """
    # Get the mount configuration
    mounts = []
    mounts_dict_list = []
    clients = []
    if mount_type:
        found_mount = False
        if g.config.get('gluster', {}).get('mounts'):
            for mount in g.config['gluster']['mounts']:
                if mount['protocol'] == mount_type:
                    temp_mount = {}
                    temp_mount['protocol'] = mount_type
                    if 'volname' in mount and mount['volname']:
                        if mount['volname'] == volname:
                            temp_mount = copy.deepcopy(mount)
                        else:
                            continue
                    else:
                        temp_mount['volname'] = volname
                    if ('server' not in mount or (not mount['server'])):
                        temp_mount['server'] = mnode
                    else:
                        temp_mount['server'] = mount['server']
                    if ('mountpoint' not in mount
                            or (not mount['mountpoint'])):
                        temp_mount['mountpoint'] = (os.path.join(
                            "/mnt", '_'.join([volname, mount_type])))
                    else:
                        temp_mount['mountpoint'] = mount['mountpoint']
                    if ('client' not in mount or (not mount['client'])):
                        temp_mount['client'] = (all_clients_info[
                            random.choice(list(all_clients_info.keys()))])
                    else:
                        temp_mount['client'] = mount['client']
                    if 'options' in mount and mount['options']:
                        temp_mount['options'] = mount['options']
                    else:
                        temp_mount['options'] = ''
                    mounts_dict_list.append(temp_mount)
                    found_mount = True

        if not found_mount:
            for client in all_clients_info.keys():
                mount = {
                    'protocol': mount_type,
                    'server': mnode,
                    'volname': volname,
                    'client': all_clients_info[client],
                    'mountpoint': os.path.join(
                        "/mnt", '_'.join([volname, mount_type])),
                    'options': ''
                }
                mounts_dict_list.append(mount)
        mounts = create_mount_objs(mounts_dict_list)

        # Defining clients from mounts.
        clients = []
        for mount in mounts_dict_list:
            clients.append(mount['client']['host'])
        clients = list(set(clients))

    return clients, mounts_dict_list, mounts
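A hedged usage sketch for configure_mounts; the hostnames and the shape of
the client info are placeholders mirroring the 'client': {'host': ...} dicts
used elsewhere on this page:

    all_clients_info = {
        'client0.example.com': {'host': 'client0.example.com'},
    }
    clients, mounts_dict_list, mounts = configure_mounts(
        'server0.example.com', 'testvol_distributed', 'glusterfs',
        all_clients_info)
    for mount_obj in mounts:
        if not mount_obj.is_mounted():
            mount_obj.mount()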