def setUpClass(cls): """ Setup variable for nfs-ganesha tests. """ # pylint: disable=too-many-statements, too-many-branches super(NfsGaneshaClusterSetupClass, cls).setUpClass() # Check if enable_nfs_ganesha is set in config file if not cls.enable_nfs_ganesha: raise ConfigError("Please enable nfs ganesha in config") # Read num_of_nfs_ganesha_nodes from config file and create # nfs ganesha cluster accordingly cls.num_of_nfs_ganesha_nodes = int(cls.num_of_nfs_ganesha_nodes) cls.servers_in_nfs_ganesha_cluster = ( cls.servers[:cls.num_of_nfs_ganesha_nodes]) cls.vips_in_nfs_ganesha_cluster = ( cls.vips[:cls.num_of_nfs_ganesha_nodes]) # Obtain hostname of servers in ganesha cluster cls.ganesha_servers_hostname = [] for ganesha_server in cls.servers_in_nfs_ganesha_cluster: ret, hostname, _ = g.run(ganesha_server, "hostname") if ret: raise ExecutionError("Failed to obtain hostname of %s" % ganesha_server) hostname = hostname.strip() g.log.info("Obtained hostname: IP- %s, hostname- %s", ganesha_server, hostname) cls.ganesha_servers_hostname.append(hostname)
def __init__(self, mount):
    # Check for missing parameters
    for param in ['protocol', 'mountpoint', 'server', 'client',
                  'volname', 'options']:
        if param not in mount:
            raise ConfigError("Missing key %s" % param)

    # Get Protocol
    self.mounttype = mount.get('protocol', 'glusterfs')

    # Get mountpoint
    if bool(mount.get('mountpoint', False)):
        self.mountpoint = mount['mountpoint']
    else:
        self.mountpoint = "/mnt/%s" % self.mounttype

    # Get server
    self.server_system = mount.get('server', None)

    # Get client
    self.client_system = mount.get('client', None)

    # Get Volume name
    self.volname = mount['volname']

    # Get options
    self.options = mount.get('options', None)
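
# A minimal usage sketch for the constructor above. The wrapper's class name
# ('GlusterMount') is an assumption here since only __init__ is shown; the
# dict keys are exactly the ones the constructor validates, and the host
# names are illustrative placeholders.
example_mount_config = {
    'protocol': 'glusterfs',
    'mountpoint': '',                  # falsy, so it defaults to /mnt/glusterfs
    'server': 'server1.example.com',
    'client': {'host': 'client1.example.com'},
    'volname': 'testvol_replicated',
    'options': '',
}
# mount_obj = GlusterMount(example_mount_config)   # hypothetical class name
# mount_obj.mountpoint would then be '/mnt/glusterfs'
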
def tearDown(self):
    # Calling GlusterBaseClass tearDown
    GlusterBaseClass.tearDown.im_func(self)

    # Unmounting volume from custom mount point
    g.log.info("Unmounting mount point %s", self.mpoint)
    cmd = "umount %s" % self.mpoint
    ret, _, _ = g.run(self.clients[0], cmd)
    if ret != 0:
        raise ExecutionError("Failed to unmount %s" % self.mpoint)
    g.log.info("Successfully unmounted %s", self.mpoint)

    ret, _, _ = snap_delete_by_volumename(self.mnode, self.volname)
    if ret != 0:
        raise ExecutionError("Failed to delete snapshots of volume %s"
                             % self.volname)
    g.log.info("Successfully deleted snapshots of volume %s", self.volname)

    # Unmount and cleanup volume
    g.log.info("Unmount and cleanup volume")
    ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
    if not ret:
        raise ConfigError("Failed to unmount and cleanup volume")
    g.log.info("Successfully cleaned up the volume")
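
# A hedged helper sketch for the tearDown above: confirm that a mountpoint is
# really gone on the client before moving on to snapshot and volume cleanup.
# It only relies on g.run (used throughout these tests) and /proc/mounts; the
# helper name and its use here are assumptions, not part of the original test.
def is_mountpoint_absent(client, mountpoint):
    """Return True if 'mountpoint' is no longer listed in /proc/mounts."""
    ret, _, _ = g.run(client, "grep -qs ' %s ' /proc/mounts" % mountpoint)
    return ret != 0
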
def setUpClass(cls): """Initialize all the variables necessary for testing Gluster.""" # Get all servers cls.all_servers = None if 'servers' in g.config and g.config['servers']: cls.all_servers = g.config['servers'] cls.servers = cls.all_servers else: raise ConfigError("'servers' not defined in the global config") # Get all slaves cls.slaves = None if g.config.get('slaves'): cls.slaves = g.config['slaves'] # Set mnode_slave : Node on which slave commands are executed cls.mnode_slave = cls.slaves[0] # Slave IP's cls.slaves_ip = cls.get_ip_from_hostname(cls.slaves) # Get all clients cls.all_clients = None if g.config.get('clients'): cls.all_clients = g.config['clients'] cls.clients = cls.all_clients else: raise ConfigError("'clients' not defined in the global config") # Get all servers info cls.all_servers_info = None if g.config.get('servers_info'): cls.all_servers_info = g.config['servers_info'] else: raise ConfigError("'servers_info' not defined in the global " "config") # Get all slaves info cls.all_slaves_info = None if g.config.get('slaves_info'): cls.all_slaves_info = g.config['slaves_info'] # All clients_info cls.all_clients_info = None if g.config.get('clients_info'): cls.all_clients_info = g.config['clients_info'] else: raise ConfigError("'clients_info' not defined in the global " "config") # get lv list cls.lv_list = cls.get_unique_lv_list_from_all_servers() # Set mnode : Node on which gluster commands are executed cls.mnode = cls.all_servers[0] # Server IP's cls.servers_ips = cls.get_ip_from_hostname(cls.servers) # SMB Cluster info try: cls.smb_users_info = ( g.config['gluster']['cluster_config']['smb']['users_info']) except KeyError: cls.smb_users_info = {} cls.smb_users_info['root'] = {} cls.smb_users_info['root']['password'] = '******' cls.smb_users_info['root']['acl'] = 'rwx' # NFS-Ganesha Cluster info try: cls.enable_nfs_ganesha = ( g.config['gluster']['cluster_config']['nfs_ganesha']['enable'] in ('TRUE', 'True', 'true', 'YES', 'Yes', 'yes', '1', 1)) cls.num_of_nfs_ganesha_nodes = g.config['gluster'][ 'cluster_config']['nfs_ganesha']['num_of_nfs_ganesha_nodes'] cls.vips = ( g.config['gluster']['cluster_config']['nfs_ganesha']['vips']) except KeyError: cls.enable_nfs_ganesha = False cls.num_of_nfs_ganesha_nodes = None cls.vips = [] # Geo-rep Cluster information try: cls.geo_rep_info = ( g.config['gluster']['geo_rep']['cluster_config']) except KeyError: cls.geo_rep_info = {} cls.geo_rep_info['root'] = {} cls.geo_rep_info['user'] = {} cls.geo_rep_info['root']['password'] = '' cls.geo_rep_info['user']['name'] = '' cls.geo_rep_info['user']['password'] = '' cls.geo_rep_info['user']['group'] = '' # Defining default volume_types configuration. 
    # Defining default volume_types configuration.
    cls.default_volume_type_config = {
        'replicated': {
            'type': 'replicated',
            'replica_count': 3,
            'transport': 'tcp',
        },
        'dispersed': {
            'type': 'dispersed',
            'disperse_count': 6,
            'redundancy_count': 2,
            'transport': 'tcp',
        },
        'distributed': {
            'type': 'distributed',
            'dist_count': 4,
            'transport': 'tcp',
        },
        'distributed-replicated': {
            'type': 'distributed-replicated',
            'dist_count': 2,
            'replica_count': 3,
            'transport': 'tcp',
        },
        'distributed-dispersed': {
            'type': 'distributed-dispersed',
            'dist_count': 2,
            'disperse_count': 6,
            'redundancy_count': 2,
            'transport': 'tcp',
        },
        'arbiter': {
            'type': 'arbiter',
            'replica_count': 3,
            'arbiter_count': 1,
            'transport': 'tcp',
        },
        'distributed-arbiter': {
            'type': 'distributed-arbiter',
            'dist_count': 2,
            'replica_count': 3,
            'arbiter_count': 1,
            'transport': 'tcp',
        },
    }

    # Check if default volume_type configuration is provided in config yml
    if g.config.get('gluster', {}).get('volume_types'):
        default_volume_type_from_config = (
            g.config['gluster']['volume_types'])
        for volume_type in default_volume_type_from_config.keys():
            if default_volume_type_from_config[volume_type]:
                if volume_type in cls.default_volume_type_config:
                    cls.default_volume_type_config[volume_type] = (
                        default_volume_type_from_config[volume_type])

    # Create Volume with force option
    cls.volume_create_force = False
    if g.config.get('gluster', {}).get('volume_create_force'):
        cls.volume_create_force = (
            g.config['gluster']['volume_create_force'])

    # Default volume options which are applicable for all the volumes
    cls.volume_options = {}
    if g.config.get('gluster', {}).get('volume_options'):
        cls.volume_options = g.config['gluster']['volume_options']

    # If the volume is exported as an SMB share, then set the following
    # volume options on the share.
    cls.smb_share_options = {}
    if g.config.get('gluster', {}).get('smb_share_options'):
        cls.smb_share_options = g.config['gluster']['smb_share_options']

    # If the volume is exported as an NFS-Ganesha export,
    # then set the following volume options on the export.
    cls.nfs_ganesha_export_options = {}
    if g.config.get('gluster', {}).get('nfs_ganesha_export_options'):
        cls.nfs_ganesha_export_options = (
            g.config['gluster']['nfs_ganesha_export_options'])

    # Get the volume configuration.
    cls.volume = {}
    if cls.volume_type:
        for volume in g.config.get('gluster', {}).get('volumes', []):
            if volume['voltype']['type'] == cls.volume_type:
                cls.volume = deepcopy(volume)
                if 'name' not in cls.volume:
                    cls.volume['name'] = 'testvol_%s' % cls.volume_type
                if 'servers' not in cls.volume:
                    cls.volume['servers'] = cls.all_servers
                break
        else:
            try:
                if g.config['gluster']['volume_types'][cls.volume_type]:
                    cls.volume['voltype'] = (
                        g.config['gluster']['volume_types'][
                            cls.volume_type])
            except KeyError:
                try:
                    cls.volume['voltype'] = (
                        cls.default_volume_type_config[cls.volume_type])
                except KeyError:
                    raise ConfigError("Unable to get configs of volume "
                                      "type: %s" % cls.volume_type)
            cls.volume['name'] = 'testvol_%s' % cls.volume_type
            cls.volume['servers'] = cls.all_servers

        # Set volume options
        if 'options' not in cls.volume:
            cls.volume['options'] = cls.volume_options

        # Define Volume Useful Variables.
        cls.volname = cls.volume['name']
        cls.voltype = cls.volume['voltype']['type']
        cls.servers = cls.volume['servers']
        cls.mnode = cls.servers[0]
        cls.vol_options = cls.volume['options']
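
        # Worked example of what the volume-type defaults above imply for
        # brick counts (bricks needed = dist_count x bricks per subvolume):
        #   replicated             -> 3 bricks (replica 3)
        #   distributed-replicated -> 2 x 3 = 6 bricks
        #   dispersed              -> 6 bricks (4 data + 2 redundancy)
        #   distributed-dispersed  -> 2 x 6 = 12 bricks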
    # Define useful variables for geo-rep volumes.
    if cls.slaves:
        # For master volume
        cls.master_volume = cls.volume
        cls.master_volume['name'] = ('master_testvol_%s'
                                     % cls.volume_type)
        cls.master_volname = cls.master_volume['name']
        cls.master_voltype = (cls.master_volume['voltype']['type'])

        # For slave volume
        cls.slave_volume = deepcopy(cls.volume)
        cls.slave_volume['name'] = ('slave_testvol_%s' % cls.volume_type)
        cls.slave_volume['servers'] = cls.slaves
        cls.slave_volname = cls.slave_volume['name']
        cls.slave_voltype = (cls.slave_volume['voltype']['type'])

    # Get the mount configuration.
    cls.mounts = []
    if cls.mount_type:
        cls.mounts_dict_list = []
        for mount in g.config.get('gluster', {}).get('mounts', []):
            if mount['protocol'] != cls.mount_type:
                continue
            temp_mount = {
                'protocol': cls.mount_type,
                'volname': cls.volname,
            }
            if mount.get('volname'):
                if mount['volname'] == cls.volname:
                    temp_mount = deepcopy(mount)
                else:
                    continue
            temp_mount.update({
                'server': mount.get('server', cls.mnode),
                'mountpoint': mount.get(
                    'mountpoint',
                    path_join("/mnt",
                              '_'.join([cls.volname, cls.mount_type]))),
                'client': mount.get(
                    'client',
                    cls.all_clients_info[random_choice(
                        list(cls.all_clients_info.keys()))]),
                'options': mount.get('options', ''),
            })
            cls.mounts_dict_list.append(temp_mount)

        if not cls.mounts_dict_list:
            for client in cls.all_clients_info.keys():
                cls.mounts_dict_list.append({
                    'protocol': cls.mount_type,
                    'server': cls.mnode,
                    'volname': cls.volname,
                    'client': cls.all_clients_info[client],
                    'mountpoint': path_join(
                        "/mnt", '_'.join([cls.volname, cls.mount_type])),
                    'options': '',
                })

        if cls.mount_type == 'cifs' or cls.mount_type == 'smb':
            for mount in cls.mounts_dict_list:
                if 'smbuser' not in mount:
                    mount['smbuser'] = random_choice(
                        list(cls.smb_users_info.keys()))
                    mount['smbpasswd'] = (
                        cls.smb_users_info[mount['smbuser']]['password'])

        cls.mounts = create_mount_objs(cls.mounts_dict_list)

        # Setting mounts for geo-rep volumes.
        if cls.slaves:
            # For master volume mount
            cls.master_mounts = cls.mounts

            # For slave volume mount
            slave_mount_dict_list = deepcopy(cls.mounts_dict_list)
            for mount_dict in slave_mount_dict_list:
                mount_dict['volname'] = cls.slave_volname
                mount_dict['server'] = cls.mnode_slave
                mount_dict['mountpoint'] = path_join(
                    "/mnt", '_'.join([cls.slave_volname,
                                      cls.mount_type]))
            cls.slave_mounts = create_mount_objs(slave_mount_dict_list)
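
        # A sketch of one generated mount entry from the fallback above, with
        # illustrative host names (the mountpoint is derived from the volume
        # name and mount type):
        #     {'protocol': 'glusterfs', 'server': 'server1.example.com',
        #      'volname': 'testvol_replicated',
        #      'client': {'host': 'client1.example.com'},
        #      'mountpoint': '/mnt/testvol_replicated_glusterfs',
        #      'options': ''}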
        # Defining clients from mounts.
        cls.clients = []
        for mount in cls.mounts_dict_list:
            cls.clients.append(mount['client']['host'])
        cls.clients = list(set(cls.clients))

    # Gluster Logs info
    cls.server_gluster_logs_dirs = ["/var/log/glusterfs",
                                    "/var/log/samba"]
    cls.server_gluster_logs_files = ["/var/log/ganesha.log",
                                     "/var/log/ganesha-gfapi.log"]
    if g.config.get('gluster', {}).get('server_gluster_logs_info'):
        server_gluster_logs_info = (
            g.config['gluster']['server_gluster_logs_info'])
        if server_gluster_logs_info.get('dirs'):
            cls.server_gluster_logs_dirs = server_gluster_logs_info['dirs']
        if server_gluster_logs_info.get('files'):
            cls.server_gluster_logs_files = (
                server_gluster_logs_info['files'])

    cls.client_gluster_logs_dirs = ["/var/log/glusterfs"]
    cls.client_gluster_logs_files = []
    if g.config.get('gluster', {}).get('client_gluster_logs_info'):
        client_gluster_logs_info = (
            g.config['gluster']['client_gluster_logs_info'])
        if client_gluster_logs_info.get('dirs'):
            cls.client_gluster_logs_dirs = client_gluster_logs_info['dirs']
        if client_gluster_logs_info.get('files'):
            cls.client_gluster_logs_files = (
                client_gluster_logs_info['files'])

    # Have a unique string to recognize the test run for logging in
    # gluster logs
    if 'glustotest_run_id' not in g.config:
        g.config['glustotest_run_id'] = (
            datetime.now().strftime('%H_%M_%d_%m_%Y'))
    cls.glustotest_run_id = g.config['glustotest_run_id']

    msg = "Setupclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
    g.log.info(msg)
    cls.inject_msg_in_gluster_logs(msg)

    # Log the baseclass variables for debugging purposes
    g.log.debug("GlusterBaseClass Variables:\n %s", cls.__dict__)
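
# A quick sketch of the run-id marker built above: it is only a timestamp
# string, reused for the whole run so the same marker can be grepped in the
# gluster logs on every node. The class name below is illustrative.
from datetime import datetime

example_run_id = datetime.now().strftime('%H_%M_%d_%m_%Y')   # e.g. '14_35_27_03_2024'
example_marker = "Setupclass: %s : %s" % ('TestExampleClass', example_run_id)
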
def setUpClass(cls): """Setup nfs-ganesha cluster tests. """ # Check if gdeploy is installed on glusto-tests management node. ret, _, _ = g.run_local("gdeploy --version") if ret != 0: raise ConfigError("Please install gdeploy to run the scripts") GlusterBaseClass.setUpClass.im_func(cls) # Check if enable_nfs_ganesha is set in config file if not cls.enable_nfs_ganesha: raise ConfigError("Please enable nfs ganesha in config") # Read num_of_nfs_ganesha_nodes from config file and create # nfs ganesha cluster accordingly cls.num_of_nfs_ganesha_nodes = int(cls.num_of_nfs_ganesha_nodes) cls.servers_in_nfs_ganesha_cluster = ( cls.servers[:cls.num_of_nfs_ganesha_nodes]) cls.vips_in_nfs_ganesha_cluster = ( cls.vips[:cls.num_of_nfs_ganesha_nodes]) # Create nfs ganesha cluster if not exists already if (is_nfs_ganesha_cluster_exists( cls.servers_in_nfs_ganesha_cluster[0])): if is_nfs_ganesha_cluster_in_healthy_state( cls.servers_in_nfs_ganesha_cluster[0]): g.log.info("Nfs-ganesha Cluster exists and is in healthy " "state. Skipping cluster creation...") else: g.log.info("Nfs-ganesha Cluster exists and is not in " "healthy state.") g.log.info("Tearing down existing cluster which is not in " "healthy state") ganesha_ha_file = ("/var/run/gluster/shared_storage/" "nfs-ganesha/ganesha-ha.conf") g.log.info("Collecting server details of existing " "nfs ganesha cluster") conn = g.rpyc_get_connection( cls.servers_in_nfs_ganesha_cluster[0], user="******") if conn is None: tmp_node = cls.servers_in_nfs_ganesha_cluster[0] raise ExecutionError("Unable to get connection to 'root' " " of node %s " % tmp_node) if not conn.modules.os.path.exists(ganesha_ha_file): raise ExecutionError("Unable to locate %s" % ganesha_ha_file) with conn.builtin.open(ganesha_ha_file, "r") as fh: ganesha_ha_contents = fh.read() g.rpyc_close_connection( host=cls.servers_in_nfs_ganesha_cluster[0], user="******") servers_in_existing_cluster = re.findall( r'VIP_(.*)\=.*', ganesha_ha_contents) ret = teardown_nfs_ganesha_cluster(servers_in_existing_cluster, force=True) if not ret: raise ExecutionError("Failed to teardown nfs " "ganesha cluster") g.log.info("Existing cluster got teardown successfully") g.log.info("Creating nfs-ganesha cluster of %s nodes" % str(cls.num_of_nfs_ganesha_nodes)) g.log.info("Nfs-ganesha cluster node info: %s" % cls.servers_in_nfs_ganesha_cluster) g.log.info("Nfs-ganesha cluster vip info: %s" % cls.vips_in_nfs_ganesha_cluster) ret = create_nfs_ganesha_cluster( cls.servers_in_nfs_ganesha_cluster, cls.vips_in_nfs_ganesha_cluster) if not ret: raise ExecutionError("Failed to create " "nfs-ganesha cluster") else: g.log.info("Creating nfs-ganesha cluster of %s nodes" % str(cls.num_of_nfs_ganesha_nodes)) g.log.info("Nfs-ganesha cluster node info: %s" % cls.servers_in_nfs_ganesha_cluster) g.log.info("Nfs-ganesha cluster vip info: %s" % cls.vips_in_nfs_ganesha_cluster) ret = create_nfs_ganesha_cluster( cls.servers_in_nfs_ganesha_cluster, cls.vips_in_nfs_ganesha_cluster) if not ret: raise ExecutionError("Failed to create " "nfs-ganesha cluster") if is_nfs_ganesha_cluster_in_healthy_state( cls.servers_in_nfs_ganesha_cluster[0]): g.log.info("Nfs-ganesha Cluster exists is in healthy state") else: raise ExecutionError("Nfs-ganesha Cluster setup Failed") ret = set_nfs_ganesha_client_configuration(cls.clients) if not ret: raise ExecutionError("Failed to do client nfs ganesha " "configuration") for server in cls.servers: for client in cls.clients: cmd = ("if [ -z \"$(grep -R \"%s\" /etc/hosts)\" ]; then " "echo \"%s %s\" >> /etc/hosts; 
fi" % (client, socket.gethostbyname(client), client)) ret, _, _ = g.run(server, cmd) if ret != 0: g.log.error("Failed to add entry of client %s in " "/etc/hosts of server %s" % (client, server)) for client in cls.clients: for server in cls.servers: cmd = ("if [ -z \"$(grep -R \"%s\" /etc/hosts)\" ]; then " "echo \"%s %s\" >> /etc/hosts; fi" % (server, socket.gethostbyname(server), server)) ret, _, _ = g.run(client, cmd) if ret != 0: g.log.error("Failed to add entry of server %s in " "/etc/hosts of client %s" % (server, client))
def setUpClass(cls): """Initialize all the variables necessary for testing Gluster """ # Get all servers cls.all_servers = None if 'servers' in g.config and g.config['servers']: cls.all_servers = g.config['servers'] cls.servers = cls.all_servers else: raise ConfigError("'servers' not defined in the global config") # Get all clients cls.all_clients = None if 'clients' in g.config and g.config['clients']: cls.all_clients = g.config['clients'] cls.clients = cls.all_clients else: raise ConfigError("'clients' not defined in the global config") # Get all servers info cls.all_servers_info = None if 'servers_info' in g.config and g.config['servers_info']: cls.all_servers_info = g.config['servers_info'] else: raise ConfigError("'servers_info' not defined in the global " "config") # All clients_info cls.all_clients_info = None if 'clients_info' in g.config and g.config['clients_info']: cls.all_clients_info = g.config['clients_info'] else: raise ConfigError("'clients_info' not defined in the global " "config") # Set mnode : Node on which gluster commands are executed cls.mnode = cls.all_servers[0] # SMB Cluster info try: cls.smb_users_info = ( g.config['gluster']['cluster_config']['smb']['users_info']) except KeyError: cls.smb_users_info = {} cls.smb_users_info['root'] = {} cls.smb_users_info['root']['password'] = '******' cls.smb_users_info['root']['acl'] = 'rwx' # NFS-Ganesha Cluster info try: cls.enable_nfs_ganesha = bool(g.config['gluster']['cluster_config'] ['nfs_ganesha']['enable']) cls.num_of_nfs_ganesha_nodes = (g.config['gluster'] ['cluster_config']['nfs_ganesha'] ['num_of_nfs_ganesha_nodes']) cls.vips = (g.config['gluster']['cluster_config']['nfs_ganesha'] ['vips']) except KeyError: cls.enable_nfs_ganesha = False cls.num_of_nfs_ganesha_nodes = None cls.vips = [] # Defining default volume_types configuration. default_volume_type_config = { 'replicated': { 'type': 'replicated', 'replica_count': 3, 'transport': 'tcp' }, 'dispersed': { 'type': 'dispersed', 'disperse_count': 6, 'redundancy_count': 2, 'transport': 'tcp' }, 'distributed': { 'type': 'distributed', 'dist_count': 4, 'transport': 'tcp' }, 'distributed-replicated': { 'type': 'distributed-replicated', 'dist_count': 2, 'replica_count': 3, 'transport': 'tcp' }, 'distributed-dispersed': { 'type': 'distributed-dispersed', 'dist_count': 2, 'disperse_count': 6, 'redundancy_count': 2, 'transport': 'tcp' } } # Default volume options which is applicable for all the volumes cls.volume_options = {} if (g.config.get('gluster') and g.config['gluster'].get('volume_options')): cls.volume_options = g.config['gluster']['volume_options'] # If the volume is exported as SMB Share, then set the following # volume options on the share. cls.smb_share_options = {} if (g.config.get('gluster') and g.config['gluster'].get('smb_share_options')): cls.smb_share_options = ( g.config['gluster']['smb_share_options']) # If the volume is exported as NFS-Ganesha export, # then set the following volume options on the export. cls.nfs_ganesha_export_options = {} if (g.config.get('gluster') and g.config['gluster'].get('nfs_ganesha_export_options')): cls.nfs_ganesha_export_options = ( g.config['gluster']['nfs_ganesha_export_options']) # Get the volume configuration. 
    # Get the volume configuration.
    cls.volume = {}
    if cls.volume_type:
        found_volume = False
        if 'gluster' in g.config:
            if 'volumes' in g.config['gluster']:
                for volume in g.config['gluster']['volumes']:
                    if volume['voltype']['type'] == cls.volume_type:
                        cls.volume = copy.deepcopy(volume)
                        found_volume = True
                        break

        if found_volume:
            if 'name' not in cls.volume:
                cls.volume['name'] = 'testvol_%s' % cls.volume_type
            if 'servers' not in cls.volume:
                cls.volume['servers'] = cls.all_servers

        if not found_volume:
            try:
                if g.config['gluster']['volume_types'][cls.volume_type]:
                    cls.volume['voltype'] = (
                        g.config['gluster']['volume_types']
                        [cls.volume_type])
            except KeyError:
                try:
                    cls.volume['voltype'] = (
                        default_volume_type_config[cls.volume_type])
                except KeyError:
                    raise ConfigError("Unable to get configs of volume "
                                      "type: %s" % cls.volume_type)
            cls.volume['name'] = 'testvol_%s' % cls.volume_type
            cls.volume['servers'] = cls.all_servers

        # Set volume options
        if 'options' not in cls.volume:
            cls.volume['options'] = cls.volume_options

        # Define Volume Useful Variables.
        cls.volname = cls.volume['name']
        cls.voltype = cls.volume['voltype']['type']
        cls.servers = cls.volume['servers']
        cls.mnode = cls.servers[0]
        cls.vol_options = cls.volume['options']

    # Get the mount configuration.
    cls.mounts = []
    if cls.mount_type:
        cls.mounts_dict_list = []
        found_mount = False
        if 'gluster' in g.config:
            if 'mounts' in g.config['gluster']:
                for mount in g.config['gluster']['mounts']:
                    if mount['protocol'] == cls.mount_type:
                        temp_mount = {}
                        temp_mount['protocol'] = cls.mount_type
                        if 'volname' in mount and mount['volname']:
                            if mount['volname'] == cls.volname:
                                temp_mount = copy.deepcopy(mount)
                            else:
                                continue
                        else:
                            temp_mount['volname'] = cls.volname
                        if ('server' not in mount
                                or (not mount['server'])):
                            temp_mount['server'] = cls.mnode
                        else:
                            temp_mount['server'] = mount['server']
                        if ('mountpoint' not in mount
                                or (not mount['mountpoint'])):
                            temp_mount['mountpoint'] = (os.path.join(
                                "/mnt", '_'.join([cls.volname,
                                                  cls.mount_type])))
                        else:
                            temp_mount['mountpoint'] = mount['mountpoint']
                        if ('client' not in mount
                                or (not mount['client'])):
                            temp_mount['client'] = (
                                cls.all_clients_info[
                                    random.choice(
                                        list(cls.all_clients_info.keys()))])
                        else:
                            temp_mount['client'] = mount['client']
                        if 'options' in mount and mount['options']:
                            temp_mount['options'] = mount['options']
                        else:
                            temp_mount['options'] = ''
                        cls.mounts_dict_list.append(temp_mount)
                        found_mount = True

        if not found_mount:
            for client in cls.all_clients_info.keys():
                mount = {
                    'protocol': cls.mount_type,
                    'server': cls.mnode,
                    'volname': cls.volname,
                    'client': cls.all_clients_info[client],
                    'mountpoint': (os.path.join(
                        "/mnt", '_'.join([cls.volname, cls.mount_type]))),
                    'options': ''
                }
                cls.mounts_dict_list.append(mount)

        if cls.mount_type == 'cifs' or cls.mount_type == 'smb':
            for mount in cls.mounts_dict_list:
                if 'smbuser' not in mount:
                    mount['smbuser'] = random.choice(
                        list(cls.smb_users_info.keys()))
                    mount['smbpasswd'] = (
                        cls.smb_users_info[mount['smbuser']]['password'])

        cls.mounts = create_mount_objs(cls.mounts_dict_list)

        # Defining clients from mounts.
        cls.clients = []
        for mount in cls.mounts_dict_list:
            cls.clients.append(mount['client']['host'])
        cls.clients = list(set(cls.clients))

    # Gluster Logs info
    cls.server_gluster_logs_dirs = ["/var/log/glusterfs",
                                    "/var/log/samba"]
    cls.server_gluster_logs_files = ["/var/log/ganesha.log",
                                     "/var/log/ganesha-gfapi.log"]
    if ('gluster' in g.config
            and 'server_gluster_logs_info' in g.config['gluster']):
        server_gluster_logs_info = (
            g.config['gluster']['server_gluster_logs_info'])
        if ('dirs' in server_gluster_logs_info
                and server_gluster_logs_info['dirs']):
            cls.server_gluster_logs_dirs = (
                server_gluster_logs_info['dirs'])
        if ('files' in server_gluster_logs_info
                and server_gluster_logs_info['files']):
            cls.server_gluster_logs_files = (
                server_gluster_logs_info['files'])

    cls.client_gluster_logs_dirs = ["/var/log/glusterfs"]
    cls.client_gluster_logs_files = []
    if ('gluster' in g.config
            and 'client_gluster_logs_info' in g.config['gluster']):
        client_gluster_logs_info = (
            g.config['gluster']['client_gluster_logs_info'])
        if ('dirs' in client_gluster_logs_info
                and client_gluster_logs_info['dirs']):
            cls.client_gluster_logs_dirs = (
                client_gluster_logs_info['dirs'])
        if ('files' in client_gluster_logs_info
                and client_gluster_logs_info['files']):
            cls.client_gluster_logs_files = (
                client_gluster_logs_info['files'])

    # Have a unique string to recognize the test run for logging in
    # gluster logs
    if 'glustotest_run_id' not in g.config:
        g.config['glustotest_run_id'] = (
            datetime.datetime.now().strftime('%H_%M_%d_%m_%Y'))
    cls.glustotest_run_id = g.config['glustotest_run_id']

    msg = "Setupclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
    g.log.info(msg)
    cls.inject_msg_in_gluster_logs(msg)

    # Log the baseclass variables for debugging purposes
    g.log.debug("GlusterBaseClass Variables:\n %s", cls.__dict__)
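
# A minimal sketch of the optional log-location overrides read above; the
# 'dirs'/'files' keys match the lookups in setUpClass and the paths shown are
# just the defaults repeated as illustrative values.
example_server_gluster_logs_info = {
    'dirs': ['/var/log/glusterfs', '/var/log/samba'],
    'files': ['/var/log/ganesha.log', '/var/log/ganesha-gfapi.log'],
}
example_client_gluster_logs_info = {
    'dirs': ['/var/log/glusterfs'],
    'files': [],
}
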
def setUpClass(cls): """Initialize all the variables necessary for testing Gluster """ g.log.info("Setting up class: %s", cls.__name__) # Get all servers cls.all_servers = None if 'servers' in g.config and g.config['servers']: cls.all_servers = g.config['servers'] cls.servers = cls.all_servers else: raise ConfigError("'servers' not defined in the global config") # Get all clients cls.all_clients = None if 'clients' in g.config and g.config['clients']: cls.all_clients = g.config['clients'] cls.clients = cls.all_clients else: raise ConfigError("'clients' not defined in the global config") # Get all servers info cls.all_servers_info = None if 'servers_info' in g.config and g.config['servers_info']: cls.all_servers_info = g.config['servers_info'] else: raise ConfigError("'servers_info' not defined in the global " "config") # All clients_info cls.all_clients_info = None if 'clients_info' in g.config and g.config['clients_info']: cls.all_clients_info = g.config['clients_info'] else: raise ConfigError("'clients_info' not defined in the global " "config") # Set mnode : Node on which gluster commands are executed cls.mnode = cls.all_servers[0] # SMB Cluster info try: cls.smb_users_info = ( g.config['gluster']['cluster_config']['smb']['users_info']) except KeyError: cls.smb_users_info = {} cls.smb_users_info['root'] = {} cls.smb_users_info['root']['password'] = '******' cls.smb_users_info['root']['acl'] = 'rwx' # NFS-Ganesha Cluster Info try: cls.enable_nfs_ganesha = bool( g.config['gluster']['cluster_config']['nfs_ganesha']['enable']) except KeyError: cls.enable_nfs_ganesha = False # Defining default volume_types configuration. default_volume_type_config = { 'replicated': { 'type': 'replicated', 'replica_count': 3, 'transport': 'tcp' }, 'dispersed': { 'type': 'dispersed', 'disperse_count': 6, 'redundancy_count': 2, 'transport': 'tcp' }, 'distributed': { 'type': 'distributed', 'dist_count': 4, 'transport': 'tcp' }, 'distributed-replicated': { 'type': 'distributed-replicated', 'dist_count': 2, 'replica_count': 3, 'transport': 'tcp' }, 'distributed-dispersed': { 'type': 'distributed-dispersed', 'dist_count': 2, 'disperse_count': 6, 'redundancy_count': 2, 'transport': 'tcp' } } # Get the volume configuration. cls.volume = {} if cls.volume_type: found_volume = False if 'gluster' in g.config: if 'volumes' in g.config['gluster']: for volume in g.config['gluster']['volumes']: if volume['voltype']['type'] == cls.volume_type: cls.volume = copy.deepcopy(volume) found_volume = True break if found_volume: if 'name' not in cls.volume: cls.volume['name'] = 'testvol_%s' % cls.volume_type if 'servers' not in cls.volume: cls.volume['servers'] = cls.all_servers if not found_volume: try: if g.config['gluster']['volume_types'][cls.volume_type]: cls.volume['voltype'] = ( g.config['gluster']['volume_types'][ cls.volume_type]) except KeyError: try: cls.volume['voltype'] = ( default_volume_type_config[cls.volume_type]) except KeyError: raise ConfigError( "Unable to get configs of volume " "type: %s", cls.volume_type) cls.volume['name'] = 'testvol_%s' % cls.volume_type cls.volume['servers'] = cls.all_servers # Set volume options if 'options' not in cls.volume: cls.volume['options'] = {} # Define Volume Useful Variables. cls.volname = cls.volume['name'] cls.voltype = cls.volume['voltype']['type'] cls.servers = cls.volume['servers'] cls.mnode = cls.servers[0] cls.vol_options = cls.volume['options'] # Get the mount configuration. 
    cls.mounts = []
    if cls.mount_type:
        cls.mounts_dict_list = []
        found_mount = False
        if 'gluster' in g.config:
            if 'mounts' in g.config['gluster']:
                for mount in g.config['gluster']['mounts']:
                    if mount['protocol'] == cls.mount_type:
                        temp_mount = {}
                        temp_mount['protocol'] = cls.mount_type
                        if 'volname' in mount and mount['volname']:
                            if mount['volname'] == cls.volname:
                                temp_mount = copy.deepcopy(mount)
                            else:
                                continue
                        else:
                            temp_mount['volname'] = cls.volname
                        if ('server' not in temp_mount
                                or (not temp_mount['server'])):
                            temp_mount['server'] = cls.mnode
                        if ('mountpoint' not in temp_mount
                                or (not temp_mount['mountpoint'])):
                            temp_mount['mountpoint'] = (os.path.join(
                                "/mnt", '_'.join([cls.volname,
                                                  cls.mount_type])))
                        if ('client' not in temp_mount
                                or (not temp_mount['client'])):
                            temp_mount['client'] = (
                                cls.all_clients_info[random.choice(
                                    list(cls.all_clients_info.keys()))])
                        cls.mounts_dict_list.append(temp_mount)
                        found_mount = True

        if not found_mount:
            for client in cls.all_clients_info.keys():
                mount = {
                    'protocol': cls.mount_type,
                    'server': cls.mnode,
                    'volname': cls.volname,
                    'client': cls.all_clients_info[client],
                    'mountpoint': (os.path.join(
                        "/mnt", '_'.join([cls.volname, cls.mount_type]))),
                    'options': ''
                }
                cls.mounts_dict_list.append(mount)

        if cls.mount_type == 'cifs' or cls.mount_type == 'smb':
            for mount in cls.mounts_dict_list:
                if 'smbuser' not in mount:
                    mount['smbuser'] = random.choice(
                        list(cls.smb_users_info.keys()))
                    mount['smbpasswd'] = (
                        cls.smb_users_info[mount['smbuser']]['password'])

        cls.mounts = create_mount_objs(cls.mounts_dict_list)

        # Defining clients from mounts.
        cls.clients = []
        for mount in cls.mounts_dict_list:
            cls.clients.append(mount['client']['host'])
        cls.clients = list(set(cls.clients))

    # Log the baseclass variables for debugging purposes
    g.log.debug("GlusterBaseClass Variables:\n %s", cls.__dict__)
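
# A sketch of how a cifs/smb mount entry looks after the loop above: an SMB
# user is picked from smb_users_info and its password copied onto the mount
# dict. The user name is an illustrative placeholder and the password stays
# redacted, as in the defaults above.
example_smb_users_info = {'smbuser1': {'password': '******', 'acl': 'rwx'}}
example_smb_mount = {
    'protocol': 'cifs',
    'volname': 'testvol_distributed-replicated',
    'smbuser': 'smbuser1',
    'smbpasswd': example_smb_users_info['smbuser1']['password'],
}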