class DParentTest(metaclass=abc.ABCMeta):
    """
    This class contains the standard info and methods which are needed
    by disruptive tests.

    TEST_RES: states the result of the test case
    """

    def __init__(self, mname: str, param_obj, volume_type: str,
                 env_obj, log_path: str, log_level: str = 'I'):
        """
        Creates volume and runs the specific component in the test case.

        Args:
            mname: name of the test module being run.
            param_obj: configuration object providing server/client
                details, volume types, IP lists and brick roots.
            volume_type: volume type under test, or "Generic" when the
                TC does not need a pre-created volume.
            env_obj: environment state object handed to the mixin.
            log_path: path of the log file for this TC.
            log_level: logging level (default 'I').
        """
        server_details = param_obj.get_server_config()
        client_details = param_obj.get_client_config()

        # Result flags: True = pass, False = fail. A TC may set
        # TEST_RES to None so that a failure is recorded as a skip
        # (see parent_run_test).
        self.TEST_RES = True
        # Set by the setup_custom_enable decorator when a TC provides
        # its own setup, so the default volume setup is skipped.
        self.setup_done = False
        self.volume_type = volume_type
        self.vol_type_inf = param_obj.get_volume_types()
        self.test_name = mname
        self.vol_name = f"{mname}-{volume_type}"
        self._configure(self.vol_name, server_details, client_details,
                        env_obj, log_path, log_level)
        self.server_list = param_obj.get_server_ip_list()
        self.client_list = param_obj.get_client_ip_list()
        self.brick_roots = param_obj.get_brick_roots()

    def _configure(self, mname: str, server_details: dict,
                   client_details: dict, env_obj, log_path: str,
                   log_level: str):
        """Create the RedantMixin, its logger and the SSH connections."""
        self.redant = RedantMixin(server_details, client_details, env_obj)
        self.redant.init_logger(mname, log_path, log_level)
        self.redant.establish_connection()

    def setup_test(self):
        """
        Hook a TC can override to customize volume creation / start /
        mounting. The default implementation does nothing.
        """
        pass

    @classmethod
    def setup_custom_enable(cls, fun):
        """
        Decorator for run_test which flags that the TC performs its own
        setup, so parent_run_test skips the default volume setup.
        """
        # Local import: the file's import header is outside this chunk.
        import functools

        @functools.wraps(fun)  # keep the TC's __name__/docstring intact
        def inner(self, *args, **kwargs):
            self.setup_done = True
            return fun(self, *args, **kwargs)
        return inner

    @abc.abstractmethod
    def run_test(self, redant):
        """Test body; every TC must override this."""
        pass

    def parent_run_test(self):
        """
        Function to handle the exception logic and invokes the run_test
        which is overridden by every TC.
        """
        try:
            self.redant.start_glusterd(self.server_list)
            self.redant.create_cluster(self.server_list)

            # Call setup in case you want to override volume creation,
            # start, mounting in the TC.
            self.setup_test()

            if not self.setup_done and self.volume_type != "Generic":
                self.redant.setup_volume(self.vol_name,
                                         self.server_list[0],
                                         self.vol_type_inf[self.volume_type],
                                         self.server_list, self.brick_roots,
                                         force=True)
                self.mountpoint = f"/mnt/{self.vol_name}"
                for client in self.client_list:
                    self.redant.execute_abstract_op_node(
                        f"mkdir -p {self.mountpoint}", client)
                    self.redant.volume_mount(self.server_list[0],
                                             self.vol_name,
                                             self.mountpoint, client)
            self.run_test(self.redant)
        except Exception as error:
            tb = traceback.format_exc()
            self.redant.logger.error(error)
            self.redant.logger.error(tb)
            # A TC sets TEST_RES to None to mark the error as a skip
            # instead of a failure.
            if self.TEST_RES is None:
                self.SKIP_REASON = error
            else:
                self.TEST_RES = False

    def terminate(self):
        """
        Closes connection for now.

        Best-effort restoration of the cluster after the TC: ensures
        nodes and glusterd are up, re-probes peers, resets volume
        options and cleans up volumes; always closes the connections.
        """
        # Check if all nodes are up and running.
        for machine in self.server_list + self.client_list:
            ret = self.redant.wait_node_power_up(machine)
            if not ret:
                self.redant.logger.error(f"{machine} is offline.")

        # Validate that glusterd is up and running in the servers.
        self.redant.start_glusterd(self.server_list)
        if not self.redant.wait_for_glusterd_to_start(self.server_list):
            raise Exception("Glusterd start failed.")

        try:
            # Peer probe and validate all peers are in connected state.
            self.redant.peer_probe_servers(self.server_list,
                                           self.server_list[0])
        except Exception as error:
            tb = traceback.format_exc()
            self.redant.logger.error(error)
            self.redant.logger.error(tb)

        try:
            # Reset every recorded volume option to its default, then
            # remove the volumes created by the TC.
            for (opt, _) in self.redant.es.get_vol_options_all().items():
                self.redant.reset_volume_option('all', opt,
                                                self.server_list[0])
            self.redant.cleanup_volumes(self.server_list)
        except Exception as error:
            tb = traceback.format_exc()
            self.redant.logger.error(error)
            self.redant.logger.error(tb)
            # Graceful cleanup failed; fall back to the hard reset.
            self.redant.hard_terminate(self.server_list, self.client_list,
                                       self.brick_roots)
        finally:
            self.redant.deconstruct_connection()
class environ:
    """
    Framework level control on the gluster environment. Controlling both
    the setup and the cleanup.
    """

    def __init__(self, param_obj, es, error_handler, log_path: str,
                 log_level: str):
        """
        Redant mixin obj to be used for server setup and teardown
        operations has to be created.

        Args:
            param_obj: configuration object providing server/client
                details, IP lists and brick roots.
            es: environment state object handed to the mixin.
            error_handler: callback invoked with the exception (and an
                optional message template) when connecting fails.
            log_path: path of the framework log file.
            log_level: logging level.
        """
        self.spinner = Halo(spinner='dots')
        self.redant = RedantMixin(param_obj.get_server_config(),
                                  param_obj.get_client_config(), es)
        self.redant.init_logger("environ", log_path, log_level)
        try:
            self.redant.establish_connection()
        except paramiko.ssh_exception.NoValidConnectionsError as e:
            error_handler(e, '''
            It seems one of the nodes is down.
            Message: {exc}.
            Check and run again.
            ''')
        except paramiko.ssh_exception.AuthenticationException as e:
            error_handler(e, """
            Authentication failed.
            Message: {exc}
            Check and run again.
            """)
        except timeout as e:
            error_handler(e, """
            Oops! There was a timeout connecting the servers.
            Message: {exc}
            Check and run again.
            """)
        except Exception as e:
            error_handler(e)
        self.server_list = param_obj.get_server_ip_list()
        self.client_list = param_obj.get_client_ip_list()
        self.brick_root = param_obj.get_brick_roots()

    def get_framework_logger(self):
        """
        To return the framework logger object
        """
        return self.redant.logger

    def _transfer_files_to_machines(self, machines: list, spath: str,
                                    dpath: str):
        """
        Transfers files from source path to destination path

        Args:
            machines: nodes to copy the file to.
            spath: local source path.
            dpath: destination path on the remote nodes.
        """
        # If the destination already exists on the nodes, ask the
        # transfer helper to remove it first.
        remove = False
        if self.redant.path_exists(machines, [dpath]):
            remove = True

        for node in machines:
            self.redant.logger.info(f'Copying file to {node}')
            self.redant.transfer_file_from_local(spath, dpath, node,
                                                 remove)

    def _check_and_copy_io_script(self):
        """
        Check if the I/O script exists in the client machines. If not
        transfer it there.
        """
        io_script_dpath = '/tmp/file_dir_ops.py'
        io_script_spath = f'{os.getcwd()}/tools/file_dir_ops.py'
        self._transfer_files_to_machines(
            list(set(self.client_list + self.server_list)),
            io_script_spath, io_script_dpath)

    def _list_of_machines_without_arequal(self, machines: list):
        """
        This function returns the list of machines without arequal
        checksum installed on it.
        """
        cmd = "arequal-checksum"
        machines = set(machines)
        arequal_machines = []
        for machine in machines:
            # NOTE(review): exit code 64 is treated as "binary present"
            # (presumably its usage error when run without args); any
            # other code marks the tool as missing — confirm contract.
            ret = self.redant.execute_abstract_op_node(cmd, machine,
                                                       False)
            if ret['error_code'] != 64:
                arequal_machines.append(machine)
        return arequal_machines

    def _check_and_install_arequal_checksum(self):
        """
        Checks if arequal checksum is present on the servers and clients
        and if not present installs it.
        """
        arequal_dpath = '/tmp/arequal_install.sh'
        arequal_spath = f'{os.getcwd()}/tools/arequal_install.sh'
        arequal_machines = self._list_of_machines_without_arequal(
            self.client_list + self.server_list)
        if len(arequal_machines) > 0:
            self._transfer_files_to_machines(arequal_machines,
                                             arequal_spath,
                                             arequal_dpath)
            cmd = f"sh {arequal_dpath}"
            self.redant.execute_abstract_op_multinode(cmd,
                                                      arequal_machines)

    def setup_env(self):
        """
        Setting up of the environment before the TC execution begins.
        """
        # invoke the hard reset or hard terminate.
        self.spinner.start("Setting up environment")
        self.redant.hard_terminate(self.server_list, self.client_list,
                                   self.brick_root)
        try:
            self.redant.start_glusterd(self.server_list)
            self.redant.create_cluster(self.server_list)
            self._check_and_copy_io_script()
            self._check_and_install_arequal_checksum()
            self.spinner.succeed("Environment setup successful.")
        except Exception as error:
            tb = traceback.format_exc()
            self.redant.logger.error(error)
            self.redant.logger.error(tb)
            self.spinner.fail("Environment setup failed.")
            # BUG FIX: exit non-zero so shells/CI see the failure;
            # previously this exited with status 0 (success) even
            # though setup had failed.
            sys.exit(1)

    def teardown_env(self):
        """
        The teardown of the complete environment once the test framework
        ends.
        """
        self.spinner.start("Tearing down environment.")
        try:
            self.redant.hard_terminate(self.server_list,
                                       self.client_list,
                                       self.brick_root)
            self.spinner.succeed("Tearing down successful.")
        except Exception as error:
            tb = traceback.format_exc()
            self.redant.logger.error(error)
            self.redant.logger.error(tb)
            self.spinner.fail("Environment Teardown failed.")