def test_self_test(self):
    """Run a few CaRT self-test scenarios.

    :avocado: tags=all,pr,smoke,unittest,tiny,cartselftest
    """
    # Wrap the CaRT self_test command in an orterun launcher
    runner = Orterun(SelfTest(self.bin))

    # Populate the self_test parameters from the test yaml
    runner.job.get_params(self)
    runner.job.group_name.update(self.server_group, "group_name")
    message_size = self.params.get(
        "size", "/run/muxtestparams/message_size/*")[0]
    runner.job.message_sizes.update(message_size, "message_sizes")
    runner.job.attach_info.update(
        os.path.dirname(self.uri_file), "attach_info")

    # Orterun-specific launcher options
    runner.map_by.update(None, "orterun/map_by")
    runner.enable_recovery.update(False, "orterun/enable_recovery")

    # Pass the CaRT environment variables through to the launched command
    runner.assign_environment(self.cart_env)

    # Execute; a non-zero exit fails the avocado test
    try:
        runner.run()
    except CommandFailure as error:
        self.test_log.info(
            "CaRT self_test returned non-zero: %s", str(error))
        self.fail("CaRT self_test returned non-zero")
def test_self_test(self):
    """Run a few CaRT self-test scenarios.

    :avocado: tags=all,smoke,unittest,tiny,cartselftest
    """
    # Setup the orterun command
    orterun = Orterun(SelfTest(self.cart_bin))
    orterun.ompi_server.update(
        "file:{}".format(self.uri_file), "orterun/ompi_server")
    orterun.map_by.update(None, "orterun/map_by")
    orterun.enable_recovery.update(False, "orterun/enable_recovery")

    # Get the self_test command line parameters
    orterun.job.get_params(self)
    # Use update() rather than assigning .value directly, so the change is
    # applied and logged the same way as every other parameter in this class.
    orterun.job.group_name.update(self.server_group, "group_name")

    # Setup the environment variables for the self_test orterun command
    orterun.assign_environment(self.cart_env)

    # Run the test; a non-zero exit fails the avocado test
    try:
        orterun.run()
    except CommandFailure as error:
        self.test_log.info(
            "CaRT self_test returned non-zero: %s", str(error))
        self.fail("CaRT self_test returned non-zero")
def run_conf(self):
    """Run the daos_run_io_conf command as a foreground process.

    Raises:
        None

    """
    executable = os.path.join(self._path, "daos_run_io_conf")
    command = "{} {}".format(executable, self.filename.value)
    # Launch daos_run_io_conf through the Openmpi orterun wrapper
    Orterun(command).run()
def run_test(self):
    """Run the HDF5 VOL testsuites.

    Raises:
        VolFailed: for an invalid test name or test execution failure

    """
    # initialize test specific variables
    mpi_type = self.params.get("mpi_type", default="mpich")
    test_repo = self.params.get("daos_vol_repo")
    plugin_path = self.params.get("plugin_path")
    testname = self.params.get("testname")
    client_processes = self.params.get("client_processes")

    # create pool, container and dfuse mount
    self.add_pool(connect=False)
    self.add_container(self.pool)

    # VOL needs to run from a file system that supports xattr.
    # Currently nfs does not have this attribute so it was recommended
    # to create a dfuse dir and run vol tests from there.
    # create dfuse container
    self.start_dfuse(self.hostlist_clients, self.pool, self.container)

    # Select the job manager matching the requested MPI flavor.  Pass the
    # yaml-provided mpi_type through instead of hard-coding "mpich" so any
    # MPICH-derived flavor named in the yaml is launched correctly.
    exe = os.path.join(test_repo, testname)
    if mpi_type == "openmpi":
        manager = Orterun(exe, subprocess=False)
    else:
        manager = Mpirun(exe, subprocess=False, mpitype=mpi_type)

    # Environment consumed by the HDF5 DAOS VOL connector
    env = EnvironmentVariables()
    env["DAOS_POOL"] = "{}".format(self.pool.uuid)
    env["DAOS_SVCL"] = "{}".format(self.pool.svc_ranks[0])
    env["DAOS_CONT"] = "{}".format(self.container.uuid)
    env["HDF5_VOL_CONNECTOR"] = "daos"
    env["HDF5_PLUGIN_PATH"] = "{}".format(plugin_path)
    manager.assign_hosts(self.hostlist_clients)
    manager.assign_processes(client_processes)
    manager.assign_environment(env, True)
    # Run from the dfuse mount (xattr-capable file system; see note above)
    manager.working_dir.value = self.dfuse.mount_dir.value

    # run VOL Command
    try:
        manager.run()
    except CommandFailure as _error:
        self.fail("{} FAILED> \nException occurred: {}".format(
            exe, str(_error)))
def run_conf(self, dmg_config_file):
    """Run the daos_run_io_conf command as a foreground process.

    Args:
        dmg_config_file (str): dmg config file used for the test run.

    Returns:
        bool: True if the command succeeded; False on any error.

    """
    success_msg = 'daos_run_io_conf completed successfully'
    # Join with single spaces; the previous embedded " -n " element
    # produced doubled spaces in the generated command line.
    command = " ".join(
        [os.path.join(self._path, "daos_run_io_conf"), "-n",
         dmg_config_file, self.filename.value])
    manager = Orterun(command, mpi_type=self.mpi_type)
    # run daos_run_io_conf Command using Openmpi
    try:
        out = manager.run()
        lines = out.stdout_text.splitlines()
        # Return False if "ERROR" in stdout
        if any('ERROR' in line for line in lines):
            return False
        # Return False if the expected completion message is not on the
        # last line (also covers empty output, which previously raised
        # IndexError on splitlines()[-1]).
        if not lines or success_msg not in lines[-1]:
            return False
    # Return False if Command failed.
    except CommandFailure:
        return False
    return True
def run_daos_perf(self):
    """Run the daos_perf command."""
    # Number of processes requested by the test yaml
    processes = self.params.get("processes", "/run/daos_perf/*")

    # Build the daos_perf command from the test yaml file
    command = DaosPerfCommand(self.bin)
    command.get_params(self)
    self.log.info("daos_perf command: %s", str(command))
    cmd_env = command.get_environment(self.server_managers[0])

    # Wrap daos_perf in an orterun launcher
    launcher = Orterun(command)
    launcher.assign_hosts(self.hostlist_clients, self.workdir, None)
    launcher.assign_processes(processes)
    launcher.assign_environment(cmd_env)
    self.log.info("orterun command: %s", str(launcher))

    # Execute and scan the output for failure indicators
    outcome = launcher.run()
    pattern = r"(.*(?:non-zero exit code|errors|failed|Failed).*)"
    issues = re.findall(pattern, outcome.stdout_text)
    if issues:
        self.fail("Errors detected in daos_perf output:\n{}".format(
            " \n".join(issues)))