def run_math(self, pool: futures.ThreadPoolExecutor):
    ''' Run diffcrash math

    Parameters
    ----------
    pool : `concurrent.futures.ThreadPoolExecutor`
        multiprocessing pool

    Raises
    ------
    RuntimeError
        if the subprocess returns a nonzero code or the
        logfiles indicate a failure
    '''

    msg = "Running Math ... \r"
    print(str_running(msg), end='', flush=True)
    self.logger.info(msg)

    start_time = time.time()

    # run DFC_Math in the worker pool and block until it finishes
    return_code_future = pool.submit(run_subprocess, [
        os.path.join(self.diffcrash_home, "DFC_Math_" + self.crash_code),
        self.project_dir
    ])
    # renamed from camelCase `returnCode` for consistency with siblings
    return_code = return_code_future.result()

    # check return code
    if return_code != 0:
        msg = "Running Math ... done in {0:.2f}s ".format(time.time() - start_time)
        print(str_error(msg))
        self.logger.error(msg)

        err_msg = "Caught a nonzero return code '{0}'".format(return_code)
        self.logger.error(err_msg)
        raise RuntimeError(str_error(err_msg))

    # check logs
    messages = self.check_if_logfiles_show_success("DFC_MATH*.log")
    if messages:

        # print failure
        msg = "Running Math ... done in {0:.2f}s ".format(time.time() - start_time)
        print(str_error(msg))
        self.logger.error(msg)

        # print failed logs
        for msg in messages:
            print(str_error(msg))
            self.logger.error(msg)

        err_msg = "Logfile does indicate a failure. Please check the log files in '{0}'.".format(
            self.logfile_dir)
        self.logger.error(err_msg)
        raise RuntimeError(str_error(err_msg))

    # print success
    msg = "Running Math ... done in {0:.2f}s ".format(time.time() - start_time)
    print(str_success(msg))
    self.logger.info(msg)
def run_merge(self, pool: futures.ThreadPoolExecutor):
    ''' Run diffcrash merge

    Parameters
    ----------
    pool : `concurrent.futures.ThreadPoolExecutor`
        multiprocessing pool

    Raises
    ------
    RuntimeError
        if the subprocess returns a nonzero code or the
        logfiles indicate a failure
    '''

    msg = "Running Merge ... "
    print(str_running(msg) + '\r', end='', flush=True)
    self.logger.info(msg)

    # create input file for merge
    merge_inputfile = self._create_merge_input_file(self.project_dir)

    # clear previous merges
    for filepath in glob.glob(os.path.join(self.project_dir, "mode_*")):
        if os.path.isfile(filepath):
            os.remove(filepath)

    # run the thing
    start_time = time.time()
    return_code_future = pool.submit(run_subprocess, [
        os.path.join(self.diffcrash_home, "DFC_Merge_All_" + self.crash_code),
        self.project_dir,
        merge_inputfile
    ])
    return_code = return_code_future.result()

    # check return code
    if return_code != 0:
        msg = "Running Merge ... done in {0:.2f}s ".format(time.time() - start_time)
        print(str_error(msg))
        # BUGFIX: failures were previously logged at info level
        self.logger.error(msg)

        msg = "The process failed somehow."
        self.logger.error(msg)
        raise RuntimeError(str_error(msg))

    # check logfiles
    messages = self.check_if_logfiles_show_success("DFC_Merge_All.log")
    if messages:

        msg = "Running Merge ... done in {0:.2f}s ".format(time.time() - start_time)
        print(str_error(msg))
        self.logger.error(msg)

        # print failed logs
        for msg in messages:
            print(str_error(msg))
            # BUGFIX: failed log messages were previously logged at info level
            self.logger.error(msg)

        msg = "DFC_Merge_All failed. Please check the log files in '{0}'.".format(
            self.logfile_dir)
        self.logger.error(msg)
        raise RuntimeError(str_error(msg))

    # print success
    msg = "Running Merge ... done in {0:.2f}s ".format(time.time() - start_time)
    print(str_success(msg))
    self.logger.info(msg)
def run_eigen(self, pool: futures.ThreadPoolExecutor):
    ''' Run diffcrash eigen

    Parameters
    ----------
    pool : `concurrent.futures.ThreadPoolExecutor`
        multiprocessing pool
    '''

    msg = "Running Eigen ... "
    print(str_running(msg) + '\r', end='', flush=True)
    self.logger.info(msg)

    # the eigen process reads its settings from a generated input file
    eigen_inputfile = self._create_eigen_input_file(self.project_dir)

    # dispatch the eigen executable and block until it terminates
    start_time = time.time()
    command = [
        os.path.join(self.diffcrash_home, "DFC_Eigen_" + self.crash_code),
        self.project_dir,
        eigen_inputfile,
    ]
    exit_code = pool.submit(run_subprocess, command).result()

    # a nonzero exit code means the subprocess itself failed
    if exit_code != 0:
        msg = "Running Eigen ... done in {0:.2f}s ".format(time.time() - start_time)
        print(str_error(msg))
        self.logger.error(msg)

        msg = "The process failed somehow."
        self.logger.error(msg)
        raise RuntimeError(str_error(msg))

    # even with a clean exit code the logfiles may report errors
    # NOTE(review): pattern checks DFC_Matrix logs, not DFC_Eigen —
    # presumably the eigen tool writes its logs under that name; confirm
    messages = self.check_if_logfiles_show_success("DFC_Matrix_*")
    if messages:

        msg = "Running Eigen ... done in {0:.2f}s ".format(time.time() - start_time)
        print(str_error(msg))
        self.logger.error(msg)

        # show every failed logfile
        for msg in messages:
            print(str_error(msg))
            self.logger.error(msg)

        msg = "DFC_Eigen failed. Please check the log files in '{0}'.".format(self.logfile_dir)
        self.logger.error(msg)
        raise RuntimeError(str_error(msg))

    # success
    msg = "Running Eigen ... done in {0:.2f}s ".format(time.time() - start_time)
    print(str_success(msg))
    self.logger.info(msg)
def _parse_n_processes(self, n_processes) -> int:
    ''' Validate the number of processes

    Parameters
    ----------
    n_processes : `int`
        number of processes to use

    Returns
    -------
    n_processes : `int`
        the validated value

    Raises
    ------
    ValueError
        if `n_processes` is smaller than 1
    '''

    msg = self._msg_option.format("n-processes", n_processes)
    print(str_info(msg))
    # consistency fix: the other option parsers also log the option value
    self.logger.info(msg)

    if n_processes <= 0:
        err_msg = "n-processes is '{0}' but must be at least 1.".format(
            n_processes)
        self.logger.error(err_msg)
        raise ValueError(str_error(err_msg))

    return n_processes
def parse_stages(start_stage: str, end_stage: str):
    ''' Parse and validate a start/end stage pair

    Parameters
    ----------
    start_stage : `str`
        name of the first stage to run
    end_stage : `str`
        name of the last stage to run

    Returns
    -------
    start_stage_index : `int`
        index of the start stage in `DC_STAGES`
    end_stage_index : `int`
        index of the end stage in `DC_STAGES`

    Raises
    ------
    ValueError
        if a stage name is unknown or the stages are in the wrong order
    '''

    # check validity
    # BUGFIX: the error previously always named `start_stage`, even when
    # `end_stage` was the invalid one. Report the actual offender.
    for stage in (start_stage, end_stage):
        if stage not in DC_STAGES:
            raise ValueError(
                str_error("{0} is not a valid stage. Try: {1}.".format(
                    stage, ", ".join(DC_STAGES))))

    # get indexes
    start_stage_index = DC_STAGES.index(start_stage)
    end_stage_index = DC_STAGES.index(end_stage)

    # check if start and end are in correct order
    if start_stage_index > end_stage_index:
        raise ValueError(
            str_error(
                "The specified end stage '{0}' comes before the start stage ({1}). Try the order: {2}"
                .format(end_stage, start_stage, ', '.join(DC_STAGES))))

    return start_stage_index, end_stage_index
def _deserialize_args(self, pb_function_arguments):
    ''' Deserialize function arguments

    Parameters
    ----------
    pb_function_arguments : `list` of `lasso.ansa.rpc.FunctionArgument`
        function arguments to iterate over

    Returns
    -------
    args : `list`
        argument list
    kwargs : `dict`
        kwargs dictionary for function
    '''

    positional = []
    keyword = {}

    for pb_arg in pb_function_arguments:

        index = pb_arg.index
        name = pb_arg.name

        # NOTE(review): pickle.loads on transported bytes — only safe
        # if the sending peer is trusted; confirm
        value = pickle.loads(pb_arg.value.data)

        # convert entities back to ansa entities
        value = _convert_any_ansa_entities(value)

        # named arguments go into kwargs
        if name:
            keyword[name] = value
            continue

        # unnamed arguments are positional and need a valid index
        if index < 0:
            raise RuntimeError(
                str_error(
                    _msg_invalid_argument_index.format(
                        index)))

        # grow the positional list until the index fits
        while len(positional) <= index:
            positional.append(None)
        positional[index] = value

    return positional, keyword
def _parse_simulation_runs(self,
                           simulation_run_patterns: typing.Sequence[str],
                           reference_run: str,
                           exclude_runs: typing.Sequence[str]):
    ''' Collect, filter and naturally sort the simulation run files

    Parameters
    ----------
    simulation_run_patterns : `typing.Sequence` of `str`
        glob patterns for the simulation result files
    reference_run : `str`
        filepath of the reference run (removed from the result)
    exclude_runs : `typing.Sequence` of `str`
        glob patterns of runs to exclude

    Returns
    -------
    simulation_runs : `list` of `str`
        sorted simulation run filepaths

    Raises
    ------
    RuntimeError
        if no simulation file matches the patterns
    '''

    # search all denoted runs
    simulation_runs = []
    for pattern in simulation_run_patterns:
        simulation_runs += glob.glob(pattern)
    simulation_runs = [
        filepath for filepath in simulation_runs if os.path.isfile(filepath)]

    # search all excluded runs
    runs_to_exclude = []
    for pattern in exclude_runs:
        runs_to_exclude += glob.glob(pattern)
    # use a set for O(1) membership tests during filtering
    # (was a list, making the filter below O(n*m))
    runs_to_exclude = {
        filepath for filepath in runs_to_exclude if os.path.isfile(filepath)}

    n_runs_before_filtering = len(simulation_runs)
    simulation_runs = [filepath for filepath in simulation_runs
                       if filepath not in runs_to_exclude]
    n_runs_after_filtering = len(simulation_runs)

    # remove the reference run
    if reference_run in simulation_runs:
        simulation_runs.remove(reference_run)

    # natural sort so e.g. "run_2" comes before "run_10"
    def atoi(text):
        return int(text) if text.isdigit() else text

    def natural_keys(text):
        return [atoi(c) for c in re.split(r'(\d+)', text)]

    simulation_runs = sorted(simulation_runs, key=natural_keys)

    # check
    simulation_runs_ok = len(simulation_runs) != 0

    msg = self._msg_option.format(
        "# simul.-files", len(simulation_runs))
    print(str_info(msg))
    self.logger.info(msg)

    msg = self._msg_option.format(
        "# excluded files", (n_runs_before_filtering - n_runs_after_filtering))
    print(str_info(msg))
    self.logger.info(msg)

    if not simulation_runs_ok:
        err_msg = "No simulation files could be found with the specified patterns. Check the argument 'simulation_runs'."
        self.logger.error(err_msg)
        raise RuntimeError(str_error(err_msg))

    return simulation_runs
def _parse_reference_run(self, reference_run) -> str:
    ''' Validate the reference run filepath

    Parameters
    ----------
    reference_run : `str`
        filepath of the reference run

    Returns
    -------
    reference_run : `str`
        the validated filepath

    Raises
    ------
    RuntimeError
        if the filepath does not point to a file
    '''

    msg = self._msg_option.format("reference-run", reference_run)
    print(str_info(msg))
    self.logger.info(msg)

    if not os.path.isfile(reference_run):
        err_msg = "Filepath '{0}' is not a file.".format(reference_run)
        self.logger.error(err_msg)
        raise RuntimeError(str_error(err_msg))

    return reference_run
def _parse_diffcrash_home(self, diffcrash_home) -> str:
    ''' Validate the diffcrash installation path

    Parameters
    ----------
    diffcrash_home : `str`
        path of the diffcrash installation

    Returns
    -------
    diffcrash_home : `str`
        the validated path

    Raises
    ------
    RuntimeError
        if the path is empty
    '''

    msg = self._msg_option.format("diffcrash-home", diffcrash_home)
    print(str_info(msg))
    self.logger.info(msg)

    # an empty string means neither env var nor option was given
    if len(diffcrash_home) == 0:
        err_msg = "Specify the path to the Diffcrash installation either with the environment variable 'DIFFCRASHHOME' or the option --diffcrash-home."
        self.logger.error(err_msg)
        raise RuntimeError(str_error(err_msg))

    return diffcrash_home
def _parse_crash_code(self, crash_code) -> str:
    ''' Validate the crash code

    Parameters
    ----------
    crash_code : `str`
        crash code name to validate

    Returns
    -------
    crash_code : `str`
        the validated crash code

    Raises
    ------
    RuntimeError
        if the crash code is not supported
    '''

    # the only supported crash codes
    valid_crash_codes = ["dyna", "radioss", "pam"]

    msg = self._msg_option.format("crash-code", crash_code)
    print(str_info(msg))
    self.logger.info(msg)

    if crash_code not in valid_crash_codes:
        err_msg = "Invalid crash code '{0}'. Please use one of: {1}".format(
            crash_code, str(valid_crash_codes))
        self.logger.error(err_msg)
        raise RuntimeError(str_error(err_msg))

    return crash_code
def check_if_logfiles_show_success(self, pattern: str) -> List[str]:
    ''' Check if a logfiles with given pattern show success

    Parameters
    ----------
    pattern : `str`
        file pattern used to search for logfiles

    Returns
    -------
    messages : `list`
        list with messages of failed log checks
    '''

    _msg_logfile_nok = str_error("Logfile '{0}' reports no success.")

    # collect one message per logfile that did not report success
    logfiles = glob.glob(os.path.join(self.logfile_dir, pattern))
    return [
        _msg_logfile_nok.format(filepath)
        for filepath in logfiles
        if not self.is_logfile_successful(filepath)
    ]
def get_ansa_server_command(ansa_filepath: str,
                            python33_path: str,
                            port: int) -> list:
    ''' Assemble the ansa command for running the server

    Parameters
    ----------
    ansa_filepath : `str`
        path to ansa executable
    python33_path : `str`
        path to python 3.3 whose site-packages shall be used
    port : `int`
        port to run remote scripting on

    Returns
    -------
    cmd : `list` of `str`
        ansa command for running the server
    '''
    # BUGFIX: the return annotation said `str` but the function returns a
    # list of strings (as the docstring already stated)

    # path to the script, which ansa has to run
    current_dir = os.path.dirname(__file__)
    server_script_filepath = os.path.join(current_dir, "server_ansa.py")

    # ansa filepath: fall back to the platform default launcher
    if not ansa_filepath:
        if os.name == 'nt':
            ansa_filepath = 'ansa64.bat'
        elif os.name == 'posix':
            ansa_filepath = 'ansa64.sh'
        print(str_info(_msg_ansa_filepath_unspecified.format(ansa_filepath)))

    # basic commands
    cmd = [ansa_filepath, '-nolauncher']

    # this variant always runs headless
    cmd.append("-nogui")

    # find the site-packages dir
    if python33_path:
        site_packages_dirs = glob.glob(os.path.join(python33_path, '**', 'site-packages'),
                                       recursive=True)
        if len(site_packages_dirs) == 0:
            raise RuntimeError(
                str_error(_msg_no_site_packages.format(python33_path)))
        if len(site_packages_dirs) > 1:
            raise RuntimeError(
                str_error(_msg_multiple_site_packages.format(python33_path)))
        site_packages_path = site_packages_dirs[0]
        os.environ["ANSA_REST_SITE_PACKAGES_PATH"] = site_packages_path

        # check if required libs are installed
        required_libs = ["flask*"]
        for required_lib_name in required_libs:
            if not glob.glob(
                    os.path.join(site_packages_path, required_lib_name)):
                raise RuntimeError(
                    str_error(
                        _msg_missing_library.format(required_lib_name,
                                                    site_packages_path)))

    # python33 path not specified (print an info)
    else:
        print(str_info(_msg_site_packages_unspecified))
        site_packages_path = ""

    # this function in the script will be run with the following arguments
    cmd.append("-execscript")
    cmd.append(server_script_filepath)
    script_command = "\"serve({port})\""
    cmd.append("-execpy")
    # the unused `site_packages_path` kwarg of the original .format call
    # was dropped — the template only contains {port}
    cmd.append(script_command.format(port=port))

    return cmd
def serve(port, interactive, enable_logging):
    ''' Run the grpc server

    Parameters
    ----------
    port : `int`
        port to bind the server to
    interactive : `bool`
        open an interactive console in the main thread
    enable_logging : `bool`
        turn on logging output

    Raises
    ------
    RuntimeError
        if the requested port cannot be bound
    '''

    print_header()

    # set logging
    if enable_logging:
        logging.basicConfig(level=logging.INFO)
        fmt_settings = "{0:14}: {1}"
        logging.info(str_info(fmt_settings.format("port", port)))
        logging.info(str_info(fmt_settings.format("interactive", interactive)))
        logging.info(str_info(fmt_settings.format(
            "enable_logging", enable_logging)))

    # grpc server options
    # We increase the transfer limit from 4MB to 1GB here
    # This is seriously bad since big stuff should be streamed
    # but I'm not getting paid for this.
    gigabyte = 1024 ** 3
    options = [
        ('grpc.max_send_message_length', gigabyte),
        ('grpc.max_receive_message_length', gigabyte)
    ]

    # run server
    # Note: Since ANSA is not threadsafe we allow only 1 worker.
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=1),
        options=options
    )

    # register our driver
    driver_servicer = LassoAnsaDriverServicer()
    AnsaGRPC_pb2_grpc.add_LassoAnsaDriverServicer_to_server(
        driver_servicer, server)

    # BUGFIX: `port` was overwritten with the result of add_insecure_port,
    # which is 0 on failure — the error message then reported port '0'
    # instead of the port that was actually requested.
    bound_port = server.add_insecure_port('[::]:{}'.format(port))

    # check if port was fine
    if bound_port == 0:
        logging.error(_msg_port_taken.format(port))
        raise RuntimeError(str_error(_msg_port_taken.format(port)))

    # finally start server
    server.start()

    # let main process wait or make ANSA interactively accessible
    # while the threadpool process handles incoming commands
    try:
        if interactive:
            if enable_logging:
                print()
            import code
            code.interact(local=locals())
        else:
            # poll for a shutdown request from the servicer
            while True:
                time.sleep(1)
                if driver_servicer.please_shutdown:
                    raise KeyboardInterrupt()
    except KeyboardInterrupt:
        logging.info(_msg_stopping_server)
        server.stop(0)
    except Exception as err:
        logging.error(str_error(_msg_some_weird_error.format(str(err))))
        server.stop(0)
''' # this is a utility for the command line usage try: sys.path.append(os.environ["ANSA_GRPC_SITE_PACKAGES_PATH"]) except KeyError as err: print(str_warn(_msg_site_packages_dir_not_set.format( "ANSA_GRPC_SITE_PACKAGES_PATH"))) try: import AnsaGRPC_pb2 import AnsaGRPC_pb2_grpc import grpc from utils import Entity, pickle_object except ImportError as err: raise RuntimeError(str_error(_msg_import_error.format(str(err)))) def print_header(): header = ''' ANSA Remote Scripting Server by {0} ------------------------------------------ '''.format(ConsoleColoring.blue("LASSO GmbH", light=True)) print(header) def _serialize(obj): ''' Serializes an arbitrary object for transfer
def run_export(self, pool: futures.ThreadPoolExecutor):
    ''' Run diffcrash export

    Parameters
    ----------
    pool : `concurrent.futures.ThreadPoolExecutor`
        multiprocessing pool
    '''

    msg = "Running Export ... "
    print(str_running(msg) + '\r', end='', flush=True)
    self.logger.info(msg)

    # determine what to export: either read the config file or scan the
    # project dir for the first pdmx and pdij functional
    if self.config_file is None:
        export_item_list = []

        for functional_pattern in ("*_pdmx", "*_pdij"):
            matches = glob.glob(
                os.path.join(self.project_dir, functional_pattern))
            if matches:
                export_item_list.append(os.path.basename(matches[0]))
    else:
        export_item_list = self.read_config_file(self.config_file)

    # remove previous existing exports
    for export_item in export_item_list:
        previous_export = os.path.join(self.project_dir,
                                       export_item + ".d3plot.fz")
        if os.path.isfile(previous_export):
            os.remove(previous_export)

    # submit one export subprocess per item and collect the exit codes
    start_time = time.time()
    export_futures = [
        pool.submit(run_subprocess, [
            os.path.join(self.diffcrash_home,
                         "DFC_Export_" + self.crash_code),
            self.project_dir,
            export_item,
        ])
        for export_item in export_item_list
    ]
    return_codes = [future.result() for future in export_futures]

    # any nonzero exit code is a failure
    if any(code != 0 for code in return_codes):

        msg = "Running Export ... done in {0:.2f}s ".format(time.time() - start_time)
        print(str_error(msg))
        self.logger.error(msg)

        for i_export, export_return_code in enumerate(return_codes):
            if export_return_code != 0:
                msg = "Return code of export '{0}' was nonzero: '{1}'".format(
                    export_item_list[i_export], export_return_code)
                self.logger.error(msg)
                print(str_error(msg))

        msg = "At least one export process failed."
        self.logger.error(msg)
        raise RuntimeError(str_error(msg))

    # the logfiles may still report errors
    messages = self.check_if_logfiles_show_success("DFC_Export_*")
    if messages:

        msg = "Running Export ... done in {0:.2f}s ".format(time.time() - start_time)
        print(str_error(msg))
        self.logger.error(msg)

        for msg in messages:
            print(str_error(msg))
            self.logger.error(msg)

        msg = "At least one export failed. Please check the log files in '{0}'.".format(
            self.logfile_dir)
        self.logger.error(msg)
        raise RuntimeError(str_error(msg))

    # success
    msg = "Running Export ... done in {0:.2f}s ".format(time.time() - start_time)
    print(str_success(msg))
    self.logger.info(msg)
def run_import(self, pool: futures.ThreadPoolExecutor):
    ''' Run diffcrash import of runs

    Parameters
    ----------
    pool : `concurrent.futures.ThreadPoolExecutor`
        multiprocessing pool

    Raises
    ------
    RuntimeError
        if any import subprocess fails or the logfiles indicate a failure
    '''

    # list of arguments to run in the command line
    import_arguments = []

    # id 1 is the reference run
    # id 2 and higher are the imported runs
    counter_offset = 2

    # assemble arguments for running the import
    # entry 0 is the reference run, thus we start at 1
    for i_filepath in range(len(self.simulation_runs)):

        # without a parameter file the "_fem" import tool is used; note
        # its id-mapping flag is lowercase "-id" while the plain tool
        # takes uppercase "-ID"
        if self.parameter_file is None:
            executable = "DFC_Import_" + self.crash_code + "_fem"
            id_flag = "-id"
        else:
            executable = "DFC_Import_" + self.crash_code
            id_flag = "-ID"

        args = [os.path.join(self.diffcrash_home, executable)]
        if self.use_id_mapping:
            args.append(id_flag)
        args += [
            self.simulation_runs[i_filepath],
            self.project_dir,
            str(i_filepath + counter_offset)
        ]

        # append args to list
        import_arguments.append(args)

    # do the thing
    msg = "Running Imports ...\r"
    print(str_running(msg), end='', flush=True)
    self.logger.info(msg)

    start_time = time.time()
    return_code_futures = [
        pool.submit(run_subprocess, args) for args in import_arguments
    ]

    # wait for imports to finish (with a progressbar)
    n_imports_finished = sum(return_code_future.done()
                             for return_code_future in return_code_futures)
    while n_imports_finished != len(return_code_futures):

        # check again
        n_new_imports_finished = sum(
            return_code_future.done()
            for return_code_future in return_code_futures)

        # print progress only when something changed
        percentage = n_new_imports_finished / len(return_code_futures) * 100
        if n_imports_finished != n_new_imports_finished:
            msg = "Running Imports ... [{0}/{1}] - {2:3.2f}%\r".format(
                n_new_imports_finished, len(return_code_futures), percentage)
            print(str_running(msg), end='', flush=True)
            self.logger.info(msg)

        n_imports_finished = n_new_imports_finished

        # wait a little bit
        time.sleep(0.25)

    return_codes = [
        return_code_future.result()
        for return_code_future in return_code_futures
    ]

    # print failure
    if any(return_code != 0 for return_code in return_codes):

        n_failed_runs = 0
        for i_run, return_code in enumerate(return_codes):
            if return_code != 0:
                _err_msg = str_error(
                    "Run {0} failed to import with error code '{1}'.".
                    format(i_run, return_code))
                print(str_error(_err_msg))
                self.logger.error(_err_msg)
                n_failed_runs += 1

        err_msg = "Running Imports ... done in {0:.2f}s ".format(
            time.time() - start_time)
        print(str_error(err_msg))
        self.logger.error(err_msg)

        err_msg = "Import of {0} runs failed.".format(n_failed_runs)
        self.logger.error(err_msg)
        raise RuntimeError(str_error(err_msg))

    # check log files
    messages = self.check_if_logfiles_show_success('DFC_Import_*.log')
    if messages:

        # print failure
        msg = "Running Imports ... done in {0:.2f}s ".format(
            time.time() - start_time)
        print(str_error(msg))
        # BUGFIX: this failure was previously logged at info level
        self.logger.error(msg)

        # print failed logs
        for msg in messages:
            self.logger.error(msg)
            print(str_error(msg))

        err_msg = "At least one import failed. Please check the log files in '{0}'.".format(
            self.logfile_dir)
        self.logger.error(err_msg)
        raise RuntimeError(str_error(err_msg))

    # print success
    # BUGFIX: `.format` was previously applied AFTER colorizing
    # (print(str_success("...").format(...))) and the success was never
    # logged; format first, then colorize, and log like the other stages
    msg = "Running Imports ... done in {0:.2f}s ".format(
        time.time() - start_time)
    print(str_success(msg))
    self.logger.info(msg)
def get_ansa_server_command(ansa_filepath: str,
                            python33_path: str,
                            port: int,
                            interactive: bool,
                            show_gui: bool,
                            enable_logging: bool) -> list:
    ''' Assemble the ansa command for running the server

    Parameters
    ----------
    ansa_filepath : `str`
        path to ansa executable
    python33_path : `str`
        path to python 3.3 whose site-packages shall be used
    port : `int`
        port to run remote scripting on
    interactive : `bool`
        run ansa server in interactive mode
    show_gui : `bool`
        show a GUI of the server
    enable_logging : `bool`
        whether to log actions run in ANSA

    Returns
    -------
    cmd : `list` of `str`
        ansa command for running the server
    '''
    # BUGFIX: the return annotation said `str` but the function returns a
    # list of strings (as the docstring already stated)

    # path to the script, which ansa has to run
    current_dir = os.path.dirname(__file__)
    server_script_filepath = os.path.join(current_dir, "server_ansa.py")

    # ansa filepath: fall back to the platform default launcher
    if not ansa_filepath:
        if os.name == 'nt':
            ansa_filepath = 'ansa64.bat'
        elif os.name == 'posix':
            ansa_filepath = 'ansa64.sh'
        print(str_info(_msg_ansa_filepath_unspecified.format(ansa_filepath)))

    # basic commands
    cmd = [ansa_filepath, '-nolauncher']

    # show a gui?
    if not show_gui:
        cmd.append("-nogui")

    # find the site-packages dir
    if python33_path:
        site_packages_dirs = glob.glob(os.path.join(python33_path, '**', 'site-packages'),
                                       recursive=True)
        if len(site_packages_dirs) == 0:
            raise RuntimeError(
                str_error(_msg_no_site_packages.format(python33_path)))
        if len(site_packages_dirs) > 1:
            raise RuntimeError(
                str_error(_msg_multiple_site_packages.format(python33_path)))
        site_packages_path = site_packages_dirs[0]
        os.environ["ANSA_GRPC_SITE_PACKAGES_PATH"] = site_packages_path
        # (a commented-out check for required libraries such as grpc and
        # protobuf used to live here; removed as dead code)

    # python33 path not specified (print an info)
    else:
        print(str_info(_msg_site_packages_unspecified))
        site_packages_path = ""

    # this function in the script will be run with the following arguments
    cmd.append("-execscript")
    cmd.append(server_script_filepath)
    script_command = "\"serve({port},{interactive},{enable_logging})\""
    cmd.append("-execpy")
    # the unused `site_packages_path` kwarg of the original .format call
    # was dropped — the template does not contain that placeholder
    cmd.append(
        script_command.format(port=port,
                              interactive=interactive,
                              enable_logging=enable_logging))

    return cmd
def run_setup(self, pool: futures.ThreadPoolExecutor):
    ''' Run diffcrash setup

    Parameters
    ----------
    pool : `concurrent.futures.ThreadPoolExecutor`
        multiprocessing pool

    Raises
    ------
    RuntimeError
        if the setup process or its logfile check fails
    '''

    # SETUP
    msg = "Running Setup ... "
    # BUGFIX: `flush` was passed '' (falsy) so the running message was
    # not flushed like in the other stages
    print(str_running(msg) + '\r', end='', flush=True)
    self.logger.info(msg)

    # choose the setup executable:
    # - without a parameter file the "_fem" variant is always used
    # - with a parameter file it is only used for *.fz reference runs
    if self.parameter_file is None or ".fz" in self.reference_run:
        setup_executable = "DFC_Setup_" + self.crash_code + "_fem"
    else:
        setup_executable = "DFC_Setup_" + self.crash_code

    # assemble the command line (collapses the former 4-way duplication
    # over config_file/parameter_file combinations)
    args = [
        os.path.join(self.diffcrash_home, setup_executable),
        self.reference_run,
        self.project_dir
    ]
    if self.config_file is not None:
        args += ["-C", self.config_file]
    if self.parameter_file is not None:
        args += ["-P", self.parameter_file]

    start_time = time.time()

    # submit task
    return_code_future = pool.submit(run_subprocess, args)
    return_code = return_code_future.result()

    # check return code
    if return_code != 0:
        err_msg = "Running Setup ... done in {0:.2f}s".format(time.time() - start_time)
        print(str_error(err_msg))
        self.logger.error(err_msg)

        err_msg = "Process somehow failed."
        self.logger.error(err_msg)
        raise RuntimeError(str_error(err_msg))

    # check log
    messages = self.check_if_logfiles_show_success("DFC_Setup.log")
    if messages:
        err_msg = "Running Setup ... done in {0:.2f}s".format(time.time() - start_time)
        print(str_error(err_msg))
        self.logger.error(err_msg)

        # print failed logs
        for msg in messages:
            print(str_error(msg))
            self.logger.error(msg)

        err_msg = "Setup failed."
        self.logger.error(err_msg)
        raise RuntimeError(str_error(err_msg))

    # print success
    msg = "Running Setup ... done in {0:.2f}s".format(time.time() - start_time)
    print(str_success(msg))
    self.logger.info(msg)