def process_task(self, arg_task_file, aTaskDir):
    """Queue one task template for processing.

    Builds a unique, zero-padded numbered subdirectory under aTaskDir,
    records the task template path on the control item, and enqueues a
    ProcessQueueItem on the process queue.

    Raises:
        Re-raises any exception after logging so the failed task is not
        added to the summary instance.
    """
    try:
        # get the subdirectory index
        my_ndx = self.mAppsInfo.getNextIndex(aTaskDir)
        # form sub task directory, e.g. <aTaskDir>/00042
        sub_task_dir = PathUtils.append_path(
            PathUtils.include_trailing_path_delimiter(aTaskDir),
            "%05d" % my_ndx)
        # save the task template file name with path to the control item
        self.ctrl_item.fname = arg_task_file
        # prepare control item content, TODO don't really need it.
        my_content = self.ctrl_item.prepare(self.mAppsInfo, arg_task_file)
        my_queue_item = ProcessQueueItem(sub_task_dir, self.ctrl_item,
                                         self.mAppsInfo, my_content)
        self.mProcessQueue.enqueue(my_queue_item)
    except Exception as arg_ex:
        Msg.error_trace()
        Msg.err(str(arg_ex))
        # reraise to prevent adding to summary instance
        raise
    finally:
        pass
def process_summary(self, arg_sum_level=SummaryLevel.Fail):
    """Write the master regression summary log.

    Writes a timestamp header, the accumulated error section, the
    per-task summaries, and the run totals.

    Args:
        arg_sum_level: severity threshold controlling which per-task
            lines are echoed to the console as well as the log file.
    """
    my_utcdt = DateTime.UTCNow()
    # PathUtils helpers are called on the class elsewhere in this file;
    # no instance is needed.
    my_file_name = "%sregression_summary.log" % (
        PathUtils.include_trailing_path_delimiter(self.summary_dir)
    )

    # The `with` statement owns the handle; no manual close is needed.
    with open(my_file_name, "w") as my_ofile:
        try:
            my_ofile.write("Date: %s\n" % (DateTime.DateAsStr(my_utcdt)))
            my_ofile.write("Time: %s\n" % (DateTime.TimeAsStr(my_utcdt)))

            self.process_errors(my_ofile)
            my_instr_count, my_cycle_count = self.process_summary_tasks(
                my_ofile, arg_sum_level
            )
            self.process_summary_totals(my_ofile, my_instr_count,
                                        my_cycle_count)
        except Exception as arg_ex:
            # Summary generation is best-effort; log and continue.
            Msg.error_trace()
            Msg.err("Processing Summary, " + str(arg_ex))
def execute( self ):
    """Run the RTL simulation command and report its results.

    Returns:
        bool: False if an exception occurred while launching the
        simulation, True otherwise (see NOTE on the return value below).
    """
    my_result = None
    test_passed = True
    try:
        self.build_cmd()
        self.copyWavesFsdbDoFile()
        self.outputRerunScript()
        # report the command line
        Msg.info( "RTLCommand = " + str( { "rtl-command": self.rtl_cmd } ))
        # execute the simulation
        my_result = SysUtils.exec_process( self.rtl_cmd, self.rtl_log, self.rtl_log, self.ctrl_item.timeout, True )
        # NOTE(review): the two adjacent literals concatenate to
        # "RTLEX-RESULT"; the trailing "" looks like a typo for a
        # separate argument -- confirm the intended Msg.user signature.
        Msg.user( "Results: %s" % ( str( my_result )), "RTLEX-RESULT" "")
        my_extract_results = self.extract_results( my_result, "./" + self.rtl_log, None )
        # report the results
        Msg.info( "RTLResult = " + str( my_extract_results ))
    except Exception as arg_ex:
        Msg.error_trace( "RTL Process Failure" )
        Msg.err( "RTL did not properly execute, Reason: %s" % ( str( arg_ex )))
        return False
    finally:
        pass
    # NOTE(review): the process return code is never inspected here, so a
    # failing simulation still yields True; presumably extract_results
    # records pass/fail state elsewhere -- confirm before changing.
    #return SysUtils.success( int(my_result[ RtlResult.process_retcode ]) )
    return test_passed
def process_executors(self):
    """Run each loaded executor's pre / execute / post sequence.

    Iterates self.executors in order; the chain stops at the first
    executor whose execute() returns False (its post() is not run).

    Returns:
        bool: True when the loop completed, False when an exception was
        reported before the flag could be set.  NOTE(review): the
        `return` inside `finally` swallows any in-flight exception
        (including KeyboardInterrupt) -- confirm that is intended.
    """
    my_ret_val = False
    try:
        for my_executor in self.executors:
            Msg.user(
                "ExecuteController::process_executors( my_executor: %s )"
                % str(type(my_executor)),
                "EXE-CTRL",
            )
            my_executor.pre()
            if not my_executor.execute():
                Msg.user("Executor returning False", "EXE-CTRL")
                break
            my_executor.post()
        my_ret_val = True
    except Exception as arg_ex:
        Msg.error_trace()
        self.report_error(arg_ex)
    finally:
        return my_ret_val
def query_logs(self, arg_log, arg_elog):
    """Parse the result log and, optionally, the error log.

    Args:
        arg_log: path of the result log to parse.
        arg_elog: optional error-log path; skipped when None.

    Returns:
        tuple: (results, errors); errors is None when no error log was
        supplied.

    Raises:
        Propagates any parse failure after logging it.
    """
    my_errors = None

    # The `with` blocks close the handles on all paths; the old manual
    # close() calls in `finally` were redundant.
    with self.open_log_file(arg_log, "r") as my_hfile:
        Msg.dbg("File Open: %s" % arg_log)
        try:
            my_results = self.query_result_log(my_hfile)
        except Exception as arg_ex:
            # NOTE: Determine the possible errors and handle accordingly,
            # for now just keep processing
            Msg.error_trace()
            Msg.err(str(arg_ex))
            raise

    if arg_elog is not None:
        with self.open_log_file(arg_elog, "r") as my_hfile:
            Msg.dbg("File Open: %s" % arg_elog)
            try:
                my_errors = self.query_errors(my_hfile)
            except Exception as arg_ex:
                # NOTE: Determine the possible errors and handle
                # accordingly, for now just keep processing
                Msg.error_trace()
                Msg.err(str(arg_ex))
                raise

    return my_results, my_errors
def process_summary(self, sum_level=SummaryLevel.Fail):
    """Write the performance summary log with totals and rates.

    Fix: the old `finally: my_ofile.close()` raised AttributeError when
    open() itself failed (my_ofile was still None), masking the real
    error; the `with` statement already closes the file on every path.
    """
    my_file_name = "%sperformance_summary.log" % (
        PathUtils.include_trailing_path_delimiter(self.summary_dir))
    Msg.dbg("Master Log File: %s" % (my_file_name))
    my_utcdt = DateTime.UTCNow()
    try:
        with open(my_file_name, "w") as my_ofile:
            my_ofile.write("Date: %s\n" % (DateTime.DateAsStr(my_utcdt)))
            my_ofile.write("Time: %s\n" % (DateTime.TimeAsStr(my_utcdt)))

            self.process_errors(my_ofile)
            my_total_count, my_total_elapsed = self.process_groups(my_ofile)

            Msg.blank("info")
            my_line = "Total Instructions Generated: %3d\n" % (
                my_total_count)
            my_line += "Total Elapsed Time: %0.3f\n" % (my_total_elapsed)
            # guard against divide-by-zero when nothing was timed
            my_line += "Overall Instructions per Second: %0.3f\n" % (
                SysUtils.ifthen(bool(my_total_elapsed),
                                my_total_count / my_total_elapsed, 0))

            Msg.info(my_line)
            my_ofile.write(my_line)
    except Exception as arg_ex:
        # Summary output is best-effort; log the failure and continue.
        Msg.error_trace()
        Msg.err("Error Processing Summary, " + str(arg_ex))
def dir_count(arg_class, arg_path=None):
    """Return the number of subdirectories under arg_path.

    Args:
        arg_path: directory to scan; None semantics are delegated to
            PathUtils.list_dirs.

    Returns:
        int: the subdirectory count, or 0 if the listing failed.
    """
    try:
        return len(PathUtils.list_dirs(arg_path))
    except Exception:
        # Narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer swallowed here.
        Msg.error_trace()
        print("Exception")
        return 0
def process_group_items(self, arg_ofile, arg_items):
    """Accumulate instruction counts and elapsed time for one group.

    Writes one summary line per task to arg_ofile.

    Args:
        arg_ofile: open, writable summary file.
        arg_items: iterable of task items carrying task_id, total,
            force_start, and force_end attributes.

    Returns:
        tuple: (instruction total, elapsed-seconds total) over the items
        processed before any failure.
    """
    total_count = 0
    total_elapsed = 0
    try:
        for task_item in arg_items:
            item_elapsed = task_item.force_end - task_item.force_start
            item_count = task_item.total
            total_elapsed += item_elapsed
            total_count += item_count
            arg_ofile.write(
                "\nTask: %s, Instructions: %d, Elapsed: %0.3f\n"
                % (task_item.task_id, item_count, item_elapsed))
    except Exception as arg_ex:
        # report the failure but still return whatever accumulated
        Msg.error_trace()
        Msg.err("Error Processing Summary, Reason: %s" % (str(arg_ex)))
    return total_count, total_elapsed
def load_executors(self):
    """Create, load, and register an executor for each sequence app.

    Executors whose skip() returns True are not registered.  Failures
    are forwarded to report_error.

    Returns:
        bool: always True.  NOTE(review): returning from `finally`
        discards any in-flight exception, so callers always observe
        True even after a failure -- confirm that is intended.
    """
    try:
        # for app_cfg in self.mAppsInfo.mSequenceApps:
        for seq_app_cfg in self.mAppsInfo.mSequenceApps:
            my_executor = seq_app_cfg.createExecutor()
            Msg.user("ExecuteController::load_executors( 2 )")
            my_executor.load(self.ctrl_item)
            Msg.user("ExecuteController::load_executors( 1 )")
            if not my_executor.skip():
                Msg.user("ExecuteController::load_executors( 3 )")
                my_executor.set_frun(self.frun)
                # Msg.user( "ExecuteController::load_executors( 4 )" )
                my_executor.set_task_file(self.task_file)
                # Msg.user( "ExecuteController::load_executors( 5 )" )
                my_executor.set_task_name(self.task_name)
                # Msg.user( "ExecuteController::load_executors( 5.1 )" )
                self.executors.append(my_executor)
    except Exception as arg_ex:
        Msg.user("ExecuteController::load_executors( 14 )")
        Msg.error_trace()
        self.report_error(arg_ex)
    finally:
        Msg.user("ExecuteController::load_executors( 13 )")
        return True
def run(self):
    """Worker-thread body: run one task through its launcher.

    On success a SummaryQueueItem wrapping the process result is built;
    on failure a SummaryErrorQueueItem describing the exception.  The
    `finally` block guarantees the item is enqueued (spinning until the
    summary queue accepts it), the live-thread count is decremented, and
    the completion semaphore is released.
    """
    # Msg.user( "Worker Thread Instance Id: %s, (1)" % ( str( id( self ))) , "WORK-THREAD" )
    my_sum_qitem = None
    try:
        self.setupWorkDir()
        my_launcher = self.create_launcher()
        Msg.user("Launcher Id 1: %s" % (str(id(my_launcher))), "WORK-THREAD")
        # Msg.user( "Worker Thread Instance Id: %s, (2)" % ( str( id( self ))) , "WORK-THREAD" )
        my_launcher.launch()
        Msg.user("Launcher Id 2: %s" % (str(id(my_launcher))), "WORK-THREAD")
        # Msg.user( "Worker Thread Instance Id: %s, (3)" % ( str( id( self ))) , "WORK-THREAD" )
        my_process_result = my_launcher.extract_results()
        Msg.user("Process Result: %s" % (my_process_result), "WORK-THREAD")
        Msg.user("Launcher Id 3: %s" % (str(id(my_launcher))), "WORK-THREAD")
        # self.launcher = my_launcher
        Msg.user("Process Result: %s" % (my_process_result), "WORK-THREAD")
        my_sum_qitem = SummaryQueueItem(my_process_result)
        Msg.user("Created Summary Queue Item", "WORK-THREAD")
    except Exception as arg_ex:
        Msg.error_trace(str(arg_ex))
        Msg.err("Message: %s, Control File Path: %s"
                % (str(arg_ex), self.queue_item.work_dir))
        my_sum_qitem = SummaryErrorQueueItem({
            "error": arg_ex,
            "message": "Error Processing Task ...",
            "path": self.queue_item.ctrl_item.file_path(),
            "type": str(type(arg_ex))
        })
    finally:
        # my_launcher = None
        my_attempt = 0
        # spin until the summary queue accepts the item; log progress
        # every 10th attempt
        while (self.summary.queue.enqueue(my_sum_qitem) == False):
            SysUtils.sleep(100)
            #heartbeat
            my_attempt += 1
            if (my_attempt % 10) == 0:
                Msg.dbg("Attempt %d to insert into summary queue"
                        % (my_attempt))
        self.thread_count.delta(-1)
        Msg.user("Thread Count Decremented", "WORK-THREAD")
        self.done_semaphore.release()
        Msg.user("Semaphore Released", "WORK-THREAD")
        Msg.user("Launcher Id 5: %s" % (str(id(self.launcher))), "WORK-THREAD")
def process_summary_task(self, arg_ofile, arg_task, arg_sum_level):
    """Write per-stage summary lines for every item of one task.

    For each item, the generate / simulate / rtl / trace-compare result
    lines are appended to arg_ofile; a line is also echoed to the
    console when arg_sum_level is at or above the item's level for that
    stage.

    Args:
        arg_ofile: open, writable summary file.
        arg_task: iterable of summary items belonging to one task.
        arg_sum_level: console-echo threshold.

    Returns:
        tuple: (accumulated instruction count, accumulated cycle count).
    """
    my_instr_count = 0
    my_cycle_count = 0
    for my_item in arg_task:
        try:
            if my_item.has_generate():
                # post the generate results
                my_outline = my_item.get_gen_line()
                if my_outline is not None:
                    arg_ofile.write(my_outline)
                    if arg_sum_level >= my_item.force_level:
                        Msg.info(my_outline)

            if my_item.has_simulate():
                # post the simulate results
                my_outline = my_item.get_iss_line()
                if my_outline is not None:
                    arg_ofile.write(my_outline)
                    if arg_sum_level >= my_item.iss_level:
                        Msg.info(my_outline)
                my_instr_count += my_item.instr_count

            if my_item.has_rtl():
                # post the rtl run results
                my_outline = my_item.get_rtl_line()
                if my_outline is not None:
                    arg_ofile.write(my_outline)
                    if arg_sum_level >= my_item.rtl_level:
                        Msg.info(my_outline)
                my_cycle_count += my_item.cycle_count

            try:
                if my_item.has_trace_cmp():
                    # post the trace compare results
                    my_outline = my_item.get_trace_cmp_line()
                    if my_outline is not None:
                        arg_ofile.write(my_outline)
                        if arg_sum_level >= my_item.trace_cmp_level:
                            Msg.info(my_outline)
            except Exception:
                # narrowed from a bare `except:`
                Msg.user("Exception Raised", "GOT HERE")
        except Exception:
            # narrowed from a bare `except:`; skip the bad item and
            # continue with the rest of the task
            Msg.error_trace()
            Msg.err("Processing Task Index: %s" % (str(my_item.index)))
    return my_instr_count, my_cycle_count
def write_file(aClass, aFilePath, aContent, aFileType):
    """Write aContent to aFilePath, reporting any failure.

    Args:
        aFilePath: destination path (created or truncated).
        aContent: string to write.
        aFileType: label used in the error message (e.g. "Test").

    Returns:
        bool: True if at least one character was written; False on an
        empty write or any I/O error.
    """
    # The `with` statement closes the file on all paths; the old
    # explicit close() in a `finally` block was redundant.
    with open(aFilePath, "w") as my_ofile:
        try:
            return (my_ofile.write(aContent) > 0)
        except Exception as arg_ex:
            Msg.error_trace()
            Msg.err("Error Writing %s File, %s" % (aFileType, str(arg_ex)))
    return False
def load(self, arg_queue_item):
    """Populate this summary item from a finished queue item.

    Loads the process info, task info, and process log, then reports.
    Failures are logged and swallowed -- loading is best-effort.
    """
    try:
        self.load_process_info(arg_queue_item.process_info)
        self.load_task_info()
        self.load_process_log()
        self.report()
    except Exception as arg_ex:
        Msg.error_trace()
        Msg.err(str(arg_ex))
    except BaseException:
        # NOTE(review): this also swallows SystemExit and
        # KeyboardInterrupt without re-raising -- confirm intended.
        Msg.error_trace()
def instruction_counts(self):
    """Extract generated-instruction counts from the generator log.

    Scans <work_dir>/<force_log> for " Instructions Generated" lines and
    records the Total / Secondary / Default counts on self.

    Raises:
        Exception: when no instruction-count line exists in the log.

    Returns:
        0 when a count line could not be parsed, otherwise None.
    """
    my_lines = None
    my_glog = "%s%s" % (
        PathUtils.include_trailing_path_delimiter(self.work_dir),
        self.force_log,
    )

    Msg.user("Path: %s" % my_glog)

    # `with` closes the handle; the old explicit close() was redundant.
    with open(my_glog, "r") as my_log:
        my_lines = my_log.readlines()
        Msg.dbg("Line %d: %s" % (len(my_lines), my_lines[-1]))

    try:
        my_results = [
            my_tmp for my_tmp in my_lines
            if re.search(" Instructions Generated", my_tmp)
        ]
        Msg.lout(my_results, "dbg")
        if not my_results:
            raise Exception('Instruction Count Not Found in "gen.log"')

        # ok there are instruction counts located
        for my_line in my_results:
            my_line = my_line.strip()

            # find the instruction type (Total, Default, Secondary)
            my_lpos = my_line.find("]")
            my_rpos = my_line.find("Instr")
            my_type = PerformanceInstructionType.instruction_type(
                (my_line[my_lpos + 1:my_rpos - 1]).strip())

            # get the count for this instruction type
            my_lpos = my_line.find(":")
            my_count = int(my_line[my_lpos + 2:].strip())

            if my_type == PerformanceInstructionType.Total:
                self.count = my_count
            elif my_type == PerformanceInstructionType.Secondary:
                self.secondary = my_count
            elif my_type == PerformanceInstructionType.Default:
                self.default = my_count

    except ValueError:
        Msg.error_trace()
        # Fix: the old message did int(my_lines[-1]), which itself raised
        # ValueError inside this handler; report the raw line instead.
        Msg.err("Unable to extract instruction count from %s"
                % (my_lines[-1]))
        return 0
def process(self):
    """Run the task list once per configured iteration.

    Bails out as soon as a shutdown has been requested; a failure in a
    single iteration is logged and does not stop later iterations.
    """
    for _ in range(self.ctrl_item.iterations):
        # shutdown has been triggered - do not start the next iteration
        if self.is_terminated():
            return
        try:
            self.process_task_list()
        except Exception as arg_ex:
            Msg.error_trace()
            Msg.err(str(arg_ex))
            Msg.blank()
def archive_dir(cls, arg_srcdir):
    """Rename arg_srcdir to a dated, numbered archive directory.

    The target is "<srcdir>_<datestamp>_<NNN>" where NNN is one past the
    highest existing archive index for today's stamp (000 when none).

    Returns:
        The result of PathUtils.move on success, False on any failure.
    """
    try:
        # get the base name (today's date stamp)
        my_basename = "%s" % (str(DateTime.YMD()))
        Msg.dbg("Base Name: %s" % (my_basename))
        # set the directory mask; ??? matches the 3-digit index
        my_srcdir = PathUtils.exclude_trailing_path_delimiter(arg_srcdir)
        my_srcmask = "%s_%s_???" % (my_srcdir, my_basename)
        Msg.dbg("Directory Mask: %s" % (my_srcmask))
        # list any previous copies
        my_dirlist = sorted(PathUtils.list_files(my_srcmask))
        Msg.lout(my_dirlist, "dbg")
        my_findex = 0
        # there are only two possiblities here
        # 1. there is only one match, in which case the mask does not
        #    include a number
        # 2. there are more than one match in which case the last match
        #    should contain a number
        if len(my_dirlist) > 0:
            # remove the wildcards
            my_srcmask = my_srcmask.replace("???", "")
            Msg.dbg("New File Mask: %s" % (my_srcmask))
            my_tmp = my_dirlist[-1]
            Msg.dbg("Last Filename: %s" % (my_tmp))
            # strip the common prefix, leaving only the numeric index
            my_tmp = my_tmp.replace(my_srcmask, "")
            Msg.dbg("My Index Last Filename: %s" % (my_tmp))
            my_findex = int(my_tmp) + 1
            Msg.dbg("My New Index Filename: %s" % (my_findex))
        # get the target name
        my_tgtdir = "%s_%s_%0.3d" % (my_srcdir, my_basename, my_findex)
        Msg.dbg("Target Directory: %s" % (my_tgtdir))
        return PathUtils.move(my_srcdir, my_tgtdir)
    except Exception as arg_ex:
        Msg.error_trace(str(arg_ex))
        Msg.err(str(arg_ex))
        return False
def process_task_list(self):
    """Process every task file in the current task list.

    Returns immediately if shutdown was requested; a failure on one
    task is logged and the remaining tasks are still processed.
    """
    for a_task_file in self.task_list:
        # shutdown has been triggered - stop processing tasks
        if self.is_terminated():
            return
        working_dir = PathUtils.current_dir()
        Msg.user("Task list current dir %s" % working_dir)
        try:
            self.process_task_file(a_task_file, working_dir)
        except Exception as arg_ex:
            Msg.error_trace()
            Msg.err(str(arg_ex))
            Msg.blank()
def process_summary_tasks(self, arg_ofile, arg_sum_level):
    """Summarize every task, accumulating instruction and cycle counts.

    Writes a "Task: <name>" header plus per-item lines for each task; a
    failure while summarizing one task skips only that task.

    Returns:
        tuple: (total instruction count, total cycle count); the totals
        are also stored on self.
    """
    Msg.blank("info")
    my_instr_count = 0
    my_cycle_count = 0

    # get the outer task
    for my_key in self.tasks:
        try:
            arg_ofile.write("\nTask: %s\n" % (my_key))
            task_instr_count, task_cycle_count = self.process_summary_task(
                arg_ofile, self.tasks[my_key], arg_sum_level)
            my_instr_count += task_instr_count
            my_cycle_count += task_cycle_count
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt is no
            # longer swallowed while summarizing
            Msg.error_trace()
            Msg.err("Processing Task %s, Skipping Item ...." % (my_key))

    self.total_instruction_count = my_instr_count
    self.total_cycle_count = my_cycle_count
    return my_instr_count, my_cycle_count
def exec_content(cls, arg_content, arg_suppress=False, aLocals=None):
    """Execute control-file content and return its namespaces.

    Args:
        arg_content: source text handed to exec().
        arg_suppress: when True, suppress detailed syntax-error output.
        aLocals: optional dict of initial locals; deep-copied so the
            caller's dict is never mutated.

    Returns:
        tuple: (globals, locals) after execution, or None when execution
        failed for a reason other than a SyntaxError.

    Raises:
        SyntaxError: re-raised after reporting, so callers can abort.

    NOTE: exec() runs arbitrary code -- only trusted control files may
    be passed in.
    """
    my_glb = {}
    my_loc = {}
    if isinstance(aLocals, dict):
        import copy
        my_loc = copy.deepcopy(aLocals)
    try:
        exec(str(arg_content), my_glb, my_loc)
        return my_glb, my_loc
    except SyntaxError as arg_ex:
        if not arg_suppress:
            my_exc_type, my_exc_val, my_exc_tb = sys.exc_info()
            Msg.err("Syntax Error...")
            Msg.err("FileName: %s"
                    % (str(my_exc_tb.tb_frame.f_code.co_filename)))
            Msg.err("Line: %d" % (int(my_exc_tb.tb_lineno)))
            Msg.err("Name: %s" % (str(my_exc_tb.tb_frame.f_code.co_name)))
            Msg.err("Type: %s" % (str(my_exc_type)))
            Msg.error_trace(my_exc_tb)
        raise
    except Exception as arg_ex:
        Msg.err(str(arg_ex) + ", " + str(type(arg_ex)))
    except BaseException:
        # narrowed from a bare `except:` for clarity; same catch set
        my_exc_type, my_exc_val, my_exc_tb = sys.exc_info()
        Msg.err("Unable to process contents of Control File...")
        Msg.err("FileName: %s"
                % (str(my_exc_tb.tb_frame.f_code.co_filename)))
        Msg.err("Line: %d" % (int(my_exc_tb.tb_lineno)))
        Msg.err("Name: %s" % (str(my_exc_tb.tb_frame.f_code.co_name)))
        # Fix: exception classes expose __name__, not .name
        Msg.err("Type: %s" % (str(my_exc_type.__name__)))
        # Fix: "Message" had no %s placeholder (the % itself raised), and
        # Python 3 exceptions have no .message attribute
        Msg.err("Message: %s" % (str(my_exc_val)))
        Msg.error_trace(my_exc_tb)
    return None
def process_groups(self, arg_ofile):
    """Write a summary section for each task group.

    Each group gets a header, its per-item lines, and a footer with the
    group totals and instructions-per-second rate.

    Returns:
        tuple: (total instructions, total elapsed seconds) across all
        groups that processed successfully.
    """
    grand_count = 0
    grand_elapsed = 0
    for group_name, group_items in self.groups.task_groups().items():
        try:
            header = "\nBegin Group: %s\n" % (group_name)
            arg_ofile.write(header)
            Msg.blank("info")
            Msg.info(header)

            group_count, group_elapsed = self.process_group_items(
                arg_ofile, group_items)
            grand_count += group_count
            grand_elapsed += group_elapsed

            footer = "\nGroup Instructions: %3d\n" % (group_count)
            footer += "Group Elapsed Time: %0.3f\n" % (group_elapsed)
            # ifthen guards against division by zero
            footer += "Group Instructions per Second: %0.3f\n" % (
                SysUtils.ifthen(bool(group_elapsed),
                                group_count / group_elapsed, 0))
            footer += "End Group: %s\n" % (group_name)

            Msg.info(footer)
            arg_ofile.write(footer)
        except Exception as arg_ex:
            Msg.error_trace()
            Msg.err("Unable to process, Group: %s, Reason: %s"
                    % (group_name, type(arg_ex)))
    return grand_count, grand_elapsed
def process(self):
    """Run the task list once per configured iteration.

    Stops early when shutdown has been triggered; an exception in one
    iteration is logged and the next iteration still runs.  (A no-op
    `finally: pass` was removed.)
    """
    for my_ndx in range(0, self.ctrl_item.iterations):
        # shutdown has been triggered - do not do the next iteration
        if self.is_terminated():
            return
        try:
            self.process_task_list()
        except Exception as arg_ex:
            Msg.error_trace()
            Msg.err(str(arg_ex))
            Msg.blank()
            # self.do_fail()
def handle_expire(self, arg_expire, arg_mask):
    """Clean up expired output directories, then terminate.

    When arg_expire equals Expire.all, every directory under the output
    base whose name starts with arg_mask is removed; otherwise
    directories older than arg_expire days are expired.

    Always exits the process (sys.exit(1)) so the master run must be
    restarted afterwards.
    """
    try:
        Msg.user(
            "Expire: %s, Mask: %s" % (str(arg_expire), str(arg_mask)),
            "EXPIRE",
        )
        my_output_base = Formats.main_output_dir % self.output_root
        Msg.user("Expire [1], Output Base: %s" % (str(my_output_base)),
                 "EXPIRE")
        Msg.info("Output Directories Cleanup, Please wait ...")

        if int(arg_expire) == Expire.all:
            Msg.info("Building Directory List, [%s]" % (my_output_base))
            my_dirs = PathUtils.list_dirs(my_output_base)
            Msg.dbg("All Dirs: %s" % (str(my_dirs)), "EXPIRE")

            for my_dir in my_dirs:
                if my_dir.startswith(arg_mask):
                    my_full_dir = "%s%s" % (
                        PathUtils.include_trailing_path_delimiter(
                            my_output_base),
                        my_dir,
                    )
                    Msg.info("Removing: %s" % (my_full_dir))
                    PathUtils.rmdir(my_full_dir, True)
        else:
            # Fix: this branch previously referenced my_full_dir and
            # my_expire, neither of which is defined here (NameError);
            # expire against the output base using the caller-supplied
            # age in days.
            Msg.info("Checking for Expired Directories: %s"
                     % (my_output_base))
            my_expiredate = DateTime.DateDelta(int(arg_expire))
            PathUtils.expire(my_output_base, my_expiredate)

    except Exception as ex:
        Msg.error_trace()
        Msg.err(str(ex))
    finally:
        Msg.info("Operation Complete, Restart Master Run to continue ...")
        sys.exit(1)
def run(self):
    """Summary-thread body: drain the summary queue until workers finish.

    Blocks on the summary queue; error items are committed as error
    items, normal items as summary items.  A dequeue timeout is used to
    poll the workers-done event so the loop can exit.  Always signals
    summary_done_event on the way out.
    """
    # Block on process queue while we have threads running and stuff
    # to do
    try:
        while True:
            # not workers_done_event.isSet():
            # Pop off the top of the process queue (should block if the
            # queue is empty)
            try:
                my_qitem = self.summary_queue.dequeue(0)
                if isinstance(my_qitem, SummaryErrorQueueItem):
                    Msg.user(str(my_qitem.error_info), "SUMMARY_ERROR")
                    my_eitem = SummaryErrorItem()
                    my_eitem.load(my_qitem)
                    self.summary.commit_error_item(my_eitem)
                else:
                    my_item = self.summary.create_summary_item()
                    my_item.load(my_qitem)
                    self.summary.commit_item(my_item)
                    my_item.clean_up()
                my_qitem = None
            except TimeoutError as arg_ex:
                # Msg.dbg( str( arg_ex ) )
                # queue empty: exit once all workers are done, otherwise
                # emit a heartbeat and keep polling
                if workers_done_event.isSet():
                    break
                else:
                    self.HeartBeat()
                    continue
    except Exception as arg_ex:
        Msg.error_trace()
        Msg.err(str(arg_ex))
        raise
    except BaseException:
        Msg.error_trace()
        raise
    finally:
        summary_done_event.Signal()
def run(self):
    """Execute the task via its launcher and return a summary item.

    Returns:
        SummaryQueueItem wrapping the launcher's extracted results on
        success, or a SummaryErrorQueueItem describing the failure.

    Fix: the return was moved out of `finally`, where it silently
    swallowed non-Exception interrupts (e.g. KeyboardInterrupt).
    """
    my_sum_qitem = None
    try:
        my_launcher = self.create_launcher()
        my_launcher.launch()
        my_sum_qitem = SummaryQueueItem(my_launcher.extract_results())
    except Exception as arg_ex:
        Msg.error_trace(str(arg_ex))
        Msg.err("Message: %s, Control File Path: %s"
                % (str(arg_ex), PathUtils.current_dir()))
        my_sum_qitem = SummaryErrorQueueItem({
            "error": arg_ex,
            "message": "Error Processing Task ...",
            "path": self.ctrl_item.file_path(),
            "type": str(type(arg_ex)),
        })
    return my_sum_qitem
def purge_dirs(arg_class, arg_basedir):
    """Remove every subdirectory (and its contents) under arg_basedir.

    Args:
        arg_basedir: directory whose children are removed.

    Returns:
        bool: True when all removals were attempted, False if listing or
        removal raised.
    """
    try:
        my_dirlist = sorted(os.listdir(arg_basedir))
        Msg.lout(my_dirlist, "dbg")
        for my_dir in my_dirlist:
            PathUtils.rmdir(
                PathUtils.include_trailing_path_delimiter(arg_basedir)
                + my_dir, True)
    except Exception as arg_ex:
        # One handler suffices: the former separate OSError clause
        # duplicated this body exactly (OSError is an Exception subclass).
        Msg.error_trace()
        Msg.err(str(arg_ex))
        return False
    return True
def load_process_log(self):
    """Parse the process log line by line, then patch missing results.

    Each log line updates summary state via load_process_line().  If a
    stage command was recorded without its matching result line (the
    process died mid-stage), a synthetic result is loaded for the first
    such stage so downstream reporting stays consistent.
    """
    Msg.fout(self.process_log, "dbg")
    # The `with` statement closes the handle on all paths; the old
    # explicit close() in a `finally` block was redundant.
    with open(self.process_log, "r") as my_flog:
        try:
            for my_line in my_flog:
                Msg.user("Load: %s" % my_line, "PROCESS-LINE")
                self.load_process_line(my_line)
        except Exception as arg_ex:
            Msg.error_trace(arg_ex)
            Msg.err(str(arg_ex))

    # here is the lowdown, if a command line exists then a result line
    # must also exist, check for generate pair
    if (self.detail_flags & SummaryDetail.GenCmd
            ) and not (self.detail_flags & SummaryDetail.GenResult):
        retcode, message = self.check_missing_result(
            self.process_result, "generator")
        self.load_gen_result({
            "retcode": retcode,
            "stdout": self.process_result[1],
            "stderr": self.process_result[2],
            "start": self.process_result[3],
            "end": self.process_result[4],
            "message": message,
        })
    # check for summary pair
    elif (self.detail_flags & SummaryDetail.IssCmd
            ) and not (self.detail_flags & SummaryDetail.IssResult):
        retcode, message = self.check_missing_result(
            self.process_result, "iss")
        self.load_iss_result({
            "retcode": retcode,
            "log": None,
            "message": message
        })
    # check for compare pair
    elif (self.detail_flags & SummaryDetail.TraceCmpCmd
            ) and not (self.detail_flags & SummaryDetail.TraceCmpResult):
        retcode, message = self.check_missing_result(
            self.process_result, "trace-cmp")
        self.load_trace_cmp_result({
            "trace-cmp-retcode": retcode,
            "trace-cmp-log": None,
            "message": message,
        })
    # check for RTL pair
    elif (self.detail_flags & SummaryDetail.RtlCmd
            ) and not (self.detail_flags & SummaryDetail.RtlResult):
        retcode, message = self.check_missing_result(
            self.process_result, "rtl")
        self.load_rtl_result({
            "retcode": retcode,
            "log": None,
            "message": message
        })
def exec_process(arg_class, arg_cmd, arg_fout=None, arg_ferr=None,
                 arg_timeout=None, arg_kill=False, arg_set_process=None):
    """Spawn a shell command, optionally with a timeout, collect output.

    Args:
        arg_cmd: command line passed to the shell (shell=True -- must
            come from trusted configuration, never untrusted input).
        arg_fout, arg_ferr: stdout / stderr destinations, resolved via
            SysUtils.open_output.
        arg_timeout: seconds before the process is forcibly terminated;
            None (or a non-numeric value) waits indefinitely.
        arg_kill: on timeout, kill the whole process group instead of
            just the spawned process.
        arg_set_process: optional callback handed the Popen object so
            the caller can retain a reference.

    Returns:
        tuple: (returncode, stdout, stderr, start_time, end_time).  On
        timeout, stderr carries "Process Timeout Occurred"; on spawn
        failure a PROCESS_UNABLE_TO_SPAWN tuple is returned.

    Fix: the collapsed original indexed `my_result[0]` even when the
    spawn had failed and my_result was still None (TypeError masking the
    real error); the unable-to-spawn fallback return was unreachable.
    """
    my_process_cmd = arg_cmd
    Msg.dbg(my_process_cmd)
    my_fout, my_ferr = SysUtils.open_output(arg_fout, arg_ferr)
    my_result = None
    my_stdout = None
    my_stderr = None
    Msg.user(
        "SysUtils.exec_process( ... , arg_set_process: %s"
        % (str(bool(arg_set_process is not None))), "SYS-UTILS")
    Msg.flush()
    try:
        from common.datetime_utils import DateTime
        try:
            my_process = subprocess.Popen(
                my_process_cmd, stdout=my_fout, stderr=my_ferr, shell=True)
            my_start_time = DateTime.Time()
            my_pid = my_process.pid
            if arg_timeout is None or not SysUtils.is_numeric(arg_timeout):
                # no timeout: simply wait for completion
                Msg.user("Exec PID[NONE]: %s" % (str(my_pid)))
                my_result = my_process.communicate()
                Msg.user("Done PID[NONE]: %s" % (str(my_pid)))
            else:
                Msg.user("Timeout: %s" % (str(arg_timeout)))
                try:
                    my_pgid = os.getpgid(my_pid)
                    my_parent_pgid = os.getpgid(0)
                    # use callback to save an instance of the process to
                    # allow the caller to manage it
                    if arg_set_process is not None:
                        arg_set_process(my_process)
                    my_result = my_process.communicate(timeout=arg_timeout)
                except TimeoutExpired:
                    try:
                        my_parent_pgid = os.getpgid(0)
                        my_pgid = os.getpgid(my_pid)
                        # on timeout kill the spawned process and all
                        # related sub-processes
                        if arg_kill:
                            os.killpg(os.getpgid(my_pid), signal.SIGKILL)
                        else:
                            Msg.error_trace()
                            Msg.err("Process Did Not Execute Properly: %s"
                                    % (my_process_cmd))
                            os.kill(my_pid, signal.SIGTERM)
                    except OSError as arg_ex:
                        Msg.error(str(arg_ex))
                    finally:
                        # reap the terminated process to avoid a zombie
                        my_result = my_process.communicate()
                        Msg.flush()
                    return (my_process.returncode, None,
                            "Process Timeout Occurred",
                            my_start_time, my_start_time)
        except Exception as arg_ex:
            Msg.err(str(arg_ex))
            Msg.error_trace()
        finally:
            Msg.user("[1] SysUtils::exec_process")

        if my_result is not None:
            my_end_time = DateTime.Time()
            if my_result[0] is not None:
                my_stdout = my_result[0].decode("utf-8")
            if my_result[1] is not None:
                my_stderr = my_result[1].decode("utf-8")
            return (my_process.returncode, my_stdout, my_stderr,
                    my_start_time, my_end_time)
    finally:
        if not (my_fout == PIPE or my_fout.closed):
            my_fout.close()
        if not (my_ferr == PIPE or my_ferr.closed):
            my_ferr.close()
    # NOTE(review): this fallback tuple has six elements while the
    # success tuple has five -- confirm what callers unpack.
    return (SysUtils.PROCESS_UNABLE_TO_SPAWN, None,
            "Unable to Spawn Process ....", 0, None, None)
def process(self):
    """Iterate the control file's items, dispatching each to a controller.

    Runs the full control set once per configured iteration.  Each item
    dictionary becomes a ControlItem, which is dispatched to a
    TaskController or FileController by item type; per-item failures are
    converted to summary error queue items so the run can continue.

    Returns:
        bool: always True; failures are reported, not propagated.
    """
    # Msg.dbg( "FileController::process()" )
    # Msg.dbg( "FileController Contents: \n\"" + str( self.fcontrol ) + "\n" )
    try:
        # a control file may iterate through to completion according to
        # the amount set in num_runs
        for my_ndx in range(self.ctrl_item.iterations):
            if self.is_terminated():
                break
            try:
                my_item_ndx = 0
                # each item in the control set exists as a dictionary
                for my_item_dict in self.fcontrol:
                    try:
                        if self.is_terminated():
                            break
                        my_item_ndx += 1
                        my_ctrl_item = ControlItem()
                        my_ctrl_item.parent_fctrl = self.parent_fctrl
                        my_ctrl_item.fctrl_item = str(my_item_dict)
                        try:
                            my_ctrl_item.load(self.mAppsInfo, my_item_dict,
                                              self.ctrl_item)
                        except:
                            raise
                        my_item_type = my_ctrl_item.item_type()
                        my_controller = None
                        if my_item_type == ControlItemType.TaskItem:
                            my_controller = TaskController(
                                self.mProcessQueue, self.mAppsInfo)
                        elif my_item_type == ControlItemType.FileItem:
                            my_controller = FileController(
                                self.mProcessQueue, self.mAppsInfo,
                                self.mControlFileLocals)
                        else:
                            # NOTE(review): my_fctrl_name is not defined in
                            # this scope -- reaching this branch raises
                            # NameError rather than this Exception; confirm
                            # the intended variable (my_ctrl_item?).
                            raise Exception(
                                "\"" + my_fctrl_name
                                + "\": Unknown Item Type ...\nUnable to Process ... ")
                        if my_controller.load(my_ctrl_item):
                            my_controller.set_on_fail_proc(self.on_fail_proc)
                            my_controller.set_is_term_proc(self.is_term_proc)
                            my_controller.process()
                    except TypeError as arg_ex:
                        # malformed item: report and queue a summary error
                        Msg.err(str(arg_ex))
                        my_err_queue_item = SummaryErrorQueueItem({
                            "error": "Item #%s Contains an Invalid Type"
                                     % (str(my_item_ndx)),
                            "message": arg_ex,
                            "path": self.ctrl_item.file_path(),
                            "type": str(type(arg_ex))
                        })
                        if self.mProcessQueue.summary is not None:
                            self.mProcessQueue.summary.queue.enqueue(
                                my_err_queue_item)
                        Msg.blank()
                    except FileNotFoundError as arg_ex:
                        Msg.err(str(arg_ex))
                        my_err_queue_item = SummaryErrorQueueItem({
                            "error": arg_ex,
                            "message": "Control File Not Found ...",
                            "path": self.ctrl_item.file_path(),
                            "type": str(type(arg_ex))
                        })
                        if self.mProcessQueue.summary is not None:
                            self.mProcessQueue.summary.queue.enqueue(
                                my_err_queue_item)
                        Msg.blank()
                    except Exception as arg_ex:
                        Msg.error_trace(str(arg_ex))
                        Msg.err(str(arg_ex))
                        Msg.blank()
                    finally:
                        my_controller = None
                        my_item_dict = None
            except Exception as arg_ex:
                Msg.error_trace("[ERROR] - " + str(arg_ex))
                Msg.err(str(arg_ex))
            finally:
                pass
    finally:
        pass
    return True
Msg.dbg("Directory set to %s" % (PathUtils.current_dir())) if not PathUtils.chdir(my_module.frun_dir, False): Msg.dbg( "Directory Unchanged, using the current directory for output") my_module.run() Msg.dbg("Test Completed ....\n") Msg.blank() # sys.exit( 0 ) except Exception as arg_ex: from force_init import force_usage Msg.err("An Unhandled Error has Occurred during run of " + str(sys.argv[0])) traceback.print_exc(file=sys.stdout) Msg.error_trace(str(arg_ex)) apps_info.mCmdLineOpts.print_help() sys.exit(41) except: print("[ERROR] - An Unhandled Error has Occurred during run of " + str(sys.argv[0])) traceback.print_exc(file=sys.stdout) sys.exit(42) finally: if my_logfile is not None: my_hlog.close() sys.stdout = my_org_stdout # print( "STDOUT Reset ...." ) with open(my_logfile, "r") as my_hlog:
Msg.dbg("Directory set to %s" % (PathUtils.current_dir())) if not PathUtils.chdir(the_force_root, False): Msg.dbg( "Directory Unchanged, using the current directory for output" ) my_module.run() Msg.dbg("Test Completed ....\n") Msg.blank() sys.exit(40) except getopt.GetoptError as arg_ex: from force_init import force_usage Msg.error_trace("[ERROR] - " + str(arg_ex)) force_usage(UsageStr) sys.exit(41) except Exception as arg_ex: from force_init import force_usage Msg.err( "[ERROR] - An Unhandled Error has Occurred during run of " + str(sys.argv[0]) ) traceback.print_exc(file=sys.stdout) Msg.error_trace(str(arg_ex)) force_usage(UsageStr) sys.exit(42)