def populate(self, arg_dict):
    """Copy run-context values from *arg_dict* onto this instance.

    Required keys (KeyError when absent): fdir, test_root, mode, force_path,
    action, process_queue, force_cmd.  Optional keys fall back to their
    CtrlItmDefs defaults: process_max, iss_path, timeout.

    Returns:
        self, to allow chained construction.
    """
    self.work_dir = arg_dict[CtrlItmKeys.fdir]
    Msg.user("self.work_dir: %s" % (self.work_dir), "ParentData::populate()")
    self.test_root = arg_dict[CtrlItmKeys.test_root]
    self.mode = arg_dict[CtrlItmKeys.mode]
    self.force_path = arg_dict[CtrlItmKeys.force_path]
    self.action = arg_dict[CtrlItmKeys.action]
    self.process_queue = arg_dict[CtrlItmKeys.process_queue]
    self.force_cmd = arg_dict[CtrlItmKeys.force_cmd]

    self.process_max = arg_dict.get(CtrlItmKeys.process_max,
                                    CtrlItmDefs.process_max)
    self.iss_path = arg_dict.get(CtrlItmKeys.iss_path, CtrlItmDefs.iss_path)

    # An iss_path that does not already contain the test root is treated as
    # relative and anchored under the test root.
    if not SysUtils.found(self.iss_path.find(self.test_root)):
        self.iss_path = (
            PathUtils.include_trailing_path_delimiter(self.test_root)
            + PathUtils.exclude_leading_path_delimiter(self.iss_path))
    Msg.user("Final Iss Path: %s" % (self.iss_path), "ISS_PATH")

    self.timeout = arg_dict.get(CtrlItmKeys.timeout, CtrlItmDefs.timeout)
    return self
def commit(self):
    """Run any pending generate and simulate phases for this task.

    The per-task "STARTED" marker file is renamed to "PASS" or "FAIL"
    according to the return code of the last phase that actually ran
    (simulate when present, otherwise generate).

    Returns:
        (gen_count, gen_retcode, sim_count, sim_retcode) tuple, where the
        counts are 1 when the corresponding phase executed, else 0.
    """
    gen_count, gen_ret = 0, 0
    sim_count, sim_ret = 0, 0
    target_name = ""

    if self.has_generate():
        gen_count = 1
        gen_ret = self.commit_generate()

    if self.has_simulate():
        sim_count = 1
        sim_ret = self.commit_simulate()
        # Simulate ran: its outcome decides the final marker name.
        target_name = "%s%s" % (
            self.task_path, SysUtils.ifthen(bool(sim_ret), "PASS", "FAIL"))
    else:
        # No simulate phase: the generate outcome decides the marker name.
        target_name = "%s%s" % (
            self.task_path, SysUtils.ifthen(bool(gen_ret), "PASS", "FAIL"))

    source_name = "%s%s" % (self.task_path, "STARTED")
    PathUtils.move(source_name, target_name)

    return (gen_count, gen_ret, sim_count, sim_ret)
def load_task_info(self):
    """Derive task identity from self.frun_path.

    Expects a layout of .../<task_id>/<index>/<frun_file>; populates
    self.work_dir, self.task_id and self.task_index (as int).
    """
    self.task_id = None
    self.task_index = None
    self.work_dir = None

    # Peel the frun file name off to get the working directory, then walk
    # up one level at a time for the index and the task id.
    self.work_dir, _ = PathUtils.split_path(self.frun_path)
    parent_dir, index_str = PathUtils.split_dir(self.work_dir)
    _, self.task_id = PathUtils.split_dir(parent_dir)
    self.task_index = int(index_str)
def process_task_list(self):
    """Process every task file in self.task_list.

    The current directory is captured before each task and restored
    afterwards; a failure in one task is logged and does not stop the
    remaining tasks.
    """
    for task_file in self.task_list:
        saved_dir = PathUtils.current_dir()
        try:
            self.process_task_file(task_file)
        except Exception as ex:
            Msg.error_trace()
            Msg.err(str(ex))
            Msg.blank()
        finally:
            PathUtils.chdir(saved_dir)
def process_summary(self, sum_level=SummaryLevel.Fail):
    """Write the performance summary log (date, errors, group totals).

    Args:
        sum_level: summary verbosity selector; kept for interface
            compatibility (not read by this implementation).
    """
    my_file_name = "%sperformance_summary.log" % (
        PathUtils().include_trailing_path_delimiter(self.summary_dir))
    Msg.dbg("Master Log File: %s" % (my_file_name))
    my_utcdt = DateTime.UTCNow()
    try:
        # The with-statement closes the file on every path.  The original
        # code also called my_ofile.close() in a finally block, which was
        # redundant and raised AttributeError when open() itself failed
        # (my_ofile was still None).
        with open(my_file_name, "w") as my_ofile:
            my_ofile.write("Date: %s\n" % (DateTime.DateAsStr(my_utcdt)))
            my_ofile.write("Time: %s\n" % (DateTime.TimeAsStr(my_utcdt)))

            self.process_errors(my_ofile)
            my_total_count, my_total_elapsed = self.process_groups(my_ofile)

            Msg.blank("info")
            my_line = "Total Instructions Generated: %3d\n" % (my_total_count)
            my_line += "Total Elapsed Time: %0.3f\n" % (my_total_elapsed)
            # Guard against division by zero when no time elapsed.
            my_line += "Overall Instructions per Second: %0.3f\n" % (
                SysUtils.ifthen(bool(my_total_elapsed),
                                my_total_count / my_total_elapsed, 0))

            Msg.info(my_line)
            my_ofile.write(my_line)
    except Exception as arg_ex:
        Msg.error_trace()
        Msg.err("Error Processing Summary, " + str(arg_ex))
def start(self):
    """Entry point: build the configured predictor and run train/predict.

    When config['change_targets'] is set, sweeps every indicator directory
    as the prediction target (id -1 included) and prints per-target scores;
    otherwise runs a single train/predict pass.
    """
    config, email_json = ConfigLoader.get_config()

    # Choose the predictor implementation from the configuration.
    if config['ensemble']:
        predictor = Ensemble_Predictions(config)
    elif config['model_name'] in ['unet']:
        predictor = PatchPredictions(config)
    else:
        predictor = PixelPredictions(config)

    if config['change_targets']:
        indicator_dirs = PathUtils.get_indicator_directories(None)
        target_ids = list(range(-1, len(indicator_dirs)))
        scores = {}
        for target_id in target_ids:
            print(f'target id {target_id}')
            predictor.set_target_paths(target_id)
            max_score, avg_score = predictor.train_predict()
            scores[target_id] = {"max_score": round(max_score, 5),
                                 "avg_score": round(avg_score, 5)}
        for key, value in scores.items():
            print(f"For target id {key} the max score is {value['max_score']} and avg score is {value['avg_score']}")
    else:
        predictor.train_predict()
def exec_iss(self, arg_task_name):
    """Run the ISS simulator for the named task.

    Builds the simulator command from the task's ELF images — each one is
    appended only when the file exists on disk — executes it with the
    control item's timeout, and logs command and result.

    Returns:
        True when the simulator process ran to completion, False when
        launching it raised an exception.
    """
    # Primary ELF image, included only if present.
    elf = "%s.Default.ELF" % (arg_task_name)
    elf = SysUtils.ifthen(PathUtils.check_file(elf), " %s" % elf, "")

    # Secondary ELF image, likewise optional.
    elf_ns = "%s.Secondary.ELF" % (arg_task_name)
    elf_ns = SysUtils.ifthen(PathUtils.check_file(elf_ns), " %s" % elf_ns, "")

    cmd = self.sim_cmd % (elf, elf_ns)
    try:
        Msg.info("ISSCommand = " + str({"iss-command": cmd}))
        # stdout and stderr both go to the simulator log.
        result = SysUtils.exec_process(cmd, self.sim_log, self.sim_log,
                                       self.ctrl_item.timeout, True)
        ret_code = int(result[0])
        std_out = str(result[1])
        std_err = str(result[2])
        started = str(result[3])
        ended = str(result[4])
        Msg.info("ISSResult = " + str({
            "iss-retcode": ret_code,
            "iss-log": self.sim_log,
            "iss-stdout": std_out,
            "iss-stderr": std_err,
            "iss-start": started,
            "iss-end": ended
        }))
    except Exception as ex:
        Msg.error_trace("ISS Execute Failure")
        Msg.err("ISS did not properly execute, Reason: %s" % (str(ex)))
        return False
    return True
def check_full_path(self, arg_fctrl_dir):
    """Handle a fully qualified (absolute) control-file directory.

    Args:
        arg_fctrl_dir: candidate directory for the control file.

    Returns:
        True when the directory is absolute and holds the control file
        (self.fctrl_dir is updated); False when the directory is not an
        absolute path, so other lookup strategies should be tried.

    Raises:
        Exception: the path is absolute but the control file is not there —
            an absolute path admits no fallback location.
    """
    Msg.dbg("Checking for control file with real path[%s][%s]"
            % (str(arg_fctrl_dir), str(self.fctrl_name)))
    if arg_fctrl_dir.startswith("/"):
        # A leading path delimiter means a real path from the system root;
        # the control file must exist at exactly that location.
        if PathUtils.check_found(
                PathUtils.append_path(arg_fctrl_dir, self.fctrl_name)):
            self.fctrl_dir = arg_fctrl_dir
            return True
        # Fixed the garbled original message
        # ("count not be located Not Found at").
        raise Exception(
            "Control Item File: [%s] could not be located at [%s]"
            % (self.fctrl_name, arg_fctrl_dir))
    return False
def __init__(self, output_dir, indicators_path, patch_shape,
             img_downscale_factor, tuning):
    """Store patch-generation settings and discover indicator directories.

    Args:
        output_dir: destination directory for generated patches.
        indicators_path: root directory with one subdirectory per indicator.
        patch_shape: side length of the square patches; stored as a
            (patch_shape, patch_shape) tuple.
        img_downscale_factor: factor by which source images are downscaled.
        tuning: whether this run is a hyper-parameter tuning run.
    """
    self.output_dir = output_dir
    # Patches are square — expand the scalar into a 2-tuple.
    self.patch_shape = patch_shape, patch_shape
    self.indicators_path = indicators_path
    self.indicator_directories = PathUtils.get_indicator_directories(
        self.indicators_path)
    self.img_downscale_factor = img_downscale_factor
    self.tuning = tuning
def process(self):
    """Resolve this control item's task file path and process it."""
    # Absolute task file path = test_root / fctrl_dir / fctrl_name.
    task_file = PathUtils.include_trailing_path_delimiter(
        self.ctrl_item.parent_data.test_root)
    task_file += PathUtils.include_trailing_path_delimiter(
        self.ctrl_item.fctrl_dir)
    task_file += self.ctrl_item.fctrl_name

    # Task index is the trailing component of the control directory; the
    # task name is the control file name without its ".py" suffix.
    _, task_ndx = PathUtils.split_path(self.ctrl_item.fctrl_dir)
    task_name = self.ctrl_item.fctrl_name.replace(".py", "")

    self.process_task_file(task_file, task_name, task_ndx)
def load(self, arg_ctrl_item):
    """Load the control item and expand its file spec into a task list.

    Returns:
        True always; the resulting task list may be empty.
    """
    super().load(arg_ctrl_item)
    # Glob the control item's file spec under the test root.
    file_spec = (self.ctrl_item.parent_data.test_root
                 + self.ctrl_item.fctrl_dir
                 + self.ctrl_item.fctrl_name)
    self.task_list = PathUtils.list_files(file_spec)
    return True
def check_ctrl_dir(self, arg_fctrl_dir):
    """Look for the control file at a path relative to the test root.

    Returns:
        True when the control file exists there (self.fctrl_dir updated),
        False otherwise.
    """
    Msg.user(
        "Checking for control file at relative path[%s] from test root"
        % (arg_fctrl_dir), "RELATIVE PATH")
    candidate_dir = arg_fctrl_dir
    if not SysUtils.found(candidate_dir.find(self.parent_data.test_root)):
        # The candidate does not already contain the test root — prepend it.
        Msg.user("Building Control Directory ...", "RELATIVE PATH")
        candidate_dir = PathUtils.append_path(self.parent_data.test_root,
                                              candidate_dir)
    if PathUtils.check_found(
            PathUtils.append_path(candidate_dir, self.fctrl_name)):
        self.fctrl_dir = candidate_dir
        Msg.user("Setting Control Directory: %s ..." % (self.fctrl_dir),
                 "RELATIVE PATH")
        return True
    return False
def start(self):
    """Score the configured system predictions against reference data."""
    env_path = self.config['path']
    sys_data_path = f"{env_path['predictions']}{self.config['predictions']}/"
    image_ref_csv_path, ref_data_path, _, _ = PathUtils.get_paths(
        self.config)

    # Select the slice of image references configured for this run.
    starting_index, ending_index = JsonLoader.get_data_size(self.config)
    builder = ImgRefBuilder(image_ref_csv_path)
    img_refs = builder.get_img_ref(starting_index, ending_index)

    data = MediforData.get_data(img_refs, sys_data_path, ref_data_path)
    self.model_scoring(data)
def check_work_dir(self, arg_fctrl_dir):
    """Look for the control file relative to the current work directory.

    Args:
        arg_fctrl_dir: directory fragment taken from the control item.

    Returns:
        True when the control file is found (self.fctrl_dir updated),
        False when there is no work directory or the file is absent.
    """
    Msg.dbg(
        "Checking for control file using implict path[%s] of current control file"
        % (str(arg_fctrl_dir)))
    # PEP 8: 'is not None' instead of 'not ... is None'.
    if self.parent_data.work_dir is not None:
        my_tmp_dir = PathUtils.append_path(self.parent_data.work_dir,
                                           arg_fctrl_dir)
        if not SysUtils.found(my_tmp_dir.find(self.parent_data.test_root)):
            # Candidate is still relative — prepend the test root.
            my_tmp_dir = PathUtils.append_path(self.parent_data.test_root,
                                               my_tmp_dir)
        if PathUtils.check_found(
                PathUtils.append_path(my_tmp_dir, self.fctrl_name)):
            self.fctrl_dir = my_tmp_dir
            return True
    return False
def process_task_file(self, arg_task_file):
    """Create/enter the task's base directory and process the task file.

    Always returns to the parent directory afterwards, even on error.

    Args:
        arg_task_file: path to the task (e.g. test template) file.
    """
    try:
        # NOTE: a task file can be, but is not limited to being, a test
        # template file.
        # Set the base task directory from the task file name and change
        # into it, creating it when necessary.
        my_task_name = PathUtils.base_name(arg_task_file)
        my_task_dir = my_task_name.replace(".py", "")
        PathUtils.chdir(my_task_dir, True)
        # Check for existing sub-directories and extract the task iteration
        # count to prevent unintended modifications to the original options
        # passed on the command line for this task or acquired from a
        # control file item.  (The original comment here had an import
        # statement spliced into its text by a bad merge; reconstructed.)
        self.process_task(arg_task_file)
    finally:
        PathUtils.chdir("..")
def __init__(self, arg_frun_path, arg_ctrl_item, arg_content):
    """Build a process-queue item from a generated frun control file.

    Args:
        arg_frun_path: path to the generated "_def_frun.py" control file.
        arg_ctrl_item: control item this queue entry originates from.
        arg_content: text content of the control file.
    """
    super().__init__()
    self.frun_path = arg_frun_path
    # The work directory is the frun file's containing directory.
    self.work_dir, _ = PathUtils.split_path(arg_frun_path)
    self.ctrl_item = arg_ctrl_item
    # Mirror the identifying fields of the originating control item.
    self.parent_fctrl = arg_ctrl_item.parent_fctrl
    self.fctrl_item = arg_ctrl_item.fctrl_item
    self.item_group = arg_ctrl_item.group
    self.content = arg_content
def extract_iss_data(self, arg_item_dict):
    """Extract ISS options from a raw control-item dictionary.

    When the item supplies its own iss path, anchor it under the test root
    if it is relative and push it into the parent data.  When no iss path
    is given, the parent data's current iss path remains in effect.
    """
    iss_data = arg_item_dict.get(CtrlItmKeys.iss, CtrlItmDefs.iss)
    iss_path = iss_data.get(CtrlItmKeys.iss_path, None)

    if iss_path is not None:
        Msg.dbg("Iss Path: %s, Test Root: %s"
                % (iss_path, self.parent_data.test_root))
        # Absolute paths already contain the test root; anything else is
        # treated as relative and anchored under it.
        if not SysUtils.found(iss_path.find(self.parent_data.test_root)):
            Msg.dbg("Test Root Not Found ...")
            iss_path = (
                PathUtils.include_trailing_path_delimiter(
                    self.parent_data.test_root)
                + PathUtils.exclude_leading_path_delimiter(iss_path))
        self.parent_data.update_iss_path(iss_path)
        Msg.dbg("Final Iss Path: %s" % (self.parent_data.iss_path))
def start(self):
    """Generate image patches for the configured data set."""
    img_ref_csv_path, ref_data_path, targets_path, indicators_path = (
        PathUtils.get_paths(self.config))

    # Select the configured slice of image references.
    starting_index, ending_index = JsonLoader.get_data_size(self.config)
    builder = ImgRefBuilder(img_ref_csv_path)
    img_refs = builder.get_img_ref(starting_index, ending_index)

    patches_folder = self.config['path']['outputs'] + "patches/"
    output_dir = FolderUtils.create_patch_output_folder(
        self.patch_shape, self.img_downscale_factor, patches_folder,
        PathUtils.get_indicator_directories(indicators_path),
        self.config['tuning'], self.config['data_year'],
        self.config['data_prefix'])
    LogUtils.init_log(output_dir)

    generator = PatchGenerator(
        output_dir=output_dir,
        indicators_path=indicators_path,
        img_downscale_factor=self.img_downscale_factor,
        patch_shape=self.patch_shape,
        tuning=self.config["tuning"])
    generator.create_img_patches(img_refs, targets_path)
def __init__(self, config, model_name=None, output_dir=None, target_id=None):
    """Set up patch-based prediction paths and image references.

    Args:
        config: run configuration dictionary.
        model_name: optional model-name override passed to the base class.
        output_dir: optional output-directory override for the base class.
        target_id: optional target indicator id; a falsy value falls back
            to config['target_id'].
    """
    super().__init__(config, model_name, output_dir)
    (self.patches_path, self.patch_img_ref_path, self.indicators_path,
     img_ref_csv, self.ref_data_path) = PathUtils.get_paths_for_patches(
        self.config)
    self.indicator_directories = PathUtils.get_indicator_directories(
        self.indicators_path)
    # NOTE(review): 'or' means a falsy target_id (e.g. 0) also falls back
    # to the config value — confirm that is intended.
    self.set_target_paths(target_id or config['target_id'])
    self.starting_index, self.ending_index = JsonLoader.get_data_size(
        self.config)
    self._prepare_img_refs(self.patch_img_ref_path)
def load_force_elog(self):
    """Scan the force error log for a "[fail]" line after a failed run.

    Sets self.force_msg to the first failure line's text (with the
    "[fail]" tag stripped), or leaves it None when the run succeeded or no
    failure line is present.
    """
    self.force_msg = None
    my_elog = "%s%s" % (
        PathUtils.include_trailing_path_delimiter(self.work_dir),
        self.force_elog)

    if SysUtils.failed(self.force_retcode):
        Msg.fout(my_elog, "dbg")
        # The with-statement closes the file; the original additionally
        # called my_flog.close() inside a redundant try/finally.
        with open(my_elog, "r") as my_flog:
            for my_line in my_flog:
                if SysUtils.found(my_line.find("[fail]")):
                    self.force_msg = my_line.replace("[fail]", "").strip()
                    break
def instruction_counts(self):
    """Parse generated-instruction counts out of the force gen log.

    Reads self.force_log under self.work_dir and populates self.count,
    self.secondary and self.default from the " Instructions Generated"
    lines.

    Returns:
        0 when a count cannot be parsed (ValueError); otherwise None.

    Raises:
        Exception: no instruction-count line exists in the log.
    """
    my_lines = None
    my_glog = "%s%s" % (
        PathUtils.include_trailing_path_delimiter(self.work_dir),
        self.force_log)
    Msg.user("Path: %s" % my_glog)

    # The with-statement closes the file; the explicit close() the original
    # made inside the block was redundant.
    with open(my_glog, "r") as my_log:
        my_lines = my_log.readlines()
        Msg.dbg("Line %d: %s" % (len(my_lines), my_lines[-1]))

    try:
        my_results = [my_tmp for my_tmp in my_lines
                      if re.search(' Instructions Generated', my_tmp)]
        Msg.lout(my_results, "dbg")
        if not my_results:
            raise Exception("Instruction Count Not Found in \"gen.log\"")

        # There is at least one instruction-count line; classify each one.
        for my_line in my_results:
            my_line = my_line.strip()
            # The instruction type (Total, Default, Secondary) sits between
            # the closing ']' and the word "Instr".
            my_lpos = my_line.find(']')
            my_rpos = my_line.find("Instr")
            my_type = PerformanceInstructionType.instruction_type(
                (my_line[my_lpos + 1:my_rpos - 1]).strip())
            # The count follows the ':' separator.
            my_lpos = my_line.find(':')
            my_count = int(my_line[my_lpos + 2:].strip())
            if my_type == PerformanceInstructionType.Total:
                self.count = my_count
            elif my_type == PerformanceInstructionType.Secondary:
                self.secondary = my_count
            elif my_type == PerformanceInstructionType.Default:
                self.default = my_count
    except ValueError:
        Msg.error_trace()
        # BUG FIX: the original formatted this message with
        # int(my_lines[-1]), which itself raised ValueError inside the
        # handler; report the raw line text instead.
        Msg.err("Unable to extract instruction count from %s"
                % (my_lines[-1]))
        return 0
def add_image_width_height(img_refs, config):
    """Fill width/height fields on each img_ref from the index CSV.

    Matches CSV rows to img_refs by probe_file_id and stops reading as soon
    as every img_ref has been updated.

    Args:
        img_refs: image-reference objects to mutate in place.
        config: configuration dict used to locate the index CSV.
    """
    index_csv_path = PathUtils.get_index_csv_path(config)
    with open(index_csv_path, 'r') as f:
        reader = csv.reader(f, delimiter=',')
        headers = next(reader)
        img_id_col = headers.index('image_id')
        orig_height_col = headers.index('image_height_original')
        orig_width_col = headers.index('image_width_original')
        height_col = headers.index('image_height')
        width_col = headers.index('image_width')

        updated = 0
        for row in reader:
            # Find the reference this CSV row describes, if any.
            img_ref = next(
                (i for i in img_refs
                 if i.probe_file_id == row[img_id_col]), None)
            if img_ref is None:  # PEP 8: 'is None' (was '== None')
                continue
            # Dimensions are stored as floats in the CSV; coerce via float.
            img_ref.img_orig_height = int(float(row[orig_height_col]))
            img_ref.img_orig_width = int(float(row[orig_width_col]))
            img_ref.img_height = int(float(row[height_col]))
            img_ref.img_width = int(float(row[width_col]))
            updated += 1
            # Early exit once all references have been filled in.
            if updated == len(img_refs):
                break
def start(self):
    """Convert indicator CSV data into per-indicator images."""
    data_path = f"{self.config['path']['data']}{self.config['data_prefix']}{self.config['data_year']}"
    csv_path = PathUtils.get_csv_data_path(self.config)
    df = pd.read_csv(csv_path)
    # Every column except the trailing three is an indicator column.
    indicators = df.columns[:-3]

    output_path = self.config['path']['outputs'] + "csv_to_image/"
    output_dir, index_dir, img_ref_dir = (
        FolderUtils.create_csv_to_image_output_folder(
            output_path, self.config['data_prefix'],
            self.config['data_year'], indicators))
    FolderUtils.csv_to_image_copy_csv_files(data_path, index_dir,
                                            img_ref_dir)
    LogUtils.init_log(output_dir)

    generator = CsvToImageGenerator(
        config=self.config,
        output_dir=output_dir,
        df=df,
        indicators=indicators,
        index_dir=index_dir,
        img_ref_dir=img_ref_dir)
    generator.generate_images()
def get_data_generators(self, missing_probe_file_ids):
    """Build (train, test, valid) data generators for the configured model.

    Three mutually exclusive generator families are selected:
    Keras image models, pixel-wise image data, or CSV data.

    Args:
        missing_probe_file_ids: collection passed through to the test
            generators so they can account for probes with no data.

    Returns:
        (train_gen, test_gen, valid_gen) tuple.
    """
    if self._is_keras_img_model():
        # Keras image model: patch-based image generators.
        from data_generators.img_train_data_generator import ImgTrainDataGenerator
        from data_generators.img_test_data_generator import ImgTestDataGenerator
        targets_path = os.path.join(self.targets_path, "manipulation","mask")
        train_gen = ImgTrainDataGenerator(
            data_size=len(self.train_img_refs),
            img_refs = self.train_img_refs,
            patch_shape = self.patch_shape,
            batch_size = self.train_batch_size,
            indicator_directories = self.indicator_directories,
            indicators_path = self.indicators_path,
            targets_path = targets_path,
            )
        test_gen = ImgTestDataGenerator(
            data_size=len(self.test_img_refs),
            img_refs = self.test_img_refs,
            patch_shape = self.patch_shape,
            batch_size = self.test_batch_size,
            indicator_directories = self.indicator_directories,
            indicators_path = self.indicators_path,
            targets_path = targets_path,
            missing_probe_file_ids = missing_probe_file_ids
            )
        # Validation reuses the test refs through the *train* generator
        # (training batch size, no missing-probe handling).
        valid_gen = ImgTrainDataGenerator(
            data_size=len(self.test_img_refs),
            img_refs = self.test_img_refs,
            batch_size = self.train_batch_size,
            patch_shape = self.patch_shape,
            indicator_directories = self.indicator_directories,
            indicators_path = self.indicators_path,
            targets_path = targets_path,
            )
    elif self.config['data_type'] == "image":
        # Pixel-level image generators with optional downscaling.
        from data_generators.img_pixel_train_data_generator import ImgPixelTrainDataGenerator
        from data_generators.img_pixel_test_data_generator import ImgPixelTestDataGenerator
        train_gen = ImgPixelTrainDataGenerator(
            data_size=self.train_data_size,
            img_refs = self.train_img_refs,
            patch_shape = self.patch_shape,
            batch_size = self.train_batch_size,
            indicator_directories = self.indicator_directories,
            indicators_path = self.indicators_path,
            targets_path = self.targets_path,
            image_downscale_factor=self.image_downscale_factor
            )
        test_gen = ImgPixelTestDataGenerator(
            data_size=self.test_data_size,
            img_refs = self.test_img_refs,
            patch_shape = self.patch_shape,
            batch_size = self.test_batch_size,
            indicator_directories = self.indicator_directories,
            indicators_path = self.indicators_path,
            targets_path = self.targets_path,
            missing_probe_file_ids = missing_probe_file_ids,
            image_downscale_factor=self.image_downscale_factor
            )
        # Validation again reuses the test refs through the train generator.
        valid_gen = ImgPixelTrainDataGenerator(
            data_size=self.test_data_size,
            img_refs = self.test_img_refs,
            batch_size = self.train_batch_size,
            patch_shape = self.patch_shape,
            indicator_directories = self.indicator_directories,
            indicators_path = self.indicators_path,
            targets_path = self.targets_path,
            image_downscale_factor=self.image_downscale_factor
            )
    elif self.config['data_type'] == 'csv':
        # CSV data: one shared DataFrame feeds all three generators.
        csv_path = PathUtils.get_csv_data_path(self.config)
        df = pd.read_csv(csv_path)
        from data_generators.csv_pixel_test_data_generator import CsvPixelTestDataGenerator
        from data_generators.csv_pixel_train_data_generator import CsvPixelTrainDataGenerator
        from data_generators.csv_nn_data_generator import CsvNnDataGenerator
        train_gen = CsvPixelTrainDataGenerator(
            data_size=self.train_data_size,
            data = df,
            img_refs = self.train_img_refs,
            batch_size = self.train_batch_size
            )
        valid_gen = CsvPixelTrainDataGenerator(
            data_size=self.test_data_size,
            data= df,
            img_refs = self.test_img_refs,
            batch_size = self.train_batch_size
            )
        test_gen = CsvPixelTestDataGenerator(
            data_size=self.test_data_size,
            data = df,
            img_refs = self.test_img_refs,
            batch_size = self.test_batch_size,
            )
    # q,w, id = test_gen.__getitem__(0)
    # a,b, id = train_gen.__getitem__(0)
    return train_gen, test_gen, valid_gen
# with UnitTest_HiStack(): pass # checks for usage requests and performs the initial validation of the command line def check_usage(self): # validate arguments if self.option_def(CmdLine.Switches[CmdLine.help], False, CmdLine.Help): # if self.get_option( ["-h","--" + CmdLine.Switches[CmdLine.help].replace("=", "" )], False ): from force_init import force_usage force_usage(UsageStr) if __name__ == "__main__": # save current working directory my_pwd = PathUtils.current_dir() try: my_module = UnitTestRun(the_force_root) Msg.info("\nForce Path: %s" % (str(the_force_root))) Msg.info("Original Directory: " + my_pwd) Msg.dbg("Processing Command Line and Loading Control File") my_module.load() Msg.dbg("Directory set to %s" % (PathUtils.current_dir())) if not PathUtils.chdir(the_force_root, False): Msg.dbg( "Directory Unchanged, using the current directory for output")
def file_path(self):
    """Return the absolute path of this control file under the test root."""
    return "%s%s%s" % (
        self.parent_data.test_root,
        PathUtils.include_trailing_path_delimiter(self.fctrl_dir),
        self.fctrl_name)
def resolve_file_location(self):
    """Locate this item's control file and normalize self.fctrl_dir.

    The file name may carry its own path component; when it does not, the
    current control directory is used.  Candidate locations are then tried
    in order: absolute path, parent ("..") path, current work directory,
    then the specified control directory.  On success the resolved
    directory is stored relative to the test root with a trailing path
    delimiter.

    Returns:
        True when the control file was located.

    Raises:
        Exception: no directory could be determined, or the file was not
            found at any candidate location (after queueing a summary
            error item for task/file items).
    """
    # Split any path component off the control file name.
    my_fctrl_dir, self.fctrl_name = PathUtils.split_path(
        str(self.fctrl_name))
    Msg.user(
        "Extracted File Path: %s, Control File Path: %s, Extracted File Name: %s"
        % (str(my_fctrl_dir), str(self.fctrl_dir), str(self.fctrl_name)),
        "FCTRL-DIR")
    Msg.user(
        "1 - Control Directory: %s, Force Directory: %s, Test Root: %s, Work Dir: %s, Control File: %s, self.fctrl_dir: %s"
        % (my_fctrl_dir, self.parent_data.force_path,
           self.parent_data.test_root, self.parent_data.work_dir,
           self.fctrl_name, self.fctrl_dir), "FCTRL-DIR")

    # if the name does not contain a path use the contol directory
    if my_fctrl_dir is None:
        my_fctrl_dir = self.fctrl_dir

    Msg.user(
        "Control Directory: %s, Force Directory: %s, Test Root: %s, Work Dir: %s, Control File: %s"
        % (my_fctrl_dir, self.parent_data.force_path,
           self.parent_data.test_root, self.parent_data.work_dir,
           self.fctrl_name), "FCTRL-DIR")

    # because of the requirement of not knowing and needing to discover the
    # directory situation it is necessary to search for the control file
    my_tmp = None
    my_tmp_dir = None

    # if a directory was specified in the control name or as part of the
    # control item it is necessary to check for either an absolute path or
    # relative parent path
    if my_fctrl_dir is None:
        raise Exception(
            "Control File Location was not specified, File[%s]"
            % (self.fctrl_name))

    # Try each location strategy in priority order; each helper updates
    # self.fctrl_dir on success.
    if self.check_full_path(my_fctrl_dir):
        Msg.user("Using Real Path as Control Directory", "FILE_LOCATION")
        pass
    elif self.check_parent_dir(my_fctrl_dir):
        Msg.user("Using Parent Directory as Control Directory",
                 "FILE_LOCATION")
        pass
    elif self.check_work_dir(my_fctrl_dir):
        Msg.user(
            "Using Current Control File Location as Control Directory",
            "FILE_LOCATION")
        pass
    elif self.check_ctrl_dir(my_fctrl_dir):
        Msg.user("Using Specified Control Directory as Control Directory",
                 "FILE_LOCATION")
        pass
    else:
        # Nothing matched: queue a summary error item appropriate to the
        # item type, then raise.
        if self.item_type() == ControlItemType.TaskItem:
            my_err_queue_item = SummaryErrorQueueItem({
                "error": "Template Not Found at Specified Location",
                "message": "Template File Not Found ...",
                "path": self.file_path(),
                "type": str("FileNotFoundError")
            })
            self.summary().queue.enqueue(my_err_queue_item)
        elif self.item_type() == ControlItemType.FileItem:
            my_err_queue_item = SummaryErrorQueueItem({
                "error": "FileNotFoundError",
                "message": "Control File Not Found at Specified Location",
                "path": self.file_path(),
                "type": str("FileNotFoundError")
            })
            self.summary().queue.enqueue(my_err_queue_item)
        raise Exception("File[%s] Not Found at Location Specified[%s]"
                        % (self.fctrl_name, my_fctrl_dir))

    # remove the test root if present so fctrl_dir is root-relative
    if SysUtils.found(self.fctrl_dir.find(self.parent_data.test_root)):
        self.fctrl_dir = self.fctrl_dir.replace(self.parent_data.test_root,
                                                "")

    # add the trailing path delimiter
    self.fctrl_dir = PathUtils.include_trailing_path_delimiter(
        self.fctrl_dir)
    Msg.user("Delimited Control Directory: %s" % (self.fctrl_dir),
             "FILE_LOCATION")
    return True
def check_parent_dir(self, arg_fctrl_dir):
    """Look for the control file via a parent-relative ("..") path.

    Strips leading ".." components while walking both the full work
    directory and the test root up one level per component, then checks
    for the control file first under the reduced work directory and then
    under the reduced test root.

    Args:
        arg_fctrl_dir: candidate directory, possibly starting with "..".

    Returns:
        True when the control file is found (self.fctrl_dir updated);
        False when arg_fctrl_dir does not start with "..".

    Raises:
        Exception: the path was parent-relative but the control file was
            not found at either candidate location.
    """
    Msg.dbg("Checking for control file in parent directory")
    Msg.user(
        "Test Root[%s], Work Dir[%s]"
        % (str(self.parent_data.test_root), str(self.parent_data.work_dir)),
        "CHECK-PARENT-DIR")

    if arg_fctrl_dir.startswith(".."):
        my_search_dir = arg_fctrl_dir
        my_work_dir = PathUtils.append_path(self.parent_data.test_root,
                                            self.parent_data.work_dir)
        my_test_root = self.parent_data.test_root
        Msg.user(
            "Updated Test Root[%s], Full Work Dir[%s], Search Dir[%s]"
            % (my_test_root, my_work_dir, my_search_dir),
            "CHECK-PARENT-DIR")

        while my_search_dir.startswith(".."):
            # First get the parent directory.
            # The search [updated] directory started with [..]; split path
            # for both the work directory and the search directory — the
            # fact that the [updated] search directory still begins with a
            # [..] means the work directory must be walked up as well.
            my_test_root, my_dumy = PathUtils.split_dir(my_test_root)
            my_work_dir, my_dumy = PathUtils.split_dir(my_work_dir)
            # Drop the leading "../" (three characters) from the search dir.
            my_search_dir = my_search_dir[3:]
            Msg.user(
                "Updated Test Root[%s], Full Work Dir[%s], Search Dir[%s]"
                % (my_test_root, my_work_dir, my_search_dir),
                "CHECK-PARENT-DIR")

        # Candidate 1: relative to the reduced work directory.
        my_filepath = str(
            PathUtils.append_path(
                my_work_dir,
                PathUtils.append_path(my_search_dir, self.fctrl_name)))
        Msg.user("File Path[%s] ... " % (my_filepath), "CHECK-PARENT-DIR")
        if PathUtils.check_found(my_filepath):
            self.fctrl_dir = PathUtils.append_path(my_work_dir,
                                                   my_search_dir)
            return True

        # Candidate 2: relative to the reduced test root.
        my_filepath = str(
            PathUtils.append_path(
                my_test_root,
                PathUtils.append_path(my_search_dir, self.fctrl_name)))
        Msg.user("File Path[%s] ... " % (my_filepath), "CHECK-PARENT-DIR")
        if PathUtils.check_found(my_filepath):
            self.fctrl_dir = PathUtils.append_path(my_test_root,
                                                   my_search_dir)
            return True

        raise Exception(
            "Control Item File[%s] Not Found in Parent Directory: [%s]"
            % (self.fctrl_name, arg_fctrl_dir))
    return False
def process_task(self, arg_task_file):
    """Run one iteration of a task in a fresh numbered subdirectory.

    Creates subdirectory "%05d" for the next iteration index, touches a
    "STARTED" marker there, writes the generated control file, and — on a
    successful write — enqueues a ProcessQueueItem on the parent's process
    queue.  Always returns to the parent directory.

    Args:
        arg_task_file: path of the task template file to process.

    Raises:
        Exception: re-raised after logging so the failure is not added to
            the summary instance.
    """
    try:
        # get the subdirectory index
        my_ndx = int(PathUtils.next_subdir())
        # create subdirectory and change into it
        PathUtils.chdir("%05d" % my_ndx, True)
        # save the task template file name with path to the control item
        self.ctrl_item.fname = arg_task_file
        # write out the control file
        # if the write was successful then enqueue the new control file
        # name with real path
        my_ctrl_file = "%s_def_frun.py" % (
            PathUtils.include_trailing_path_delimiter(
                PathUtils.current_dir()))
        # Marker file signalling this iteration is in flight; renamed to
        # PASS/FAIL when the task commits.
        PathUtils.touch("%sSTARTED"
                        % PathUtils.include_trailing_path_delimiter(
                            PathUtils.current_dir()))
        my_content = self.prepare(arg_task_file)
        if self.write_control_file(my_ctrl_file, my_content):
            my_queue_item = ProcessQueueItem(
                my_ctrl_file, self.ctrl_item, my_content)
            self.ctrl_item.parent_data.process_queue.enqueue(my_queue_item)
    except Exception as arg_ex:
        Msg.error_trace()
        Msg.err(str(arg_ex))
        # reraise to prevent adding to summary instance
        raise
    finally:
        PathUtils.chdir("..")
def __init__(self, config, model_name=None, output_dir=None):
    """Resolve data paths and image references for pixel-level prediction.

    Args:
        config: run configuration dictionary.
        model_name: optional model-name override passed to the base class.
        output_dir: optional output-directory override for the base class.
    """
    super().__init__(config, model_name, output_dir)
    (img_ref_csv_path, self.ref_data_path, self.targets_path,
     self.indicators_path) = PathUtils.get_paths(self.config)
    self.indicator_directories = PathUtils.get_indicator_directories(
        self.indicators_path)
    self.image_downscale_factor = config['image_downscale_factor']
    self._prepare_img_refs(img_ref_csv_path)