handler1.setFormatter(log_formatter)
log.addHandler(handler1)

handler2 = logging.StreamHandler()
handler2.setFormatter(log_formatter)
log.addHandler(handler2)

log.setLevel(logging.INFO)

report_log = logging.getLogger("report_log")
report_formatter = logging.Formatter('%(message)s')
report_handler = logging.FileHandler(
    "{report_file_name}.csv".format(report_file_name=report_file_name), 'w')
report_handler.setFormatter(report_formatter)
report_log.addHandler(report_handler)
report_log.setLevel(logging.INFO)

report_log.info(Report.get_header() + "," + ConfigHandler.get_header())

allow_print_rem_designs = False


def printRemDesignList():
    t = threading.Timer(print_rem_time, printRemDesignList)
    t.start()
    if allow_print_rem_designs:
        print("Remaining designs (design, # of times): ", rem_designs)
    if len(rem_designs) == 0:
        t.cancel()


if print_rem_time is not None:
    printRemDesignList()
def run_design(designs_queue):
    while not designs_queue.empty():
        design, config, tag, design_name = designs_queue.get(
            timeout=3)  # 3s timeout
        run_path = utils.get_run_path(design=design, tag=tag)
        log.info('{design} {tag} running'.format(design=design, tag=tag))

        # Stream the flow's output only when a single design is being run.
        command = ""
        if show_log_output:
            command = './flow.tcl -design {design} -tag {tag} -overwrite -config_tag {config} -no_save'.format(
                design=design, tag=tag, config=config)
        else:
            command = './flow.tcl -design {design} -tag {tag} -overwrite -disable_output -config_tag {config} -no_save'.format(
                design=design, tag=tag, config=config)

        try:
            if show_log_output:
                process = subprocess.Popen(command.split(),
                                           stderr=subprocess.PIPE,
                                           stdout=subprocess.PIPE)
                while True:
                    output = process.stdout.readline()
                    if not output:
                        break
                    print(str(output.strip())[2:-1])
            else:
                subprocess.check_output(command.split(), stderr=subprocess.PIPE)
        except subprocess.CalledProcessError as e:
            error_msg = e.stderr.decode(sys.getfilesystemencoding())
            log.error('{design} {tag} failed check {run_path}error.txt'.format(
                design=design, run_path=run_path, tag=tag))
            with open(run_path + "error.txt", "w") as error_file:
                error_file.write(error_msg)

        log.info('{design} {tag} finished\t Writing report..'.format(
            design=design, tag=tag))
        params = ConfigHandler.get_config(design, tag)
        report = Report(design, tag, design_name, params).get_report()
        report_log.info(report)
        with open(run_path + "final_report.txt", "w") as report_file:
            report_file.write(Report.get_header() + "," + ConfigHandler.get_header())
            report_file.write("\n")
            report_file.write(report)

        if args.benchmark is not None:
            log.info('{design} {tag} Comparing vs benchmark results..'.format(
                design=design, tag=tag))
            design_benchmark_comp_cmd = "python3 scripts/compare_regression_design.py -b {benchmark} -r {this_run} -o {output_report} -d {design} -rp {run_path}".format(
                benchmark=args.benchmark,
                this_run=report_file_name + ".csv",
                output_report=report_file_name + "_design_test_report.csv",
                design=design,
                run_path=run_path)
            subprocess.check_output(design_benchmark_comp_cmd.split())

        if args.clean:
            log.info('{design} {tag} Cleaning tmp Directory..'.format(
                design=design, tag=tag))
            moveUnPadded_cmd = "cp {run_path}/tmp/merged_unpadded.lef {run_path}/results/".format(
                run_path=run_path)
            subprocess.check_output(moveUnPadded_cmd.split())
            clean_cmd = "rm -rf {run_path}/tmp/".format(run_path=run_path)
            subprocess.check_output(clean_cmd.split())
            log.info('{design} {tag} Cleaning tmp Directory Finished'.format(
                design=design, tag=tag))

        if tarList[0] != "":
            log.info('{design} {tag} Compressing Run Directory..'.format(
                design=design, tag=tag))
            try:
                if 'all' in tarList:
                    tarAll_cmd = "tar -cvzf {run_path}../{design_name}_{tag}.tar.gz {run_path}".format(
                        run_path=run_path, design_name=design_name, tag=tag)
                    subprocess.check_output(tarAll_cmd.split())
                else:
                    tarString = "tar -cvzf {run_path}../{design_name}_{tag}.tar.gz"
                    for dirc in tarList:
                        tarString += " {run_path}" + dirc
                    tar_cmd = tarString.format(run_path=run_path,
                                               design_name=design_name,
                                               tag=tag)
                    subprocess.check_output(tar_cmd.split())
                log.info('{design} {tag} Compressing Run Directory Finished'.format(
                    design=design, tag=tag))
            except subprocess.CalledProcessError as e:
                log.info('{design} {tag} Compressing Run Directory Failed'.format(
                    design=design, tag=tag))

        if args.delete:
            log.info('{design} {tag} Deleting Run Directory..'.format(
                design=design, tag=tag))
            deleteDirectory = "rm -rf {run_path}".format(run_path=run_path)
            subprocess.check_output(deleteDirectory.split())
            log.info('{design} {tag} Deleting Run Directory Finished..'.format(
                design=design, tag=tag))

        # Update the remaining-designs tracker used by the periodic printer.
        if print_rem_time is not None:
            if design in rem_designs.keys():
                rem_designs[design] -= 1
                if rem_designs[design] == 0:
                    rem_designs.pop(design)
parser.add_argument('--design', '-d', required=True, help='Design Path')
parser.add_argument('--design_name', '-dn', required=True, help='Design Name')
parser.add_argument('--tag', '-t', required=True, help='Run Tag')
parser.add_argument('--run_path', '-r', default=None, help='Run Path')
parser.add_argument('--output_file', '-o', required=True, help='Output File')

args = parser.parse_args()
design = args.design
design_name = args.design_name
tag = args.tag
run_path = args.run_path
output_file = args.output_file

# Extracting Configurations
params = ConfigHandler.get_config(design, tag, run_path)

# Extracting Report
report = Report(design, tag, design_name, params, run_path).get_report()

# write into file
outputFileOpener = open(output_file, "w")
outputFileOpener.write(Report.get_header() + "," + ConfigHandler.get_header())
outputFileOpener.write("\n")
outputFileOpener.write(report)
outputFileOpener.close()

# Adding Extra Attributes computed from configs and reported statistics
utils.addComputedStatistics(output_file)
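A hedged usage sketch for the entry point above: the script location (scripts/report/report.py) and the example design paths are assumptions for illustration and are not taken from this listing; only the flags mirror the argparse definitions shown.

# Hypothetical driver for the report-extraction entry point above.
# The script path and design paths below are assumptions.
import subprocess


def extract_report(design_path, design_name, tag, output_csv, run_path=None):
    cmd = [
        "python3", "scripts/report/report.py",  # assumed location of the script above
        "--design", design_path,
        "--design_name", design_name,
        "--tag", tag,
        "--output_file", output_csv,
    ]
    if run_path is not None:
        cmd += ["--run_path", run_path]
    # Raises CalledProcessError if the report extraction fails.
    subprocess.check_output(cmd, stderr=subprocess.PIPE)


if __name__ == "__main__":
    extract_report("./designs/spm", "spm", "my_run", "./spm_my_run_report.csv")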
def run_design(designs_queue):
    while not designs_queue.empty():
        design, config, tag = designs_queue.get(timeout=3)  # 3s timeout
        run_path = utils.get_run_path(design=design, tag=tag)
        command = './flow.tcl -design {design} -tag {tag} -overwrite -disable_output -config_tag {config} -no_save'.format(
            design=design, tag=tag, config=config)
        log.info('{design} {tag} running'.format(design=design, tag=tag))
        try:
            subprocess.check_output(command.split(), stderr=subprocess.PIPE)
        except subprocess.CalledProcessError as e:
            error_msg = e.stderr.decode(sys.getfilesystemencoding())
            #print(error_msg)
            log.error('{design} {tag} failed check {run_path}error.txt'.format(
                design=design, run_path=run_path, tag=tag))
            #report_log.error('{design} {tag} failed'.format(design=design, tag=tag))
            with open(run_path + "error.txt", "w") as error_file:
                error_file.write(error_msg)
            #continue
        log.info('{design} {tag} finished\t Writing report..'.format(
            design=design, tag=tag))
        params = ConfigHandler.get_config(design, tag)
        report = Report(design, tag, params).get_report()
        report_log.info(report)
        with open(run_path + "final_report.txt", "w") as report_file:
            report_file.write(Report.get_header() + ", " + ConfigHandler.get_header())
            report_file.write("\n")
            report_file.write(report)

        if args.clean:
            log.info('{design} {tag} Cleaning tmp Directory..'.format(
                design=design, tag=tag))
            moveUnPadded_cmd = "cp ./designs/{design}/runs/{tag}/tmp/merged_unpadded.lef ./designs/{design}/runs/{tag}/results/".format(
                design=design, tag=tag)
            subprocess.check_output(moveUnPadded_cmd.split())
            clean_cmd = "rm -rf ./designs/{design}/runs/{tag}/tmp/".format(
                design=design, tag=tag)
            subprocess.check_output(clean_cmd.split())
            log.info('{design} {tag} Cleaning tmp Directory Finished'.format(
                design=design, tag=tag))

        if tarList[0] != "":
            log.info('{design} {tag} Compressing Run Directory..'.format(
                design=design, tag=tag))
            if 'all' in tarList:
                tarAll_cmd = "tar -cvzf ./designs/{design}/runs/{design}_{tag}.tar.gz ./designs/{design}/runs/{tag}/".format(
                    design=design, tag=tag)
                subprocess.check_output(tarAll_cmd.split())
            else:
                tarString = "tar -cvzf ./designs/{design}/runs/{design}_{tag}.tar.gz"
                for dirc in tarList:
                    tarString += " ./designs/{design}/runs/{tag}/" + dirc
                tar_cmd = tarString.format(design=design, tag=tag)
                subprocess.check_output(tar_cmd.split())
            log.info('{design} {tag} Compressing Run Directory Finished'.format(
                design=design, tag=tag))

        if args.delete:
            log.info('{design} {tag} Deleting Run Directory..'.format(
                design=design, tag=tag))
            deleteDirectory = "rm -rf ./designs/{design}/runs/{tag}/".format(
                design=design, tag=tag)
            subprocess.check_output(deleteDirectory.split())
            log.info('{design} {tag} Deleting Run Directory Finished..'.format(
                design=design, tag=tag))
def run_design(designs_queue):
    while not designs_queue.empty():
        design, config, tag, design_name = designs_queue.get(
            timeout=3)  # 3s timeout
        run_path = utils.get_run_path(design=design, tag=tag)
        log.info("{design} {tag} running".format(design=design, tag=tag))

        command = ""
        if show_log_output:
            command = "{ol_entry} -design {design} -tag {tag} -overwrite -config_tag {config} -no_save".format(
                ol_entry=os.getenv("OPENLANE_ENTRY") or "./flow.tcl",
                design=design,
                tag=tag,
                config=config,
            )
        else:
            command = "{ol_entry} -design {design} -tag {tag} -overwrite -disable_output -config_tag {config} -no_save".format(
                ol_entry=os.getenv("OPENLANE_ENTRY") or "./flow.tcl",
                design=design,
                tag=tag,
                config=config,
            )

        skip_rm_from_rems = False
        try:
            if show_log_output:
                process = subprocess.Popen(command.split(),
                                           stderr=subprocess.PIPE,
                                           stdout=subprocess.PIPE)
                while True:
                    output = process.stdout.readline()
                    if not output:
                        break
                    print(str(output.strip())[2:-1])
            else:
                subprocess.check_output(command.split(), stderr=subprocess.PIPE)
        except subprocess.CalledProcessError as e:
            if print_rem_time is not None:
                rmDesignFromPrintList(design)
                skip_rm_from_rems = True
            error_msg = e.stderr.decode(sys.getfilesystemencoding())
            log.error("{design} {tag} failed check {run_path}error.txt".format(
                design=design, run_path=run_path, tag=tag))
            with open(run_path + "error.txt", "w") as error_file:
                error_file.write(error_msg)

        if print_rem_time is not None and not skip_rm_from_rems:
            rmDesignFromPrintList(design)

        log.info("{design} {tag} finished\t Writing report...".format(
            design=design, tag=tag))
        params = ConfigHandler.get_config(design, tag)
        report = Report(design, tag, design_name, params).get_report()
        report_log.info(report)
        with open(run_path + "final_report.txt", "w") as report_file:
            report_file.write(Report.get_header() + "," + ConfigHandler.get_header())
            report_file.write("\n")
            report_file.write(report)

        if args.benchmark is not None:
            try:
                log.info("{design} {tag} Comparing with benchmark results...".format(
                    design=design, tag=tag))
                design_benchmark_comp_cmd = "python3 scripts/compare_regression_design.py -b {benchmark} -r {this_run} -o {output_report} -d {design} -rp {run_path}".format(
                    benchmark=args.benchmark,
                    this_run=report_file_name + ".csv",
                    output_report=report_file_name + "_design_test_report.csv",
                    design=design,
                    run_path=run_path,
                )
                subprocess.check_output(design_benchmark_comp_cmd.split())
            except subprocess.CalledProcessError as e:
                error_msg = e.stderr.decode(sys.getfilesystemencoding())
                log.error(
                    "{design} {tag} failed to compare with benchmark: {error_msg}".format(
                        design=design, tag=tag, error_msg=error_msg))

        if args.clean:
            try:
                log.info("{design} {tag} Cleaning tmp/...".format(
                    design=design, tag=tag))
                moveUnPadded_cmd = (
                    "cp {run_path}/tmp/merged_unpadded.lef {run_path}/results/".format(
                        run_path=run_path))
                subprocess.check_output(moveUnPadded_cmd.split())
                clean_cmd = "rm -rf {run_path}/tmp/".format(run_path=run_path)
                subprocess.check_output(clean_cmd.split())
                log.info("{design} {tag} tmp/ cleaned.".format(design=design,
                                                               tag=tag))
            except subprocess.CalledProcessError as e:
                error_msg = e.stderr.decode(sys.getfilesystemencoding())
                log.error(
                    "{design} {tag} failed to clean the tmp directory: {error_msg}".format(
                        design=design, tag=tag, error_msg=error_msg))

        if tarList[0] != "":
            log.info("{design} {tag} Compressing run directory...".format(
                design=design, tag=tag))
            try:
                if "all" in tarList:
                    tarAll_cmd = "tar -cvzf {run_path}../{design_name}_{tag}.tar.gz {run_path}".format(
                        run_path=run_path, design_name=design_name, tag=tag)
                    subprocess.check_output(tarAll_cmd.split())
                else:
                    tarString = "tar -cvzf {run_path}../{design_name}_{tag}.tar.gz"
                    for dirc in tarList:
                        tarString += " {run_path}" + dirc
                    tar_cmd = tarString.format(run_path=run_path,
                                               design_name=design_name,
                                               tag=tag)
                    subprocess.check_output(tar_cmd.split())
                log.info("{design} {tag} Compressing Run Directory Finished".format(
                    design=design, tag=tag))
            except subprocess.CalledProcessError as e:
                log.info("{design} {tag} Compressing Run Directory Failed".format(
                    design=design, tag=tag))

        if args.delete:
            try:
                log.info("{design} {tag} Deleting run directory...".format(
                    design=design, tag=tag))
                deleteDirectory = "rm -rf {run_path}".format(run_path=run_path)
                subprocess.check_output(deleteDirectory.split())
                log.info("{design} {tag} Run directory deleted.".format(
                    design=design, tag=tag))
            except subprocess.CalledProcessError as e:
                error_msg = e.stderr.decode(sys.getfilesystemencoding())
                log.error(
                    "{design} {tag} failed to delete the run directory: {error_msg}".format(
                        design=design, tag=tag, error_msg=error_msg))
def cli(config_tag, regression, tag, threads, configuration_parameters,
        tar_list, excluded_designs, benchmark, print_rem, enable_timestamp,
        append_configurations, delete, show_output, designs):
    """
    Run multiple designs in parallel, for testing or exploration.
    """
    # Drop excluded designs from the run list.
    designs = list(designs)
    excluded_designs = excluded_designs.split(",")
    for excluded_design in excluded_designs:
        if excluded_design in designs:
            designs.remove(excluded_design)

    # Stream flow output only when exactly one design is run outside regression mode.
    show_log_output = show_output and (len(designs) == 1) and (regression is None)

    if print_rem is not None and not show_log_output:
        if float(print_rem) > 0:
            mutex = threading.Lock()
            print_rem_time = float(print_rem)
        else:
            print_rem_time = None
    else:
        print_rem_time = None

    if print_rem_time is not None:
        rem_designs = dict.fromkeys(designs, 1)

    num_workers = int(threads)
    config = config_tag
    tarList = tar_list.split(",")

    if regression is not None:
        regressionConfigurationsList = []
        regressionFileOpener = open(regression, "r")
        regressionFileContent = regressionFileOpener.read().split()
        regressionFileOpener.close()
        for k in regressionFileContent:
            if k.find("=") == -1:
                continue
            if k.find("extra") != -1:
                break
            else:
                regressionConfigurationsList.append(k.split("=")[0])
        if len(regressionConfigurationsList):
            ConfigHandler.update_configuration_values(
                regressionConfigurationsList, True)

    if configuration_parameters is not None:
        if configuration_parameters == "all":
            ConfigHandler.update_configuration_values_to_all(
                append_configurations)
        else:
            try:
                with open(configuration_parameters, "r") as f:
                    configuration_parameters = f.read().split(",")
                    ConfigHandler.update_configuration_values(
                        configuration_parameters, append_configurations)
            except OSError:
                print("Could not open/read file:", configuration_parameters)
                sys.exit()

    store_dir = ""
    report_file_name = ""
    if enable_timestamp:
        store_dir = "./regression_results/{tag}_{date}/".format(
            tag=tag, date=datetime.datetime.now().strftime("%d_%m_%Y_%H_%M"))
        report_file_name = "{store_dir}/{tag}_{date}".format(
            store_dir=store_dir,
            tag=tag,
            date=datetime.datetime.now().strftime("%d_%m_%Y_%H_%M"),
        )
    else:
        store_dir = "./regression_results/{tag}/".format(tag=tag)
        report_file_name = "{store_dir}/{tag}".format(store_dir=store_dir, tag=tag)

    if not os.path.exists(store_dir):
        os.makedirs(store_dir, exist_ok=True)

    # Human-readable run log (file + console) and a CSV-only report log.
    log = logging.getLogger("log")
    log_formatter = logging.Formatter("[%(asctime)s - %(levelname)5s] %(message)s")
    handler1 = logging.FileHandler(
        "{report_file_name}.log".format(report_file_name=report_file_name), "w")
    handler1.setFormatter(log_formatter)
    log.addHandler(handler1)
    handler2 = logging.StreamHandler()
    handler2.setFormatter(log_formatter)
    log.addHandler(handler2)
    log.setLevel(logging.INFO)

    report_log = logging.getLogger("report_log")
    report_formatter = logging.Formatter("%(message)s")
    report_handler = logging.FileHandler(
        "{report_file_name}.csv".format(report_file_name=report_file_name), "w")
    report_handler.setFormatter(report_formatter)
    report_log.addHandler(report_handler)
    report_log.setLevel(logging.INFO)

    report_log.info(Report.get_header() + "," + ConfigHandler.get_header())

    allow_print_rem_designs = False

    # Periodically print the designs that have not finished yet.
    def printRemDesignList():
        t = threading.Timer(print_rem_time, printRemDesignList)
        t.start()
        if allow_print_rem_designs:
            print("Remaining designs (design, # of times): ", rem_designs)
        if len(rem_designs) == 0:
            t.cancel()

    def rmDesignFromPrintList(design):
        if design in rem_designs.keys():
            mutex.acquire()
            try:
                rem_designs[design] -= 1
                if rem_designs[design] == 0:
                    rem_designs.pop(design)
            finally:
                mutex.release()

    if print_rem_time is not None:
        printRemDesignList()
        allow_print_rem_designs = True

    def update(status: str, design: str, message: str = None, error: bool = False):
        status_str = "[%-5s] %-20s" % (status, design)
        if message is not None:
            status_str += f": {message}"
        if error:
            log.error(status_str)
        else:
            log.info(status_str)

    flow_failure_flag = False
    design_failure_flag = False

    # Thread worker: pull (design, config, tag, design_name) tuples until the queue is empty.
    def run_design(designs_queue):
        nonlocal design_failure_flag, flow_failure_flag
        while not designs_queue.empty():
            design, config, tag, design_name = designs_queue.get(
                timeout=3)  # 3s timeout
            run_path = utils.get_run_path(design=design, tag=tag)
            update("START", design)
            command = [
                os.getenv("OPENLANE_ENTRY") or "./flow.tcl",
                "-design", design,
                "-tag", tag,
                "-config_tag", config,
                "-overwrite",
                "-no_save",
            ] + ([] if show_log_output else ["-disable_output"])
            skip_rm_from_rems = False
            try:
                if show_log_output:
                    subprocess.check_call(command)
                else:
                    subprocess.check_output(command, stderr=subprocess.PIPE)
            except subprocess.CalledProcessError as e:
                if print_rem_time is not None:
                    rmDesignFromPrintList(design)
                    skip_rm_from_rems = True
                run_path_relative = os.path.relpath(run_path, ".")
                update("FAIL", design,
                       f"Check {run_path_relative}/flow_summary.log",
                       error=True)
                design_failure_flag = True

            if print_rem_time is not None and not skip_rm_from_rems:
                rmDesignFromPrintList(design)

            update("DONE", design, "Writing report...")
            params = ConfigHandler.get_config(design, tag)
            report = Report(design, tag, design_name, params).get_report()
            report_log.info(report)
            with open(f"{run_path}/report.csv", "w") as report_file:
                report_file.write(Report.get_header() + "," + ConfigHandler.get_header())
                report_file.write("\n")
                report_file.write(report)

            if benchmark is not None:
                try:
                    update("DONE", design, "Comparing with benchmark results...")
                    subprocess.check_output([
                        "python3",
                        "./scripts/compare_regression_design.py",
                        "--output-report", f"{report_file_name}.rpt.yml",
                        "--benchmark", benchmark,
                        "--design", design,
                        "--run-path", run_path,
                        f"{report_file_name}.csv",
                    ], stderr=subprocess.PIPE)
                except subprocess.CalledProcessError as e:
                    error_msg = e.stderr.decode("utf8")
                    update("ERROR", design,
                           f"Failed to compare with benchmark: {error_msg}")
                    flow_failure_flag = True

            if tarList[0] != "":
                update("DONE", design, "Compressing run directory...")
                try:
                    tarball_path = os.path.realpath(
                        os.path.join(run_path, "..", f"{design_name}_{tag}.tar.gz"))
                    tar_cmd = ["tar", "-czvf", tarball_path]
                    if "all" in tarList:
                        tar_cmd += [run_path]
                    else:
                        tar_cmd += list(map(lambda x: f"{run_path}/{x}", tarList))
                    subprocess.check_output(tar_cmd)
                    update("DONE", design, "Compressed run directory.")
                except subprocess.CalledProcessError as e:
                    update("ERROR", design,
                           "Failed to compress run directory.",
                           error=True)
                    flow_failure_flag = True

            if delete:
                try:
                    update("DONE", design, "Deleting run directory...")
                    shutil.rmtree(run_path)
                    update("DONE", design, "Deleted run directory.")
                except FileNotFoundError as e:
                    pass
                except Exception as e:
                    update("ERROR", design,
                           "Failed to delete run directory.",
                           error=True)
                    flow_failure_flag = True

    # Build the job queue: one entry per generated configuration in regression
    # mode, otherwise one entry per design.
    q = queue.Queue()
    total_runs = 0
    if regression is not None:
        regression_file = os.path.join(os.getcwd(), regression)
        number_of_configs = 0
        for design in designs:
            base_path = utils.get_design_path(design=design)
            if base_path is None:
                update("ERROR", design, "Cannot run: Not found", error=True)
                if print_rem_time is not None:
                    if design in rem_designs.keys():
                        rem_designs.pop(design)
                continue
            err, design_name = utils.get_design_name(design, config)
            if err is not None:
                update("ERROR", design, f"Cannot run: {err}", error=True)
                continue
            base_config_path = base_path + "base_config.tcl"
            ConfigHandler.gen_base_config(design, base_config_path)
            number_of_configs = subprocess.check_output([
                "./scripts/config/generate_config.py",
                f"{base_path}/config_{tag}_",
                base_config_path,
                regression_file,
            ])
            number_of_configs = int(number_of_configs.decode(sys.getdefaultencoding()))
            total_runs = total_runs + number_of_configs
            if print_rem_time is not None:
                rem_designs[design] = number_of_configs
            for i in range(number_of_configs):
                config_tag = f"config_{tag}_{i}"
                config_file = f"{base_path}/{config_tag}"
                q.put((design, config_file, config_tag, design_name))
    else:
        for design in designs:
            base_path = utils.get_design_path(design=design)
            if base_path is None:
                update("ALERT", design, "Not found, skipping...")
                if print_rem_time is not None:
                    if design in rem_designs.keys():
                        rem_designs.pop(design)
                continue
            default_config_tag = "config_{tag}".format(tag=tag)
            err, design_name = utils.get_design_name(design, config)
            if err is not None:
                update("ERROR", design, f"Cannot run: {err}")
                continue
            q.put((design, config, default_config_tag, design_name))

    # Spawn the worker threads and wait for all of them to finish.
    workers = []
    for i in range(num_workers):
        workers.append(threading.Thread(target=run_design, args=(q, )))
        workers[i].start()

    for i in range(num_workers):
        while workers[i].is_alive():
            workers[i].join(100)
        log.info(f"Exiting thread {i}...")

    log.info("Getting top results...")
    subprocess.check_output([
        "python3",
        "./scripts/report/get_best.py",
        "-i", report_handler.baseFilename,
        "-o", f"{report_file_name}_best.csv",
    ])

    utils.add_computed_statistics(report_file_name + ".csv")
    utils.add_computed_statistics(report_file_name + "_best.csv")

    if benchmark is not None:
        log.info("Benchmarking...")
        full_benchmark_comp_cmd = [
            "python3",
            "./scripts/compare_regression_reports.py",
            "--no-full-benchmark",
            "--benchmark", benchmark,
            "--output-report", f"{report_file_name}.rpt",
            "--output-xlsx", f"{report_file_name}.rpt.xlsx",
            f"{report_file_name}.csv",
        ]
        subprocess.check_output(full_benchmark_comp_cmd)

    log.info("Done.")

    if design_failure_flag:
        exit(2)
    if flow_failure_flag:
        exit(1)
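For reference, a minimal, self-contained sketch of the queue-plus-worker-threads pattern that cli() above uses to run designs in parallel. The names jobs, worker, and NUM_WORKERS are illustrative only and do not appear in the script.

# Hedged sketch of the concurrency pattern: N threads drain one shared queue,
# mirroring run_design()'s `while not designs_queue.empty()` loop.
import queue
import threading

NUM_WORKERS = 4
jobs = queue.Queue()
for design in ["spm", "xtea", "md5"]:  # stand-ins for real design names
    jobs.put(design)


def worker(job_queue):
    while not job_queue.empty():
        try:
            design = job_queue.get(timeout=3)  # 3s timeout, as in run_design()
        except queue.Empty:
            break  # another thread drained the queue between empty() and get()
        print(f"running {design}")


threads = [threading.Thread(target=worker, args=(jobs,)) for _ in range(NUM_WORKERS)]
for t in threads:
    t.start()
for t in threads:
    t.join()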