def run_tasks(args, configs):
    """Run the specified set of tasks (configs).

    Dispatches on ``args.system``:
      * ``"local"``  — optionally run, parse, golden-check, and geomean the
        jobs on this machine.
      * ``"scripts"`` — generate per-job shell scripts (for cluster
        submission) instead of executing anything.

    Returns the number of failed jobs/checks (0 on full success).
    Raises VtrError for an unrecognized ``args.system`` or an invalid
    processor count.
    """
    start = datetime.now()
    num_failed = 0

    jobs = create_jobs(args, configs)

    # Map each task name to the run directory its jobs will use.
    run_dirs = {}
    for config in configs:
        task_dir = find_task_dir(config)
        task_run_dir = get_next_run_dir(task_dir)
        run_dirs[config.task_name] = task_run_dir

    # We could potentially support other 'run' systems (e.g. a cluster),
    # rather than just the local machine
    if args.system == "local":
        # NOTE: was an `assert`, which is stripped under `python -O`;
        # raise explicitly so the validation always runs.
        if args.j <= 0:
            raise VtrError("Invalid number of processors")

        if args.run:
            num_failed = run_parallel(args, jobs, run_dirs)
            print("Elapsed time: {}".format(
                format_elapsed_time(datetime.now() - start)))

        if args.parse:
            # Restart the clock so the parse phase is timed on its own.
            start = datetime.now()
            print("\nParsing test results...")
            if args.list_file:
                print("scripts/parse_vtr_task.py -l {}".format(
                    args.list_file[0]))
            parse_tasks(configs, jobs)
            print("Elapsed time: {}".format(
                format_elapsed_time(datetime.now() - start)))

        if args.create_golden:
            create_golden_results_for_tasks(configs)

        if args.check_golden:
            num_failed += check_golden_results_for_tasks(configs)

        if args.calc_geomean:
            summarize_qor(configs)
            calc_geomean(args, configs)

    # This option generates a shell script (vtr_flow.sh) for each architecture,
    # circuit, script_params
    # The generated can be used to be submitted on a large cluster
    elif args.system == "scripts":
        # Only the directory paths are needed here, not the task names.
        for run_dir in run_dirs.values():
            Path(run_dir).mkdir(parents=True)
        run_scripts = create_run_scripts(jobs, run_dirs)
        for script in run_scripts:
            print(script)

    else:
        raise VtrError(
            "Unrecognized run system {system}".format(system=args.system))

    return num_failed
def vtr_command_main(arg_list, prog=None):
    """Main function for parse_vtr_task.

    Parses in the results from run_vtr_task.py.

    Returns the number of failures (golden-check mismatches plus any
    command/inspect/VTR errors caught here); 0 on success.
    """
    # Load the arguments
    args = vtr_command_argparser(prog).parse_args(arg_list)

    # BUGFIX: num_failed must exist before the try block. It was previously
    # assigned only after load_list_file()/find_task_config_file()/
    # load_task_config() — if any of those raised, the except handlers'
    # `num_failed += 1` crashed with UnboundLocalError, masking the error.
    num_failed = 0

    try:
        # Copy so extending the task list does not mutate args.task in place.
        task_names = list(args.task)

        for list_file in args.list_file:
            task_names += load_list_file(list_file)

        config_files = [
            find_task_config_file(task_name) for task_name in task_names
        ]
        configs = [
            load_task_config(config_file) for config_file in config_files
        ]

        jobs = create_jobs(args, configs, after_run=True)

        parse_tasks(configs, jobs)

        if args.create_golden:
            create_golden_results_for_tasks(configs)

        if args.check_golden:
            num_failed += check_golden_results_for_tasks(configs)

        if args.calc_geomean:
            summarize_qor(configs)
            calc_geomean(args, configs)

    except CommandError as error:
        print("Error: {msg}".format(msg=error.msg))
        print("\tfull command: ", error.cmd)
        print("\treturncode  : ", error.returncode)
        print("\tlog file    : ", error.log)
        num_failed += 1
    except InspectError as error:
        print("Error: {msg}".format(msg=error.msg))
        if error.filename:
            print("\tfile: ", error.filename)
        num_failed += 1
    except VtrError as error:
        print("Error:", error.msg)
        num_failed += 1

    return num_failed