def ret_expected_runtime(job, work_dir):
    """
    Return the expected run-time (in seconds) of the specified job,
    or -1 if unknown.

    The expected value is the ``vtr_flow_elapsed_time`` metric taken from
    the golden reference results file located at
    ``config/golden_results.txt`` four directory levels above *work_dir*.

    Parameters
    ----------
    job : the job whose arch/circuit/script_params select the golden entry
    work_dir : str, path of the job's working directory
        (presumably <task>/<run>/<arch>/<circuit> — TODO confirm, the
        four-levels-up traversal relies on that layout)
    """
    seconds = -1
    # parents[3] is the same directory as parent.parent.parent.parent.
    golden_results = load_parse_results(
        str(Path(work_dir).parents[3] / "config/golden_results.txt")
    )
    metrics = golden_results.metrics(job.arch(), job.circuit(), job.script_params())
    if "vtr_flow_elapsed_time" in metrics:
        seconds = float(metrics["vtr_flow_elapsed_time"])
    return seconds
def ret_expected_memory(job, work_dir):
    """
    Return the expected peak memory usage of the specified job,
    or -1 if unknown.

    The expected value is the largest of the per-tool peak-memory metrics
    (``max_odin_mem``, ``max_abc_mem``, ``max_ace_mem``, ``max_vpr_mem``)
    taken from the golden reference results file located at
    ``config/golden_results.txt`` four directory levels above *work_dir*.

    NOTE(review): the local name ``memory_kib`` suggests the unit is KiB,
    but the original docstring said bytes — confirm against the producers
    of the ``max_*_mem`` metrics.
    """
    # parents[3] is the same directory as parent.parent.parent.parent.
    golden_results = load_parse_results(
        str(Path(work_dir).parents[3] / "config/golden_results.txt")
    )
    metrics = golden_results.metrics(job.arch(), job.circuit(), job.script_params())

    # Take the maximum over whichever metrics are present; the seeded -1
    # is returned unchanged when none are present (or all are below -1),
    # matching the original scan loop exactly.
    memory_kib = max(
        [-1]
        + [
            int(metrics[metric])
            for metric in ("max_odin_mem", "max_abc_mem", "max_ace_mem", "max_vpr_mem")
            if metric in metrics
        ]
    )
    return memory_kib
def check_two_files(
    config,
    first_results_filepath,
    second_results_filepath,
    first_name="task",
    second_name="golden",
):
    """
    Compare two parse-results files against the task's pass requirements.

    Validates that both files carry the required primary keys and metrics,
    that every case present in the second (reference) file also appears in
    the first, then checks each pass-requirement metric for every reference
    case. Prints warnings/failures to stdout as it goes.

    Returns the number of QoR metric failures found.

    Raises InspectError when a required primary key, metric, or case is
    missing from either file.
    """
    first_results = load_parse_results(first_results_filepath)
    second_results = load_parse_results(second_results_filepath)

    # Verify that the architecture and circuit are specified
    for param in ["architecture", "circuit", "script_params"]:
        if param not in first_results.PRIMARY_KEYS:
            raise InspectError(
                "Required param '{}' missing from {} results: {}".format(
                    param, first_name, first_results_filepath
                ),
                first_results_filepath,
            )

        if param not in second_results.PRIMARY_KEYS:
            raise InspectError(
                "Required param '{}' missing from {} results: {}".format(
                    param, second_name, second_results_filepath
                ),
                second_results_filepath,
            )

    # Verify that all params and pass requirement metric are included in both the result files
    # We do not worry about non-pass_requriements elements being different or missing
    pass_req_filepath = str(paths.pass_requirements_path / config.pass_requirements_file)
    pass_requirements = load_pass_requirements(pass_req_filepath)

    for metric in pass_requirements.keys():
        # Every case in both files must carry every pass-requirement metric.
        for (
            (arch, circuit, script_params),
            result,
        ) in first_results.all_metrics().items():
            if metric not in result:
                raise InspectError(
                    "Required metric '{}' missing from {} results".format(metric, first_name),
                    first_results_filepath,
                )
        for (
            (arch, circuit, script_params),
            result,
        ) in second_results.all_metrics().items():
            if metric not in result:
                raise InspectError(
                    "Required metric '{}' missing from {} results".format(metric, second_name),
                    second_results_filepath,
                )

    # Load the primary keys for result files
    second_primary_keys = []
    for (arch, circuit, script_params), _ in second_results.all_metrics().items():
        second_primary_keys.append((arch, circuit, script_params))

    first_primary_keys = []
    for (arch, circuit, script_params), _ in first_results.all_metrics().items():
        first_primary_keys.append((arch, circuit, script_params))

    # Ensure that first result file has all the second result file cases
    for arch, circuit, script_params in second_primary_keys:
        if first_results.metrics(arch, circuit, script_params) is None:
            # NOTE(review): unlike the other raises in this function, no
            # filepath is passed as a second argument here — confirm
            # whether InspectError's filepath was intentionally omitted.
            raise InspectError(
                "Required case {}/{} missing from {} results: {}".format(
                    arch, circuit, first_name, first_results_filepath
                )
            )

    # Warn about any elements in first result file that are not found in second result file
    for arch, circuit, script_params in first_primary_keys:
        if second_results.metrics(arch, circuit, script_params) is None:
            print(
                "Warning: {} includes result for {}/{} missing in {} results".format(
                    first_name, arch, circuit, second_name
                )
            )

    num_qor_failures = 0
    # Verify that the first results pass each metric for all cases in the second results
    for (arch, circuit, script_params) in second_primary_keys:
        second_metrics = second_results.metrics(arch, circuit, script_params)
        first_metrics = first_results.metrics(arch, circuit, script_params)
        # first_fail gates the one-time task-level "[Fail]" banner printed
        # before the first failing metric of this case.
        first_fail = True

        for metric in pass_requirements.keys():
            # Missing metrics were validated above for cases present in both
            # files; these guards cover any residual absence with a warning.
            if not metric in second_metrics:
                print("Warning: Metric {} missing from {} results".format(metric, second_name))
                continue

            if not metric in first_metrics:
                print("Warning: Metric {} missing from {} results".format(metric, first_name))
                continue

            try:
                metric_passed, reason = pass_requirements[metric].check_passed(
                    second_metrics[metric], first_metrics[metric], second_name
                )
            except InspectError as error:
                # A check that cannot be evaluated counts as a failure,
                # with the error message as the reason.
                metric_passed = False
                reason = error.msg

            if not metric_passed:
                if first_fail:
                    # Banner shows the last three path components of the
                    # task directory (parent of the config directory).
                    print(
                        "\n{}...[Fail]".format(
                            "/".join(str((Path(config.config_dir).parent)).split("/")[-3:])
                        )
                    )
                    first_fail = False
                print("[Fail]\n{}/{}/{} {} {}".format(arch, circuit, script_params, metric, reason))
                num_qor_failures += 1

    return num_qor_failures
def create_jobs(args, configs, longest_name=0, longest_arch_circuit=0, after_run=False):
    """
    Create the jobs to be executed depending on the configs.

    For every (architecture, circuit) pair of every config, assembles the
    run-script command line (circuit, arch, optional extra files and
    per-config flags), resolves the parse/QoR parse config files, and
    builds one job per script-params value (or a single job when
    ``config.script_params_list_add`` is empty).

    Parameters
    ----------
    args : parsed command-line arguments; optional attributes
        (``show_failures``, ``shared_script_params``, ``verbosity``) are
        probed with hasattr before use
    configs : iterable of task configs to expand into jobs
    longest_name, longest_arch_circuit : int, forwarded to create_job
        (presumably for output alignment — TODO confirm)
    after_run : bool, when True the jobs point at the latest existing run
        directory instead of the next (new) one

    Returns the list of created jobs.
    """
    jobs = []
    for config in configs:
        for arch, circuit in itertools.product(config.archs, config.circuits):
            golden_results = load_parse_results(
                str(PurePath(config.config_dir).joinpath("golden_results.txt"))
            )
            abs_arch_filepath = resolve_vtr_source_file(config, arch, config.arch_dir)
            abs_circuit_filepath = resolve_vtr_source_file(config, circuit, config.circuit_dir)
            work_dir = str(PurePath(arch).joinpath(circuit))

            # After a run, point at the most recent run directory;
            # otherwise at the run directory that will be created next.
            run_dir = (
                str(Path(get_latest_run_dir(find_task_dir(config))) / work_dir)
                if after_run
                else str(Path(get_next_run_dir(find_task_dir(config))) / work_dir)
            )

            # Collect any extra script params from the config file
            cmd = [abs_circuit_filepath, abs_arch_filepath]

            # Check if additional architectural data files are present
            if config.additional_files_list_add:
                for additional_file in config.additional_files_list_add:
                    # Each entry is "<flag>,<file_name>".
                    flag, file_name = additional_file.split(',')

                    cmd += [flag]
                    cmd += [resolve_vtr_source_file(config, file_name, config.arch_dir)]

            if hasattr(args, "show_failures") and args.show_failures:
                cmd += ["-show_failures"]
            cmd += config.script_params if config.script_params else []
            cmd += config.script_params_common if config.script_params_common else []
            cmd += (
                args.shared_script_params
                if hasattr(args, "shared_script_params") and args.shared_script_params
                else []
            )

            # Apply any special config based parameters
            if config.cmos_tech_behavior:
                cmd += [
                    "-cmos_tech",
                    resolve_vtr_source_file(config, config.cmos_tech_behavior, "tech"),
                ]

            cmd += (
                ["--fix_pins", resolve_vtr_source_file(config, config.pad_file)]
                if config.pad_file
                else []
            )

            if config.sdc_dir:
                # SDC file is looked up by circuit stem: <circuit>.sdc
                sdc_name = "{}.sdc".format(Path(circuit).stem)
                sdc_file = resolve_vtr_source_file(config, sdc_name, config.sdc_dir)

                cmd += ["-sdc_file", "{}".format(sdc_file)]

            if config.place_constr_dir:
                # Placement constraints file is looked up by circuit stem:
                # <circuit>.place
                place_constr_name = "{}.place".format(Path(circuit).stem)
                place_constr_file = resolve_vtr_source_file(
                    config, place_constr_name, config.place_constr_dir
                )

                cmd += ["--fix_clusters", "{}".format(place_constr_file)]

            # Resolve the optional parse / second-parse / QoR-parse config
            # files; None when the config does not specify them.
            parse_cmd = None
            second_parse_cmd = None
            qor_parse_command = None
            if config.parse_file:
                parse_cmd = [
                    resolve_vtr_source_file(
                        config,
                        config.parse_file,
                        str(PurePath("parse").joinpath("parse_config")),
                    )
                ]

            if config.second_parse_file:
                second_parse_cmd = [
                    resolve_vtr_source_file(
                        config,
                        config.second_parse_file,
                        str(PurePath("parse").joinpath("parse_config")),
                    )
                ]

            if config.qor_parse_file:
                qor_parse_command = [
                    resolve_vtr_source_file(
                        config,
                        config.qor_parse_file,
                        str(PurePath("parse").joinpath("qor_config")),
                    )
                ]

            # We specify less verbosity to the sub-script
            # This keeps the amount of output reasonable
            if hasattr(args, "verbosity") and max(0, args.verbosity - 1):
                cmd += ["-verbose"]

            if config.script_params_list_add:
                # One job per extra script-params value.
                for value in config.script_params_list_add:
                    jobs.append(
                        create_job(
                            args,
                            config,
                            circuit,
                            arch,
                            value,
                            cmd,
                            parse_cmd,
                            second_parse_cmd,
                            qor_parse_command,
                            work_dir,
                            run_dir,
                            longest_name,
                            longest_arch_circuit,
                            golden_results,
                        )
                    )
            else:
                # No extra script params: a single job for this pair.
                jobs.append(
                    create_job(
                        args,
                        config,
                        circuit,
                        arch,
                        None,
                        cmd,
                        parse_cmd,
                        second_parse_cmd,
                        qor_parse_command,
                        work_dir,
                        run_dir,
                        longest_name,
                        longest_arch_circuit,
                        golden_results,
                    )
                )

    return jobs