def determine_lut_size(architecture_file):
    """
    Determine the maximum LUT size (K) described in an architecture file.

    LUT primitives are assumed to be modelled as BLIF '.names' blocks.
    Returns the widest '.names' input port found (this covers fracturable
    LUT architectures), or 0 if the architecture has no '.names' primitives.
    """
    root = ET.parse(architecture_file).getroot()

    max_k = 0
    found_names = False
    # Recursive XPath search over every <pb_type> in the architecture
    for pb_type in root.findall(".//pb_type"):
        if pb_type.get("blif_model") != ".names":
            continue
        found_names = True

        width = int(pb_type.find("input").get("num_pins"))
        assert width > 0

        # Track the widest LUT seen (fracturable architectures)
        max_k = max(max_k, width)

    if found_names and max_k == 0:
        raise InspectError("Could not identify valid LUT size (K)", filename=architecture_file)

    return max_k
def determine_memory_addr_width(architecture_file):
    """
    Determine the maximum RAM block address width in an architecture file.

    RAM blocks are assumed to use the standard VTR primitives
    (.subckt single_port_ram, .subckt dual_port_ram, etc.).  Returns the
    widest address port found, or 0 if no RAM primitives are present.
    """
    root = ET.parse(architecture_file).getroot()

    widest_addr = 0
    found_ram = False
    # Recursive XPath search over every <pb_type> in the architecture
    for pb_type in root.findall(".//pb_type"):
        model = pb_type.get("blif_model")
        if not (model and "port_ram" in model):
            continue
        found_ram = True

        # Address ports are identified by 'addr' appearing in the port name
        for port in pb_type.findall("input"):
            if "addr" in port.get("name"):
                widest_addr = max(widest_addr, int(port.get("num_pins")))

    if found_ram and widest_addr == 0:
        raise InspectError("Could not identify RAM block address width", filename=architecture_file)

    return widest_addr
def __init__(self, metric, min_value=None, max_value=None):
    """
    Pass requirement that checks a value against a [min_value, max_value] range.

    Arguments
    =========
        metric: Name of the metric this requirement applies to
        min_value: Lower bound of the acceptable range (None = unspecified)
        max_value: Upper bound of the acceptable range (None = unspecified)

    Raises an InspectError if max_value is smaller than min_value.
    """
    super().__init__(metric)

    # BUGFIX: only validate the range when both bounds are provided --
    # comparing against the None defaults raised a TypeError.  Also fixed
    # the error message, which had the relationship inverted (the invalid
    # case is max *smaller* than min).
    if min_value is not None and max_value is not None and max_value < min_value:
        raise InspectError("Invalid range specification (max value smaller than min value)")

    self._min_value = min_value
    self._max_value = max_value
def determine_min_w(log_filename):
    """
    Return the minimum routable channel width reported in a VPR log file.

    Raises an InspectError if no 'Best routing used a channel width factor'
    line is found in the log.
    """
    pattern = re.compile(r"\s*Best routing used a channel width factor of (?P<min_w>\d+).")

    with open(log_filename) as log:
        for line in log:
            found = pattern.match(line)
            if found:
                return int(found.group("min_w"))

    raise InspectError("Failed to find minimum channel width.", filename=log_filename)
def load_parse_patterns(parse_config_filepath):
    """
    Load the parse patterns from the desired file.

    These parse patterns are later used to load in the results file.
    The lines of this file should be formatted in either of the following ways:
        name;path;regex;[default value]
        name;path;regex
    """
    parse_patterns = OrderedDict()

    for line in load_config_lines(parse_config_filepath):
        fields = line.split(";")

        # Only 3-field (no default) and 4-field (with default) lines are legal
        if len(fields) not in (3, 4):
            raise InspectError("Invalid parse format line: '{}'".format(line), parse_config_filepath)

        name, filepath, regex_str = fields[0], fields[1], fields[2]
        default_value = fields[3] if len(fields) == 4 else None

        if name in parse_patterns:
            raise InspectError(
                "Duplicate parse pattern name '{}'".format(name),
                parse_config_filepath,
            )

        parse_patterns[name] = ParsePattern(name, filepath, regex_str, default_value)

    return parse_patterns
def run_lec(
    reference_netlist,
    implementation_netlist,
    command_runner=CommandRunner(),
    temp_dir=Path("."),
    log_filename="abc.lec.out",
    abc_exec=None,
):
    """
    Run Logical Equivalence Checking (LEC) between two netlists using ABC

    .. note :: Usage: vtr.abc.run_lec(<reference_netlist>,<implementation_netlist>,[OPTIONS])

    Arguments
    =========
        reference_netlist :
            The reference netlist to be compared to

        implementation_netlist :
            The implemented netlist to compare to the reference netlist

    Other Parameters
    ----------------
        command_runner :
            A CommandRunner object used to run system commands

        temp_dir :
            Directory to run in (created if non-existent)

        log_filename :
            File to log result to

        abc_exec :
            ABC executable to be run
    """
    if not isinstance(temp_dir, Path):
        temp_dir = Path(temp_dir)
    temp_dir.mkdir(parents=True, exist_ok=True)

    if abc_exec is None:
        abc_exec = str(paths.abc_exe_path)

    def _run_abc_check(engine, out_log):
        # Run a single-command ABC script and report its LEC status
        script = "{} {} {}".format(engine, reference_netlist, implementation_netlist)
        output, _ = command_runner.run_system_command(
            [abc_exec, "-c", script],
            temp_dir=temp_dir,
            log_filename=out_log,
            indent_depth=1,
        )
        return check_abc_lec_status(output)

    # First try ABC's sequential equivalence engine (dsec)
    lec_passed, errored = _run_abc_check("dsec", log_filename)
    if errored:
        # dsec errored out: fall back to combinational equivalence (cec)
        lec_passed, errored = _run_abc_check("cec", "abc.cec.out")

    if lec_passed is None:
        raise InspectError(
            "Couldn't determine Logical Equivalence status between {input} <-> {output}".format(
                input=reference_netlist, output=implementation_netlist
            ),
            filename=log_filename,
        )
    if lec_passed is False:
        raise InspectError(
            "Logical Equivalence Check failed between {input} <-> {output}".format(
                input=reference_netlist, output=implementation_netlist
            ),
            filename=log_filename,
        )

    assert lec_passed
def load_pass_requirements(pass_requirements_filepath):
    """
    Load the pass requirements from a particular file.

    Each line of the pass requirements file should follow one of the
    following formats:
        name;Range(min,max)
        name;RangeAbs(min,max,absolute_value)
        name;Equal()

    Raises an InspectError on malformed lines, duplicate metrics, wrong
    argument counts, or unknown requirement functions.
    """
    parse_patterns = OrderedDict()

    for line in load_config_lines(pass_requirements_filepath):
        components = line.split(";")

        if len(components) != 2:
            raise InspectError(
                "Invalid pass requirement format line: '{}'".format(line),
                pass_requirements_filepath,
            )

        metric = components[0]
        expr = components[1]

        if metric in parse_patterns:
            raise InspectError(
                "Duplicate pass requirement for '{}'".format(metric),
                pass_requirements_filepath,
            )

        # Split "Func(arg1,arg2,...)" into the function name and its arguments
        func, params_str = expr.split("(")
        params_str = params_str.rstrip(")")
        params = params_str.split(",") if params_str != "" else []

        if func == "Range":
            if len(params) != 2:
                raise InspectError(
                    "Range() pass requirement function requires two arguments",
                    pass_requirements_filepath,
                )
            parse_patterns[metric] = RangePassRequirement(
                metric, float(params[0]), float(params[1])
            )
        elif func == "RangeAbs":
            if len(params) != 3:
                # BUGFIX: the message previously claimed "two" arguments,
                # but the check requires three (min, max, absolute value)
                raise InspectError(
                    "RangeAbs() pass requirement function requires three arguments",
                    pass_requirements_filepath,
                )
            parse_patterns[metric] = RangeAbsPassRequirement(
                metric, float(params[0]), float(params[1]), float(params[2])
            )
        elif func == "Equal":
            if len(params) != 0:
                raise InspectError(
                    "Equal() pass requirement function requires no arguments",
                    pass_requirements_filepath,
                )
            parse_patterns[metric] = EqualPassRequirement(metric)
        else:
            raise InspectError(
                "Unexpected pass requirement function '{}' for metric '{}'".format(func, metric),
                pass_requirements_filepath,
            )

    return parse_patterns
def check_passed(self, golden_value, check_value, check_string="golden value"):
    """
    Check if the parsed value is within the acceptable relative range,
    within the absolute threshold, or equal to the golden value.

    Arguments
    =========
        golden_value: Reference value to compare against (may be None)
        check_value: Value being checked (may be None)
        check_string: Label used for the golden value in result messages

    Returns a (passed, reason) tuple.
    """
    # Check for nulls: pass only when both values are missing
    if golden_value is None or check_value is None:
        if golden_value is None and check_value is None:
            return (True, "both {} and check are None".format(check_string))
        if golden_value is None:
            return (
                False,
                "{} is None, but check value is {}".format(check_string, check_value),
            )
        return (
            False,
            "{} is {}, but check value is None".format(check_string, golden_value),
        )

    assert golden_value is not None
    assert check_value is not None

    # Convert values to float, keeping the originals for exact comparison
    original_golden_value = golden_value
    try:
        golden_value = float(golden_value)
    except ValueError as err:
        # BUGFIX: chain from the caught exception instance ('err'), not the
        # ValueError class, so the original traceback is preserved
        raise InspectError(
            "Failed to convert {} '{}' to float".format(check_string, golden_value)
        ) from err

    original_check_value = check_value
    try:
        check_value = float(check_value)
    except ValueError as err:
        raise InspectError(
            "Failed to convert check value '{}' to float".format(check_value)
        ) from err

    # Get the relative ratio, avoiding division by zero
    if golden_value == 0.0:
        if golden_value == check_value:
            return True, "{} and check both equal 0".format(check_string)
        norm_check_value = float("inf")
    else:
        norm_check_value = check_value / golden_value

    # An exact match on the original (pre-conversion) values always passes
    if original_check_value == original_golden_value:
        return True, "Check value equal to {}".format(check_string)

    # Pass if within the relative range OR within the absolute threshold
    if (self.min_value() <= norm_check_value <= self.max_value()) or abs(
        check_value - golden_value
    ) <= self.abs_threshold():
        return True, "relative value within range"

    return (
        False,
        "relative value {} outside of range [{},{}], "
        "above absolute threshold {} and not equal to {} value: {}".format(
            norm_check_value,
            self.min_value(),
            self.max_value(),
            self.abs_threshold(),
            check_string,
            golden_value,
        ),
    )
def check_passed(self, golden_value, check_value, check_string="golden value"):
    """
    Check if the parsed value is within the acceptable relative range or
    equal to the golden value.

    Arguments
    =========
        golden_value: Reference value to compare against (may be None)
        check_value: Value being checked (may be None)
        check_string: Label used for the golden value in result messages

    Returns a (passed, reason) tuple.
    """
    # Check for nulls: pass only when both values are missing
    if golden_value is None or check_value is None:
        if golden_value is None and check_value is None:
            return (True, "both golden and check are None")
        if check_value is None:
            # BUGFIX: this message was previously built with a trailing
            # comma, so a one-element *tuple* was returned as the reason
            # instead of a string
            return (
                False,
                "{} is {}, but check value is None".format(check_string, golden_value),
            )
        return (
            False,
            "{} is None, but check value is {}".format(check_string, check_value),
        )

    assert golden_value is not None
    assert check_value is not None

    # Convert values to float, keeping the originals for exact comparison
    original_golden_value = golden_value
    try:
        golden_value = float(golden_value)
    except ValueError as err:
        # BUGFIX: chain from the caught exception instance ('err'), not the
        # ValueError class, so the original traceback is preserved
        raise InspectError(
            "Failed to convert {} '{}' to float".format(check_string, golden_value)
        ) from err

    original_check_value = check_value
    try:
        check_value = float(check_value)
    except ValueError as err:
        raise InspectError(
            "Failed to convert check value '{}' to float".format(check_value)
        ) from err

    # Get the relative ratio, avoiding division by zero
    if golden_value == 0.0:
        if golden_value == check_value:
            return True, "{} and check both equal 0".format(check_string)
        norm_check_value = float("inf")
    else:
        norm_check_value = check_value / golden_value

    # An exact match on the original (pre-conversion) values always passes
    if original_check_value == original_golden_value:
        return True, "Check value equal to {}".format(check_string)

    # Pass if within the relative range
    if self.min_value() <= norm_check_value <= self.max_value():
        return True, "relative value within range"

    return (
        False,
        "relative value {} outside of range [{},{}] "
        "and not equal to {} value: {}".format(
            norm_check_value,
            self.min_value(),
            self.max_value(),
            check_string,
            golden_value,
        ),
    )
def cmp_full_vs_incr_sta(
    architecture,
    circuit,
    circuit_name=None,
    command_runner=CommandRunner(),
    vpr_args=None,
    temp_dir=Path("."),
    vpr_exec=None,
):
    """
    Sanity check that full STA and incremental STA produce the same
    *.net, *.place, *.route files as well as identical timing report files.

    .. note :: Use: vtr.vpr.cmp_full_vs_incr_sta(<architecture>,<circuit_name>,<circuit>,[OPTIONS])

    Arguments
    =========
        architecture:
            Architecture file

        circuit:
            Input circuit file

    Other Parameters
    ----------------
        circuit_name:
            Name of the circuit file

        command_runner:
            CommandRunner object

        temp_dir:
            Directory to run in

        vpr_exec:
            Path to the VPR executable

        vpr_args:
            Extra arguments for VPR

    Raises an InspectError naming every file that differs between the runs.
    """
    # Verify that files are Paths or convert them to Paths and check that they exist
    architecture = verify_file(architecture, "Architecture")
    circuit = verify_file(circuit, "Circuit")
    if not circuit_name:
        circuit_name = circuit.stem

    default_output_filenames = [
        "{}.net".format(circuit_name),
        "{}.place".format(circuit_name),
        "{}.route".format(circuit_name),
        "report_timing.setup.rpt",
        "report_timing.hold.rpt",
        "report_unconstrained_timing.setup.rpt",
        "report_unconstrained_timing.hold.rpt",
    ]

    # The full STA flow should have already been run
    # directly rename the output files
    for filename in default_output_filenames:
        cmd = ["mv", filename, "full_sta_{}".format(filename)]
        command_runner.run_system_command(
            cmd, temp_dir=temp_dir, log_filename="move.out", indent_depth=1
        )

    # Run the incremental STA flow.
    # BUGFIX: copy vpr_args instead of aliasing it so the caller's dict is
    # not mutated, and tolerate the default vpr_args=None.
    incremental_vpr_args = dict(vpr_args) if vpr_args else {}
    incremental_vpr_args["timing_update_type"] = "incremental"
    run(
        architecture,
        circuit,
        circuit_name,
        command_runner,
        temp_dir,
        log_filename="vpr.incr_sta.out",
        vpr_exec=vpr_exec,
        vpr_args=incremental_vpr_args,
    )

    # Rename the incremental STA output files
    for filename in default_output_filenames:
        cmd = ["mv", filename, "incremental_sta_{}".format(filename)]
        command_runner.run_system_command(
            cmd, temp_dir=temp_dir, log_filename="move.out", indent_depth=1
        )

    # Diff each pair of outputs; a non-zero diff return code means a mismatch
    failed_msg = "Failed with these files (not identical):"
    identical = True
    for filename in default_output_filenames:
        cmd = [
            "diff",
            "full_sta_{}".format(filename),
            "incremental_sta_{}".format(filename),
        ]
        _, cmd_return_code = command_runner.run_system_command(
            cmd, temp_dir=temp_dir, log_filename="diff.out", indent_depth=1
        )
        if cmd_return_code:
            identical = False
            failed_msg += " {}".format(filename)

    if not identical:
        raise InspectError(failed_msg)
def run_second_time(
    architecture,
    circuit,
    circuit_name=None,
    command_runner=CommandRunner(),
    temp_dir=Path("."),
    vpr_exec=None,
    second_run_args=None,
    rr_graph_ext=".xml",
):
    """
    Run vpr again with additional parameters.
    This is used to ensure that files generated by VPR can be re-loaded by it

    .. note :: Usage: vtr.vpr.run_second_time(<architecture>,<circuit>,[OPTIONS])

    Arguments
    =========
        architecture:
            Architecture file

        circuit:
            Input circuit file

    Other Parameters
    ----------------
        circuit_name:
            Name of the circuit file

        command_runner:
            CommandRunner object

        temp_dir:
            Directory to run in

        vpr_exec:
            Path to the VPR executable

        second_run_args:
            Extra arguments for VPR

        rr_graph_ext:
            Extension used for the re-written RR graph file (overridden by
            the extension of 'write_rr_graph' when that argument is present)
    """
    temp_dir = Path(temp_dir) if not isinstance(temp_dir, Path) else temp_dir
    temp_dir.mkdir(parents=True, exist_ok=True)

    # BUGFIX: tolerate the default second_run_args=None instead of raising
    # a TypeError on the 'in' membership tests below
    if second_run_args is None:
        second_run_args = {}

    rr_graph_out_file = ""
    if "write_rr_graph" in second_run_args:
        rr_graph_out_file = second_run_args["write_rr_graph"]
        rr_graph_ext = Path(rr_graph_out_file).suffix

    rr_graph_out_file2 = "rr_graph2" + rr_graph_ext
    if "write_rr_graph" in second_run_args:
        # Re-load the graph written by the first run and write it out again
        second_run_args["read_rr_graph"] = rr_graph_out_file
        second_run_args["write_rr_graph"] = rr_graph_out_file2

    # run VPR
    run(
        architecture,
        circuit,
        circuit_name,
        command_runner,
        temp_dir,
        log_filename="vpr_second_run.out",
        vpr_exec=vpr_exec,
        vpr_args=second_run_args,
    )

    if "write_rr_graph" in second_run_args:
        # The re-written RR graph must match the originally written one
        cmd = ["diff", rr_graph_out_file, rr_graph_out_file2]
        _, diff_result = command_runner.run_system_command(
            cmd, temp_dir, log_filename="diff.rr_graph.out", indent_depth=1
        )
        if diff_result:
            raise InspectError("failed: vpr (RR Graph XML output not consistent when reloaded)")