def main(format: str = "vcd"): relog.step("Search waveforms") vcd_path = None view = None for path in Path(DEFAULT_TMPDIR).rglob("**/*.%s" % format): vcd_path = path break relog.info(vcd_path) for path in Path(os.path.dirname(DEFAULT_TMPDIR)).rglob("**/*.gtkw"): view = path break relog.info("loading view %s" % view) # define what to open file_to_read = view if view else vcd_path relog.step("Open waveforms") if sys.platform == "linux" or sys.platform == "linux2": # linux executor.sh_exec("gtkwave '%s'" % file_to_read, MAX_TIMEOUT=-1, SHELL=False) elif sys.platform == "darwin": # OS X executor.sh_exec("open -a gtkwave '%s'" % file_to_read, MAX_TIMEOUT=-1, SHELL=False) elif sys.platform == "win32": # Windows... executor.sh_exec("gtkwave '%s'" % file_to_read, MAX_TIMEOUT=-1, SHELL=False) else: relog.error("Unknown operating system") return (0, 0)
def main(format):
    file = find_raw()
    if file:
        stats = run(file)
        return stats
    relog.error("No raw file detected")
    return 0, 0
def main(cwd, sim_only: bool = False, cov_only: bool = False, lint_only: bool = False):
    batch = read_batch(cwd)
    if batch:
        run(cwd, batch, sim_only, cov_only, lint_only)
    else:
        relog.error("No Batch.list file found")
def main(files, params):
    top = params.get("TOP_MODULE")
    if not top:
        for file in files:
            if file.endswith(".asc"):
                top = file
                break
    if top:
        stats = run(top)
        return stats
    relog.error("No asc file detected")
    return 0, 0
def watch_log(log_file: str):
    # remove the log file of a previous execution
    if os.path.exists(log_file):
        os.remove(log_file)
    t_start = time.time()
    # wait until the log file is created
    while is_file_timeout(log_file, t_start, max=15, shall_exist=False):
        time.sleep(1)
    if not os.path.exists(log_file):
        relog.error("No simulation log file found")
        return False
    # wait for the end of the simulation (poll once per second)
    count = 0
    while not simulation_finished(log_file) and count < 2500:
        time.sleep(1)
        count += 1
    # True only if the simulation finished before the polling budget ran out
    return count < 2500
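# A minimal usage sketch of watch_log, assuming a simulator has already been
# started in the background and writes to "sim.log" (a hypothetical file
# name): the call blocks until simulation_finished() reports completion or
# the polling budget is exhausted.
if watch_log("sim.log"):
    relog.step("Simulation done")
else:
    relog.error("Simulation timed out or produced no log")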
def parse(file: str):
    """
    generate (address, data) tuples from an Intel HEX file
    """
    if not os.path.exists(file):
        relog.error("File Not Found: %s" % file)
        return
    segment = 0
    with open(file, "r+") as fp:
        for i, line in enumerate(fp):
            line = line.strip()
            # skip blank lines
            if not line:
                continue
            if line[0] != ":":
                relog.error("missing colon at line %d" % (i + 1))
            if len(line) < 11:
                relog.error("line %d is too short" % (i + 1))
            if not check_checksum(line):
                relog.error("checksum invalid at line %d" % (i + 1))
            # split fields
            byte_count, addr, record_type, data = line_split(line)
            # update the absolute address
            address = segment + int(addr, 16)
            # data record
            if record_type == "00":
                yield (address, data)
            # end-of-file record
            elif record_type == "01":
                if byte_count != "00" or addr != "0000":
                    relog.error("wrong EOF: should be 00000001FF at line %d" % (i + 1))
                return
            # extended segment address (up to 1 MB)
            elif record_type == "02":
                segment = int(data, 16) << 4
            # extended linear address (up to 4 GB)
            elif record_type == "04":
                segment = int(data, 16) << 16
            # start addresses (not needed for data extraction)
            elif record_type == "03":
                pass
            elif record_type == "05":
                pass
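# A minimal usage sketch of the Intel HEX generator above; "firmware.hex" is
# a hypothetical input file, and data is the hex-encoded payload of one record.
memory = {}
for address, data in parse("firmware.hex"):
    memory[address] = data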
def main(db):
    warnings, errors = 0, 0
    time = db["time"]
    # sample outputs 1.5 ns after V(a)/V(b) change
    va = series.Series(time, db["values"].get("V(a)"))
    vb = series.Series(time, db["values"].get("V(b)"))
    crossing_times = va.cross(va.max_value() / 2)
    crossing_times.extend(vb.cross(vb.max_value() / 2))
    # check results V(q) == V(q_ref)
    vout = series.Series(time, db["values"].get("V(q)"))
    vout_ref = series.Series(time, db["values"].get("V(q_ref)"))
    nb_steps = db["nb_steps"]
    steps_idx = db["steps_idx"]
    utils.graphs.default_plot_style()
    plt.figure(figsize=(4, max(3, nb_steps * 1.5)))
    for i, idxs in enumerate(steps_idx):
        l, h = idxs
        plt.subplot(311 + i)
        plt.plot(vout_ref.x[l:h] * 1e9, vout_ref.y[l:h])
        plt.plot(vout.x[l:h] * 1e9, vout.y[l:h])
        plt.xlabel("Time [ns]")
        plt.ylabel("Voltage [V]")
    plt.tight_layout()
    plt.savefig("./.tmp_sim/xor.svg")
    tolerance = (vout_ref.max_value() - vout_ref.min_value()) * 0.01
    for t_change in crossing_times:
        t_check = t_change + 1.5e-9
        y = vout.value_at(t_check)
        y_ref = vout_ref.value_at(t_check)
        # check the value and give more details on mismatch
        if abs(y_ref - y) > tolerance:
            relog.error(
                "unexpected output at t = %e: got %.3f V, expected %.3f V +/- %.3f V"
                % (t_check, y, y_ref, tolerance)
            )
            errors += 1
    # return values for regressions
    return warnings, errors
def read_batch(batch_file: str):
    # parser for the config file
    batch = configparser.ConfigParser(
        allow_no_value=True,
        strict=True,
        empty_lines_in_values=False,
        inline_comment_prefixes=("#", ";"),
    )
    # keep the case of strings
    batch.optionxform = str
    # override the section regex
    batch.SECTCRE = re.compile(r"[ \t]*(?P<header>[^:]+?)[ \t]*:")
    # check the input exists
    if not os.path.exists(batch_file):
        raise Exception("%s does not exist" % batch_file)
    # get the batch description file path
    if os.path.isdir(batch_file):
        filepath = utils.normpath(os.path.join(batch_file, "Batch.list"))
    else:
        filepath = batch_file
    # parse the batch file
    try:
        batch.read([filepath])
    except configparser.DuplicateSectionError as dse:
        relog.error(
            "batch cannot accept duplicate rules\n\t"
            "consider applying a label 'folder > label [@SIM_TYPE]:' to %s"
            "\n\tor using the format 'do SIM_TYPE on folder as label:'" % dse.section
        )
    except configparser.MissingSectionHeaderError:
        # add the folder of a testcase to the default category
        # !! should be processed in normalize !!
        with open(filepath, "r+") as fp:
            batch.read_string("default:\n" + fp.read())
    normalize_config(batch)
    return batch
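# A minimal usage sketch of read_batch, assuming "tests/" is a hypothetical
# directory containing a Batch.list file; each section is one simulation rule.
batch = read_batch("tests/")
for rule in batch.sections():
    print(rule, dict(batch[rule]))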
parser.add_argument("-i", "--input", help="asc file to be read") parser.add_argument("-f", "--format", help="output format [ps|jpg]") parser.add_argument("-L", "--library", help="library path of asc", nargs='*') parser.add_argument("-o", "--output", help="output file", default=None) args = parser.parse_args() if args.library: for lib in args.library: if os.path.exists(lib): PSFile.add_library_path(lib) if args.output is None: args.output = "%s.ps" % os.path.splitext(args.input)[0] if "ps" in args.format.lower(): try: PSFile.create_from_ltspice_schematic(args.input, ltspice_asc.Schematic, ltspice_asc.Symbol, args.output) except FileExistsError: os.remove(args.output) PSFile.create_from_ltspice_schematic(args.input, ltspice_asc.Schematic, ltspice_asc.Symbol, args.output) except Exception as e: relog.error(e.args)
def prepare(files, PARAMS):
    relog.step("Preparation")
    # create a temporary directory
    os.makedirs(DEFAULT_TMPDIR, exist_ok=True)
    # find simulation waveforms (vcd, ...)
    WAVE = None
    for wavefile in Path(os.path.dirname(DEFAULT_TMPDIR)).rglob("**/*.vcd"):
        WAVE = str(wavefile)
        _, WAVE_FORMAT = os.path.splitext(WAVE)
        WAVE_FORMAT = WAVE_FORMAT[1:]
        break
    if WAVE is None:
        relog.error("run a simulation first with vcd output")
        exit(0)
    # create the list of sources
    FILES = [f for f, m in files]
    MIMES = list(set(m for f, m in files))
    INCLUDE_DIRS = resolve_includes(FILES)
    # generate data
    modules = PARAMS["COV_MODULES"][0].split(" ") if "COV_MODULES" in PARAMS else ["top"]
    instances = verilog.find_instances(PARAMS["TOP_MODULE"])
    top_module, *_ = verilog.find_modules(PARAMS["TOP_MODULE"])[0]
    instances = [(mod, instance) for mod, pa, instance, po in instances if mod in modules]
    generation = 3 if any("SYS" in m for m in MIMES) else 2
    excludes = PARAMS["IP_MODULES"][0].split(" ") if "IP_MODULES" in PARAMS else []
    # generate scripts
    if instances:
        for k, instance in enumerate(instances):
            data = {
                "modules": modules,
                "instance": instance,
                "generation": generation,
                "excludes": excludes,
                "includes": INCLUDE_DIRS,
                "files": FILES,
                "vcd": WAVE,
                "top": top_module,
            }
            # generate the command file
            _tmp = Template(filename=os.path.join(TOOLS_DIR, "./score.cmd.mako"))
            with open(SCORE_SCRIPT % k, "w+") as fp:
                fp.write(_tmp.render_unicode(**data))
        return len(instances)
    data = {
        "modules": modules,
        "instance": "",
        "generation": generation,
        "excludes": excludes,
        "includes": INCLUDE_DIRS,
        "files": FILES,
        "vcd": WAVE,
        "top": top_module,
    }
    # generate the command file
    _tmp = Template(filename=os.path.join(TOOLS_DIR, "./score.cmd.mako"))
    with open(SCORE_SCRIPT % 0, "w+") as fp:
        fp.write(_tmp.render_unicode(**data))
    return 1
def ish_exec(
    cmd: str,
    log: str = None,
    mode: str = "w+",
    MAX_TIMEOUT: int = 300,
    SHOW_CMD: bool = False,
    SHELL: bool = False,
    CWD: str = None,
    ENV: object = None,
    NOERR: bool = False,
):
    """
    simplify code for executing a shell command
    """
    tokens = shlex.split(cmd)
    try:
        if CWD is None and ENV is None:
            proc = subprocess.Popen(
                tokens,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE if NOERR else subprocess.STDOUT,
                shell=SHELL,
            )
        elif ENV is None:
            proc = subprocess.Popen(
                tokens,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE if NOERR else subprocess.STDOUT,
                shell=SHELL,
                cwd=CWD,
            )
        else:
            proc = subprocess.Popen(
                tokens,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE if NOERR else subprocess.STDOUT,
                shell=SHELL,
                cwd=CWD,
                env=ENV,
            )
        yield proc
        if log is not None:
            with open(log, mode) as fp:
                if SHOW_CMD:
                    fp.write("%s\n" % cmd)
                for line in proc.stdout:
                    sys.stdout.write(format_line(line.decode("utf-8")))
                    # remove color codes of log.vh among other things
                    content = list(relog.filter_stream(line))
                    if content:
                        fp.write("%s\n" % content[0])
        else:
            for line in proc.stdout:
                sys.stdout.write(format_line(line.decode("utf-8")))
        proc.stdout.close()
        return_code = proc.wait()
        if return_code:
            raise subprocess.CalledProcessError(return_code, cmd)
    except (OSError, subprocess.CalledProcessError):
        return False
    except subprocess.TimeoutExpired:
        relog.error("Unexpected executor timeout")
        return False
    else:
        return True
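# A minimal usage sketch of ish_exec, which is a generator: the first next()
# starts the process and hands it back, and exhausting the generator streams
# the filtered output and waits for completion. "make sim" and "run.log" are
# hypothetical command and log path.
runner = ish_exec("make sim", log="run.log", SHOW_CMD=True)
proc = next(runner)  # the process is now running; proc.pid is usable here
for _ in runner:     # drain the generator to stream output and wait
    pass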
def load_raw(filename):
    """
    Parses an ascii raw data file, generates and returns a dictionary with
    the following structure:
        {
            "title": <str>,
            "date": <str>,
            "plotname": <str>,
            "flags": <str>,
            "no_vars": <str>,
            "no_points": <str>,
            "vars": [
                {"idx": <int>, "name": <str>, "type": <str>},
                ...
                {"idx": <int>, "name": <str>, "type": <str>}
            ],
            "values": {
                "var1": <numpy.ndarray>,
                "var2": <numpy.ndarray>,
                ...
                "varN": <numpy.ndarray>
            }
        }

    Arguments:
        :filename: path to file with raw data.

    Returns dict with structure described above.
    """
    filename = utils.normpath(filename)
    if not filename.endswith(".raw"):
        for raw in Path(filename).rglob("**/*.raw"):
            if ".op.raw" not in str(raw):
                filename = str(raw)
                break
        filename = utils.normpath(filename)
    ret, header = {}, []
    mode, data, time = None, None, None
    binary_index = 0
    with open(filename, "rb") as f:
        for line in f:
            if line.startswith(b"\x00"):
                sline = line.decode("utf-16-be", errors="ignore").strip()
            else:
                sline = str(line.replace(b"\x00", b""), encoding="utf-8").strip()
            if "Binary:" not in sline and "Values:" not in sline:
                header.append(sline)
            else:
                if "Values:" in sline:
                    relog.error("Ascii waveforms are not yet supported")
                binary_index = f.tell() + 1
                break
    # get simulation information
    RE_KEY_VALUE = r"(?P<key>[A-Z][\w \.]+):\s*(?P<value>\w.*\w)"
    ret = {}
    matches = re.finditer(RE_KEY_VALUE, "\n".join(header))
    for match in matches:
        k, v = match.groups()
        if "Variables" != k:
            ret[k.lower().replace(". ", "_")] = v
    matches = re.search(
        r"^Variables:\s*(?P<vars>\w.*\w)",
        "\n".join(header),
        flags=re.MULTILINE | re.DOTALL,
    )
    ret.update(matches.groupdict())
    if not ret:
        relog.error("No information found in raw file")
        exit(0)
    # normalize
    ret["tools"] = ret.pop("command")
    ret["no_vars"] = int(ret.pop("no_variables"))
    ret["no_points"] = int(ret["no_points"])
    ret["offset"] = float(ret["offset"])
    # vars
    pattern = r"\s*(?P<idx>\d+)\s+(?P<name>\S+)\s+(?P<type>.*)\s*"
    m_vars = re.finditer(pattern, ret["vars"])

    def transform(i):
        d = i.groupdict()
        d["idx"] = int(d["idx"])
        return d

    ret["vars"] = sorted((transform(i) for i in m_vars), key=lambda x: x["idx"])
    # determine the mode
    if "FFT" in ret["plotname"]:
        mode = "FFT"
    elif "Transient" in ret["plotname"]:
        mode = "Transient"
    elif "AC" in ret["plotname"]:
        mode = "AC"
    # parse the binary section
    nb_vars = ret["no_vars"]
    nb_pts = ret["no_points"]
    data, freq, time = [], None, None
    # read the number of steps in the log file
    nb_steps = 0
    with open(filename.replace(".raw", ".log"), "r+") as fp:
        for line in fp:
            if line.startswith(".step"):
                nb_steps += 1
    ret["nb_steps"] = nb_steps
    steps_indices = []
    if mode == "FFT" or mode == "AC":
        data = np.fromfile(filename, dtype=np.complex128, offset=binary_index)
        freq = np.abs(data[::nb_vars])
        data = np.reshape(data, (nb_pts, nb_vars))
    elif mode == "Transient":
        # time is 8 bytes but is also part of the variables;
        # the value of each variable is 4 bytes, so expect
        # (nb_vars - 1) * 4 + 8 = (nb_vars + 1) * 4 bytes per point,
        # i.e. nb_pts * (nb_vars + 1) * 4 bytes in total
        buf_length = nb_pts * (nb_vars + 1) * 4
        # check the file size to know if this is a stepped simulation
        is_stepped = os.stat(filename).st_size > buf_length + binary_index
        with open(filename, "rb") as fp:
            # read the data
            fp.seek(binary_index)
            data = np.frombuffer(fp.read(buf_length), dtype=np.float32)
            # calculate the time axis
            time = []
            for i in range(nb_pts):
                fp.seek(binary_index + i * (nb_vars + 1) * 4)
                t = struct.unpack("d", fp.read(8))[0]
                time.append(t)
                if i > 0 and t == 0:
                    steps_indices.append(i)
        steps_indices.append(nb_pts)
        # reshape the data
        data = np.array(data).reshape((nb_pts, nb_vars + 1))
    ret["steps_idx"] = [
        (0, j) if i == 0 else (steps_indices[i - 1], j)
        for i, j in enumerate(steps_indices)
    ]
    ret["values"] = {
        ret["vars"][i - 1].get("name", ""): data[:, i] for i in range(2, nb_vars)
    }
    ret["freq"] = freq
    ret["time"] = time
    return ret
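# A minimal usage sketch of load_raw, assuming a transient .raw file with its
# companion .log file next to it; "./.tmp_sim/xor.raw" is a hypothetical path.
db = load_raw("./.tmp_sim/xor.raw")
print(db["plotname"], db["no_points"], db["nb_steps"])
vq = db["values"].get("V(q)")  # numpy array of samples for one variable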
def read_from(sources_list: str, no_logger: bool = False, no_stdout: bool = True):
    files = []
    parameters = {}
    # check the input exists
    if not os.path.exists(sources_list):
        raise Exception("%s does not exist" % sources_list)
    # add the log package file
    if not no_logger:
        log_inc = os.path.join(os.environ["REFLOW"], "digital/packages/log.svh")
        if no_stdout:
            files.append((log_inc, utils.files.get_type(log_inc)))
        else:
            print(log_inc, utils.files.get_type(log_inc), sep=";")
    # store the list of files
    graph = {}
    try:
        graph = read_sources(sources_list, {})
    except FileNotFoundError as e:
        relog.error("'%s' not found" % (e.filename or e.filename2))
        exit(1)
    except Exception:
        traceback.print_exc(file=sys.stderr)
    # display the list of files and their mime-type
    for node in graph:
        if isinstance(node, Node):
            _t = utils.files.get_type(node.name)
            if _t:
                if no_stdout:
                    files.append((node.name, _t))
                else:
                    print(node.name, _t, sep=";")
    # list the parameters: walk the graph in reverse order
    # to apply the latest value of a parameter in the hierarchy
    for node in graph[::-1]:
        if no_stdout and isinstance(node, Node):
            parameters.update(node.params)
        elif no_stdout:
            pass
        else:
            for key, value in node.params.items():
                print("%s\t:\t%s" % (key, value))
    # define the most accurate timescale
    min_ts = (1, "s", 1, "ms")
    for node in graph:
        if not isinstance(node, Node):
            continue
        if not utils.files.is_digital(node.name):
            continue
        ts = verilog.find_timescale(node.name)
        if ts:
            sn, su, rn, ru = ts[0]
            if utils.parsers.evaluate_eng_unit(sn, su) < utils.parsers.evaluate_eng_unit(
                *min_ts[0:2]
            ):
                min_ts = (sn, su, *min_ts[2:4])
            if utils.parsers.evaluate_eng_unit(rn, ru) < utils.parsers.evaluate_eng_unit(
                *min_ts[2:4]
            ):
                min_ts = (*min_ts[0:2], rn, ru)
    if utils.parsers.evaluate_eng_unit(*min_ts[0:2]) == 1.0:
        print("TIMESCALE\t:\t'1ns/100ps'")
        parameters["TIMESCALE"] = "1ns/100ps"
    else:
        print("TIMESCALE\t:\t'%s%s/%s%s'" % min_ts)
        parameters["TIMESCALE"] = "%s%s/%s%s" % min_ts
    # define the top module
    if isinstance(graph[-1], Node):
        print("TOP_MODULE\t:\t'%s'" % graph[-1].name)
        parameters["TOP_MODULE"] = graph[-1].name
    if no_stdout:
        # normalize the path of files across platforms
        files = [(f.replace("\\", "/"), m) for f, m in files]
    return files, parameters
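# A minimal usage sketch of read_from; "Sources.list" is a hypothetical
# sources file, and the REFLOW environment variable must point to the
# repository root since the log.svh include is resolved through it.
files, params = read_from("Sources.list", no_logger=False, no_stdout=True)
print(params.get("TOP_MODULE"), params.get("TIMESCALE"))
for path, mime in files:
    print(path, mime, sep=";")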