def initPaths(argsEvcPath): # {{{
    '''Populate some convenient variables from args.
    '''
    #module = inspect.stack()[-1][1]
    appPaths.basemodule = os.path.basename(os.path.realpath(__file__))
    appPaths.directory = os.path.dirname(os.path.realpath(__file__))
    appPaths.share = joinP(appPaths.directory, "share")
    appPaths.configDefault = joinP(appPaths.share, "configDefault.toml")

    paths.fname_evc = fnameAppendExt(argsEvcPath, "evc")

    outdir = os.path.splitext(paths.fname_evc)[0] + ".eva"
    paths.outdir = outdir

    paths.fname_evcx = joinP(outdir, "evcx.toml")
    paths.fname_cfg = joinP(outdir, "config.toml")
    paths.fname_cln = joinP(outdir, "clean.vcd")
    paths.fname_mea = joinP(outdir, "signals.vcd")
    paths.fname_meainfo = joinP(outdir, "signals.info.toml")
    paths.dname_mea = joinP(outdir, "signals")
    paths.dname_identicon = joinP(outdir, "identicon")

    paths._INITIALIZED = True

    return
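# Usage sketch for initPaths(), with a hypothetical EVC path "foo"; derived
# names land in the module-level `paths` container, and fnameAppendExt()
# appends the ".evc" extension when it is missing.
#
#   initPaths("foo")
#   paths.fname_evc  # --> "foo.evc"
#   paths.outdir     # --> "foo.eva"
#   paths.fname_cln  # --> "foo.eva/clean.vcd"
#   paths.dname_mea  # --> "foo.eva/signals"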
def meaDbFromVcd(): # {{{
    '''Apply post-processing steps to stage0.

    Extract changes from signals.vcd into fast-to-read binary form.
    signals.vcd has only 2 datatypes: bit, real
    Assume initial state for all measurements is 0.
    All timestamps are 32b non-negative integers.
    Binary format for bit is different from that of real.
      bit: Ordered sequence of timestamps.
      real: Ordered sequence of (timestamp, value) pairs.
            All values are 32b IEEE754 floats, OR 32b(zext) fx.
    '''
    verb("Creating binary database from VCD... ", end='')

    mkDirP(paths.dname_mea)

    with VcdReader(paths.fname_mea) as vcdi:
        # Stage0 file has bijective map between varId and varName by
        # construction, so take first (only) name for convenience.
        _mapVarIdToName = {varId: detypeVarName(nms[0]) \
                           for varId,nms in vcdi.mapVarIdToNames.items()}
        mapVarIdToName = {varId: nm \
                          for varId,nm in _mapVarIdToName.items() \
                          if isUnitIntervalMeasure(nm)}

        fds = {nm: open(joinP(paths.dname_mea, nm), 'wb') \
               for varId,nm in mapVarIdToName.items()}

        prevValues = {varId: 0 for varId in mapVarIdToName.keys()}

        for newTime, changedVarIds, newValues in vcdi.timechunks:
            for varId, newValue in zip(changedVarIds, newValues):
                nm = mapVarIdToName.get(varId, None)
                if nm is None:
                    continue

                tp, _, structFmt = meaDtype(nm)

                v, p = tp(newValue), prevValues[varId]

                if v != p:
                    _packArgs = [newTime, v] if tp is float else [newTime]
                    bs = struct.pack(structFmt, *_packArgs)
                    fds[nm].write(bs)

                prevValues[varId] = v

    for _, fd in fds.items():
        fd.close()

    verb("Done")

    return
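# The record layouts written by meaDbFromVcd() can be illustrated with struct
# alone. A minimal sketch, assuming the float encoding for reals; the fx
# encoding instead packs the value as a zero-extended 32b unsigned (">LL").
#
#   import struct
#   bitRecord = struct.pack(">L", 42)          # bit: timestamp only, 4B.
#   realRecord = struct.pack(">Lf", 42, 0.75)  # real: (timestamp, value), 8B.
#   assert struct.unpack(">L", bitRecord) == (42,)
#   assert struct.unpack(">Lf", realRecord) == (42, 0.75)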
def measureCompactHtml(name): # {{{
    '''Return a compact representation of a measure name.

    Identicon and symbol on colored background.
    '''
    spanFmt = '<span class="compact %s">%s%s</span>'

    mt, st, bn = measureNameParts(name)

    icon = rdTxt(joinP(paths.dname_identicon, bn + ".svg"))

    return spanFmt % (mt, icon, mapSiblingTypeToHtml[st])
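# Shape of the returned markup: the CSS class comes from the measure type,
# the identicon SVG is inlined, and the trailing symbol is whatever
# mapSiblingTypeToHtml provides for the sibling type.
#
#   '<span class="compact <measureType>"><svg ...>...</svg><symbol></span>'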
def htmlTopFmt(body, inlineJs=True, inlineCss=True, bodyOnly=False): # {{{
    '''Return a string with HTML headers for JS and CSS.
    '''
    fnamesJs = (joinP(appPaths.share, fname) for fname in \
                ("jquery-3.3.1.slim.min.js",
                 "bootstrap-3.3.7.min.js",
                 "eva.js"))
    fnamesCss = (joinP(appPaths.share, fname) for fname in \
                 ("bootstrap-3.3.7.min.css",
                  "eva.css"))

    jsTxts = (('<script> %s </script>' % rdTxt(fname)) \
              if inlineJs else \
              ('<script type="text/javascript" src="%s"></script>' % fname)
              for fname in fnamesJs)

    cssTxts = (('<style> %s </style>' % rdTxt(fname)) \
               if inlineCss else \
               ('<link rel="stylesheet" type="text/css" href="%s">' % fname)
               for fname in fnamesCss)

    ret = '\n'.join(body) \
        if bodyOnly else \
        '\n'.join(r.strip() for r in (
            '<!DOCTYPE html>',
            '<html>',
            ' <head>',
            '\n'.join(chain(jsTxts, cssTxts)),
            ' </head>',
            ' <body>',
            '\n'.join(body),
            ' </body>',
            '</html>',
        ))

    return ret
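# Usage sketch for htmlTopFmt(): body is a sequence of HTML lines. With the
# inline* flags False, the share/ files are referenced by <script src> and
# <link href> tags rather than embedded, keeping the generated page small.
#
#   page = htmlTopFmt(['<h1>Hello</h1>'], inlineJs=False, inlineCss=False)
#   frag = htmlTopFmt(['<h1>Hello</h1>'], bodyOnly=True)  # No <html> wrapper.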
def createIdenticons(vcdInfo): # {{{
    '''Produce an identicon for each signal in VCD.
    '''
    verb("Creating identicons... ", end='')

    mkDirP(paths.dname_identicon)

    measureNames = vcdInfo["unitIntervalVarNames"]

    for nm in measureNames:
        measureType, siblingType, baseName = measureNameParts(nm)

        svgStr = identiconSpriteSvg(baseName, fill="darkgray")

        fname = joinP(paths.dname_identicon, baseName + ".svg")
        with open(fname, 'w') as fd:
            fd.write(svgStr)

    verb("Done")

    return
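# Usage sketch for createIdenticons(), with a hypothetical measure name; one
# SVG is written per baseName, so sibling measures share an identicon.
#
#   createIdenticons({"unitIntervalVarNames": ["event.orig.foo"]})
#   # --> writes <outdir>/identicon/foo.svg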
def vcdClean(fnamei, fnameo, comment=None): # {{{
    '''Read in VCD with forgiving reader and write out cleaned version with
    strict writer.

    1. Most frequently changing signals are assigned shorter varIds.
    2. Redundant value changes are eliminated.
    3. Empty timechunks are eliminated.
    4. Timechunks are ordered.
    '''
    # Imports just for vcdClean kept separately since this isn't strictly
    # required for just reading and writing VCD.
    from dmppl.base import joinP, rdLines
    from tempfile import mkdtemp
    from shutil import rmtree

    # Read/copy input to temporary file.
    # Required when input is STDIN because it's read multiple times.
    tmpd = mkdtemp()
    tmpf = joinP(tmpd, "tmpf.vcd")
    with open(tmpf, 'w') as fd:
        fd.write('\n'.join(rdLines(fnamei, commentLines=False)))

    timejumps, mapVarIdToTimejumps, mapVarIdToNumChanges = rdMetadata(tmpf)

    cleanComment = "<<< dmppl.vcd.vcdClean >>>" if comment is None else comment

    with VcdReader(tmpf) as vdi, \
         VcdWriter(fnameo) as vdo:

        usedVarIds = []
        vlistUnsorted = []
        varaliases = []
        for i, n, s, t in zip(vdi.varIds, vdi.varNames,
                              vdi.varSizes, vdi.varTypes):
            if i not in usedVarIds:
                usedVarIds.append(i)
                var = (i, n, s, t)
                vlistUnsorted.append(var)
            else:
                alias = (vdi.mapVarIdToNames[i][0], n, t)
                varaliases.append(alias)

        # Sort varlist by number of changes.
        vlistSorted = sorted([(mapVarIdToNumChanges[i], i, n, s, t) \
                              for i,n,s,t in vlistUnsorted], reverse=True)
        varlist = [(n, s, t) for c, i, n, s, t in vlistSorted]

        vdo.wrHeader(varlist,
                     comment=' '.join((vdi.vcdComment, cleanComment)),
                     date=vdi.vcdDate,
                     version=vdi.vcdVersion,
                     timescale=' '.join(vdi.vcdTimescale),
                     varaliases=varaliases)

        vdo.separateTimechunks = False # Omit blank lines between timechunks.

        # All timechunks are read in monotonic increasing time order, thanks
        # to the sort() in rdMetadata().
        # Multiple (consecutive) timechunks referring to the same time will
        # be read in the same order as the input file, so last one wins.
        # Timechunks to write are put into a queue, then only written out
        # when a timechunk for a greater time is processed.
        wrqTime_, wrqChangedVars_, wrqNewValues_ = 0, [], []

        _ = next(vdi.timechunks) # Initialize timechunks generator FSM.
        for newTime, fileOffset in timejumps:
            try:
                vdi.fd.seek(fileOffset)
                tci = next(vdi.timechunks)
            except StopIteration:
                # Last timechunk exhausts the rdLines generator underlying
                # the vdi.timechunks generator, so this restarts everything.
                vdi.fd.close()
                vdi.__enter__()
                _ = next(vdi.timechunks) # Initialize timechunks generator FSM.
                vdi.fd.seek(fileOffset)
                tci = next(vdi.timechunks)

            _, changedVarIds, newValues = tci

            changedVars = \
                [detypeVarName(vdi.mapVarIdToNames[v][0]) \
                 for v in changedVarIds]

            if newTime == wrqTime_:
                # Append this timechunk to queue.
                wrqChangedVars_ += changedVars
                wrqNewValues_ += newValues
            else:
                # Merge all timechunks in the queue.
                _merge = dict(zip(wrqChangedVars_, wrqNewValues_))
                if 0 < len(_merge):
                    mrgdChangedVars, mrgdNewValues = \
                        zip(*[(k,v) for k,v in _merge.items()])
                    vdo.wrTimechunk((wrqTime_, mrgdChangedVars, mrgdNewValues))

                wrqChangedVars_, wrqNewValues_ = changedVars, newValues
                wrqTime_ = newTime

        # Merge last lot of timechunks in the queue.
        _merge = dict(zip(wrqChangedVars_, wrqNewValues_))
        if 0 < len(_merge):
            mrgdChangedVars, mrgdNewValues = \
                zip(*[(k,v) for k,v in _merge.items()])
            tco = (wrqTime_, mrgdChangedVars, mrgdNewValues)
            vdo.wrTimechunk(tco)

    rmtree(tmpd)

    return 0
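# Usage sketch for vcdClean(); reading from STDIN also works because the
# input is first copied to a temporary file.
#
#   vcdClean("dirty.vcd", "clean.vcd")
#   # --> 0, with clean.vcd holding sorted, merged, deduplicated timechunks.
#   vcdClean("dirty.vcd", "clean.vcd", comment="post-synthesis run")
#   # --> Same, but appending a custom string to the input's $comment
#   #     instead of the default "<<< dmppl.vcd.vcdClean >>>" marker.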
def rdEvs(names, startTime, finishTime, fxbits=0): # {{{
    '''Read EVent Samples (sanitized data written by evaInit to
    foo.eva/signals/*) in [startTime, finishTime), and return as ndarrays.
    '''
    names = set(names)

    assert paths._INITIALIZED
    assert isinstance(startTime, int), type(startTime)
    assert isinstance(finishTime, int), type(finishTime)
    assert startTime < finishTime, (startTime, finishTime)
    #assert 0 <= startTime, startTime # Effectively zext with -ve startTime.
    assert 1 <= finishTime, finishTime

    sIdx, fIdx = 0, finishTime - startTime

    bNames, rNames = \
        [nm for nm in names if not nm.startswith("normal.")], \
        [nm for nm in names if nm.startswith("normal.")]

    bStartOffsets, rStartOffsets = \
        (meaSearch(nm, startTime) for nm in bNames), \
        (meaSearch(nm, startTime) for nm in rNames)

    # Axis0 corresponds to order of names.
    bShape, rShape = \
        (len(bNames), fIdx), \
        (len(rNames), fIdx)

    # np.bool_ rather than the np.bool alias (removed in NumPy 1.24).
    bDtype, rDtype = \
        np.bool_, \
        np.float32 if fxbits == 0 else fxDtype(fxbits)

    # Fully allocate memory before any filling to ensure there is enough.
    bEvs, rEvs = \
        np.zeros(bShape, dtype=bDtype), \
        np.zeros(rShape, dtype=rDtype)

    bStructFmt, rStructFmt = \
        ">L", \
        ">Lf" if fxbits == 0 else ">LL"

    bStrideBytes, rStrideBytes = 4, 8

    # Fill by infer/copy bit values from signals.vcd to ndarray.
    for i, (nm, startOffset) in enumerate(zip(bNames, bStartOffsets)): # {{{
        prevIdx, prevValue = 0, False
        with Fragile(open(joinP(paths.dname_mea, nm), 'rb')) as fd:
            o_ = max(0, startOffset)
            fd.seek(o_ * bStrideBytes)

            bs = fd.read(bStrideBytes) # Read first timestamp.
            if len(bs) != bStrideBytes:
                raise Fragile.Break # Tried reading past EOF
            t, v = struct.unpack(bStructFmt, bs)[0], isEven(o_)
            tIdx = t - startTime
            o_ += 1

            while tIdx < fIdx:
                if prevValue: # Initialised to 0, only update bool if necessary.
                    bEvs[i][prevIdx:tIdx] = prevValue
                prevIdx, prevValue = tIdx, v

                bs = fd.read(bStrideBytes) # Read timestamp.
                if len(bs) != bStrideBytes:
                    raise Fragile.Break # Tried reading past EOF
                t, v = struct.unpack(bStructFmt, bs)[0], isEven(o_)
                tIdx = t - startTime
                o_ += 1

        if prevValue: # Initialised to 0, only update bool if necessary.
            bEvs[i][prevIdx:] = prevValue
    # }}} infer/copy/fill bEvs

    # Fill by infer/copy real values from signals.vcd to ndarray.
    for i, (nm, startOffset) in enumerate(zip(rNames, rStartOffsets)): # {{{
        prevIdx, prevValue = 0, 0.0
        with Fragile(open(joinP(paths.dname_mea, nm), 'rb')) as fd:
            o_ = max(0, startOffset)
            fd.seek(o_ * rStrideBytes)

            bs = fd.read(rStrideBytes) # Read first timestamp.
            if len(bs) != rStrideBytes:
                raise Fragile.Break # Tried reading past EOF
            t, v = struct.unpack(rStructFmt, bs)
            tIdx = t - startTime
            o_ += 1

            while tIdx < fIdx:
                rEvs[i][prevIdx:tIdx] = prevValue \
                    if fxbits == 0 else fxFromFloat(prevValue, nBits=fxbits)
                prevIdx, prevValue = tIdx, v

                bs = fd.read(rStrideBytes) # Read timestamp.
                if len(bs) != rStrideBytes:
                    raise Fragile.Break # Tried reading past EOF
                t, v = struct.unpack(rStructFmt, bs)
                tIdx = t - startTime
                o_ += 1

        rEvs[i][prevIdx:] = prevValue \
            if fxbits == 0 else fxFromFloat(prevValue, nBits=fxbits)
    # }}} infer/copy/fill rEvs

    mapNameToDatarow = {nm: (bEvs if bNotR else rEvs)[i] for bNotR,i,nm in \
                        ([(True, i, nm) for i, nm in enumerate(bNames)] + \
                         [(False, i, nm) for i, nm in enumerate(rNames)])}

    assert len(names) == len(mapNameToDatarow.keys()), \
        (len(names), len(mapNameToDatarow.keys()))
    assert sorted(list(names)) == sorted(list(mapNameToDatarow.keys())), \
        (names, mapNameToDatarow.keys())

    expectedLen = finishTime - startTime
    for nm, row in mapNameToDatarow.items():
        assert 1 == len(row.shape), (nm, row.shape)
        assert expectedLen == row.shape[0], (nm, expectedLen, row.shape)

    return mapNameToDatarow
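# Usage sketch for rdEvs(), with hypothetical measure names. Names prefixed
# "normal." are read as reals, everything else as bits; each returned row
# holds one sample per time unit in [startTime, finishTime).
#
#   evs = rdEvs(["event.orig.foo", "normal.orig.bar"], 0, 100)
#   evs["event.orig.foo"].shape   # --> (100,), dtype bool
#   evs["normal.orig.bar"].shape  # --> (100,), dtype float32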
def meaSearch(name, targetTime, precNotSucc=True): # {{{
    '''Return offset of nearest timestamp.

    Offset is number of timestamps, not number of bytes.
    Search forward in exponential steps, then bisect when overstepped.

    Return offset of nearest previous/next depending on precNotSucc when an
    exact match isn't found.
    None <-- Request succeeding timestamp after EOF
    -1   <-- Request preceding timestamp before first
    '''
    assert isinstance(name, str), type(name)
    assert isinstance(targetTime, int), type(targetTime)
    #assert 0 <= targetTime, targetTime # Allow negative times.
    assert isinstance(precNotSucc, bool)

    _, strideBytes, structFmt = meaDtype(name)

    stepSize_ = 1
    stepDir_ = 1

    # Sticky flag tracks search phase.
    # Search forward in exponential steps, then bisect when overstepped.
    bisect_ = False

    t_, offset_ = None, -1
    with open(joinP(paths.dname_mea, name), 'rb') as fd:
        while True:
            # Offset *before* reading timestamp.
            offset_ = fd.tell() // strideBytes

            # Read timestamp.
            bs = fd.read(strideBytes)
            if len(bs) != strideBytes:
                t_ = None # Tried reading past EOF
            else:
                t_ = struct.unpack(structFmt, bs)[0]
            assert t_ is None or (0 <= t_), t_

            if t_ is None or t_ > targetTime:
                # Overstepped, continue search backwards.
                stepDir_ = -1
                bisect_ = True
            elif t_ < targetTime:
                # Understepped, continue search forwards.
                stepDir_ = 1
            else:
                assert t_ == targetTime
                break # Exact match

            stepSize_ = (stepSize_ // 2) if bisect_ else (stepSize_ * 2)

            if 0 == stepSize_:
                break # No exact match exists

            nextOffset = offset_ + stepSize_ * stepDir_
            fd.seek(nextOffset * strideBytes)

    # t_ is now the "closest" possible.
    if t_ is None: # EOF
        # EOF, return last/maximum offset which must precede targetTime, or
        # None if no successor is possible.
        ret = (offset_ - 1) if precNotSucc else None
    else:
        assert isinstance(t_, int)
        assert 0 <= t_

        if t_ == targetTime:
            # Simple case, exact match.
            ret = offset_
        elif t_ < targetTime:
            ret = offset_ if precNotSucc else None
        else: # t_ > targetTime:
            ret = (offset_ - 1) if precNotSucc else offset_
            # Offset of -1 when targetTime is less than the first
            # timestamp and precNotSucc is True.

    assert ret is None or (isinstance(ret, int) and -1 <= ret), ret

    ## Self-test to ensure that when ret is not None or -1, then it can be
    ## used to seek.
    #if isinstance(ret, int) and 0 <= ret:
    #    with open(joinP(paths.dname_mea, name), 'rb') as fd:
    #        fd.seek(ret)
    #        assert len(fd.read(strideBytes)) == strideBytes

    return ret
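# Return-value sketch for meaSearch(), assuming a hypothetical measure whose
# file holds timestamps 10, 20, 30 at offsets 0, 1, 2:
#
#   meaSearch(nm, 20)                     # --> 1 (exact match)
#   meaSearch(nm, 25)                     # --> 1 (nearest preceding)
#   meaSearch(nm, 5)                      # --> -1 (before first timestamp)
#   meaSearch(nm, 5, precNotSucc=False)   # --> 0 (nearest succeeding)
#   meaSearch(nm, 35, precNotSucc=False)  # --> None (no successor, EOF)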
def do_GET(self): # {{{
    # Remove leading / which is usually (always?) present.
    self.path = self.path.lstrip('/')

    # Send response.
    if len(self.path) and not self.path.startswith("?") and \
        (self.path.endswith(".css") or self.path.endswith(".js")):
        # Strip off any relative paths to only find CSS and JS files.
        fname = joinP(appPaths.share, os.path.basename(self.path))
        try:
            response = rdTxt(fname)
            self.send_response(200)
            if fname.endswith(".js"):
                self.send_header("Content-Type",
                                 "application/javascript; charset=utf-8")
            elif fname.endswith(".css"):
                self.send_header("Content-Type",
                                 "text/css; charset=utf-8")
            responseBytes = response.encode("utf-8")
        except Exception:
            self.send_error(404, "Invalid JS or CSS GET request!")
            return
    elif len(self.path) and self.path.endswith("favicon.ico"):
        # Bug in Chrome requests favicon.ico with every request until it
        # gets 404'd.
        # https://bugs.chromium.org/p/chromium/issues/detail?id=39402
        try:
            faviconFpath = joinP(appPaths.share, "eva_logo.png")
            with open(faviconFpath, 'rb') as fd:
                responseBytes = fd.read()
            self.send_response(200)
            self.send_header("Content-Type", "image/x-icon")
        except Exception:
            self.send_error(404, "Cannot read favicon!")
            return
    elif len(self.path) and not self.path.startswith("?"):
        # Unknown requests.
        self.send_response(404)
        return
    else:
        # Generate HTML string and send OK if inputs are valid.
        try:
            request = self.parseGetRequest(self.path)

            verb("Calculating {a,b}(x|y;u) <-- {%s,%s}(%s|%s;%s)..." % (
                request['a'],
                request['b'],
                request['x'],
                request['y'],
                request['u'],
            ), end='')

            response = evaHtmlString(self.args, self.cfg, request)
            verb("DONE")

            self.send_response(200)
            self.send_header("Content-Type", "text/html; charset=utf-8")
            responseBytes = response.encode("utf-8")
        except EvaHTMLException:
            self.send_error(404, "Invalid GET request!")
            return

    # Send HTTP headers.
    # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers
    self.send_header("Content-Length", "%d" % len(responseBytes))
    self.end_headers()

    # Send HTML.
    self.wfile.write(responseBytes)
    return
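# Request routing implemented by do_GET(), after the leading '/' is stripped:
#
#   "*.js", "*.css"      --> served from share/ with a matching Content-Type.
#   "favicon.ico"        --> share/eva_logo.png served as image/x-icon.
#   "" or "?x=...&y=..." --> parseGetRequest() then evaHtmlString() page.
#   any other path       --> 404.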