def getSyntaxRegions(self):
    """Yield (start, end, css_class) triples for syntax highlighting.

    Tokenizes ``self.source`` with a CppTokenizer and maps keyword,
    string, comment and preprocessor tokens to their CSS class names;
    all other token types are skipped.
    """
    self.tokenizer = CppTokenizer(self.source)
    tokenizer = self.tokenizer
    # Dispatch table: token type -> CSS class emitted for that token.
    css_class_of = {
        tokenizer.KEYWORD: 'k',
        tokenizer.STRING: 'str',
        tokenizer.COMMENT: 'c',
        tokenizer.PREPROCESSOR: 'p',
    }
    for token in tokenizer.getTokens():
        css_class = css_class_of.get(token.token_type)
        if css_class is not None:
            yield (token.start, token.end, css_class)
class CxxHtmlifier:
    """Produce the pieces needed to render a C++ source file as HTML:
    sidebar entries, syntax-highlight regions, hyperlink regions and
    per-line warning annotations, all driven by the analysis blob."""

    def __init__(self, blob, srcpath, treecfg):
        # Full text of the file being htmlified.
        self.source = dxr.readFile(srcpath)
        # Path relative to the tree's source directory.
        self.srcpath = srcpath.replace(treecfg.sourcedir + '/', '')
        # Per-file analysis data; None when this file was not indexed.
        self.blob_file = blob["byfile"].get(self.srcpath, None)

    def collectSidebar(self):
        """Yield sidebar tuples for types, functions, variables,
        declarations and macros found in this file.

        Each tuple is (name, line, title, icon) with an optional fifth
        element, the enclosing scope's name, when a positive scope id
        is present.
        """
        if self.blob_file is None:
            return

        def make_tuple(df, name, loc, scope="scopeid", decl=False):
            # Declarations receive their location string directly and use
            # the plain "code" icon; definitions look the location up in
            # df and use the "wrench" icon.
            if decl:
                img = 'images/icons/page_white_code.png'
            else:
                loc = df[loc]
                img = 'images/icons/page_white_wrench.png'
            if scope in df and df[scope] > 0:
                return (df[name], loc.split(':')[1], df[name], img,
                        dxr.languages.get_row_for_id("scopes", df[scope])["sname"])
            return (df[name], loc.split(':')[1], df[name], img)

        for df in self.blob_file["types"]:
            yield make_tuple(df, "tqualname", "tloc", "scopeid")
        for df in self.blob_file["functions"]:
            yield make_tuple(df, "fqualname", "floc", "scopeid")
        for df in self.blob_file["variables"]:
            # Skip variables whose scope is a function: locals do not
            # belong in the file-level sidebar.
            if ("scopeid" in df and
                    dxr.languages.get_row_for_id("functions", df["scopeid"]) is not None):
                continue
            yield make_tuple(df, "vname", "vloc", "scopeid")
        tblmap = {"functions": "fqualname", "types": "tqualname"}
        for df in self.blob_file["decldef"]:
            table = df["table"]
            if table in tblmap:
                yield make_tuple(dxr.languages.get_row_for_id(table, df["defid"]),
                                 tblmap[table], df["declloc"], "scopeid", True)
        for df in self.blob_file["macros"]:
            yield make_tuple(df, "macroname", "macroloc")

    def getSyntaxRegions(self):
        """Yield (start, end, css_class) triples for syntax highlighting
        of keyword, string, comment and preprocessor tokens."""
        self.tokenizer = CppTokenizer(self.source)
        for token in self.tokenizer.getTokens():
            if token.token_type == self.tokenizer.KEYWORD:
                yield (token.start, token.end, 'k')
            elif token.token_type == self.tokenizer.STRING:
                yield (token.start, token.end, 'str')
            elif token.token_type == self.tokenizer.COMMENT:
                yield (token.start, token.end, 'c')
            elif token.token_type == self.tokenizer.PREPROCESSOR:
                yield (token.start, token.end, 'p')

    def getLinkRegions(self):
        """Yield (start, end, attrs) hyperlink regions for variables,
        functions, types, refs, decldefs and macros.

        ``attrs`` is a dict carrying the CSS class and the row id used
        to build the link target.
        """
        if self.blob_file is None:
            return

        def make_link(obj, clazz, rid):
            start, end = obj['extent'].split(':')
            start, end = int(start), int(end)
            kwargs = {}
            kwargs['rid'] = rid
            kwargs['class'] = clazz
            return (start, end, kwargs)

        tblmap = {
            "variables": ("var", "varid"),
            "functions": ("func", "funcid"),
            "types": ("t", "tid"),
            "refs": ("ref", "refid"),
        }
        for tablename in tblmap:
            tbl = self.blob_file[tablename]
            kind, rid = tblmap[tablename]
            for df in tbl:
                if 'extent' in df:
                    yield make_link(df, kind, df[rid])
        for decl in self.blob_file["decldef"]:
            # Fix: skip tables we don't know how to link (mirrors the
            # guard in collectSidebar) as well as entries lacking an
            # extent; previously an unknown table raised KeyError.
            if 'extent' not in decl or decl["table"] not in tblmap:
                continue
            yield make_link(decl, tblmap[decl["table"]][0], decl["defid"])
        for macro in self.blob_file["macros"]:
            # macroloc looks like "path:line:col" — TODO confirm against
            # the indexer; only line and col are used here.
            line, col = macro['macroloc'].split(':')[1:]
            line, col = int(line), int(col)
            yield ((line, col), (line, col + len(macro['macroname'])),
                   {'class': 'm', 'rid': macro['macroid']})

    def getLineAnnotations(self):
        """Yield (line, attrs) annotations marking compiler warnings."""
        if self.blob_file is None:
            return
        for warn in self.blob_file["warnings"]:
            line = int(warn["wloc"].split(":")[1])
            yield (line, {"class": "lnw", "title": warn["wmsg"]})