def finalise(self):
    """Finalisation this sets up all the bounding boxes of me and my children.

    Children are finalised first (depth-first) so that their totals and
    bounding boxes are valid before this node accumulates them. Non-root
    nodes then size their own bounding box from their total token count.
    """
    # Depth-first: each child's tokenCounterTotal / bb must be computed
    # before the accumulation loop below reads them.
    for aChild in self._children:
        aChild.finalise()
    # Now accumulate my children's bounding boxes and token counts
    self._tokenCounterChildren = PpTokenCount.PpTokenCount()
    #self._tokenCounterTotal = PpTokenCount.PpTokenCount()
    #if not self.isRoot:
    #    self._tokenCounterTotal += self.tokenCounter
    self._numChildSigTokens = 0
    for aChild in self._children:
        # Grow my bounding box to enclose each child's sigma box.
        self._bb.extendChildBbox(aChild.bb.bbSigma)
        self._tokenCounterChildren += aChild.tokenCounterTotal
        #self._tokenCounterTotal += aChild.tokenCounter
        # Non-whitespace token count with isAll=False ('significant' tokens).
        self._numChildSigTokens += aChild.tokenCounterTotal.tokenCountNonWs(isAll=False)
    # Set up my bounding box only if non-root node
    if not self.isRoot:
        #self._bb.width = max(self.WIDTH_MINIMUM, self.WIDTH_PER_TOKEN.scale(self._tokenCount))
        # Width is a minimum plus a scaled term per significant token.
        self._bb.width = self.WIDTH_MINIMUM \
            + self.WIDTH_PER_TOKEN.scale(self.tokenCounterTotal.tokenCountNonWs(isAll=False))
        self._bb.depth = self.FILE_DEPTH
        self._bb.bbSelfPadding = self.FILE_PADDING
        # Only reserve parent/child spacing when there are children to plot.
        if len(self._children) > 0:
            self._bb.bbSpaceChildren = self.SPACE_PARENT_CHILD
def __init__(self, theFig, theLineNum):
    """Constructor.

    :param theFig: The file include graph, or None for the root node.
    :param theLineNum: The line number.
    """
    super(IncGraphXML, self).__init__(theLineNum)
    # The root node is the only node with no include graph of its own.
    self._isRoot = theFig is None
    # Token counter for my children (populated later, e.g. on finalise).
    self._tokenCounterChildren = PpTokenCount.PpTokenCount()
    if self._isRoot:
        # Root node, children only - no per-file data.
        self._dataMap = None
    else:
        # Snapshot of the include graph data for this file.
        self._dataMap = {
            'fileName'      : theFig.fileName,
            'numToks'       : theFig.numTokens,
            # This is a string - currently. See core.CppCond for
            # how this might change.
            'condComp'      : theFig.condComp,
            'condCompState' : theFig.condCompState,
            # A PpTokenCount.PpTokenCount() object.
            'tokenCntr'     : theFig.tokenCounter,
            # Another string.
            'findLogic'     : theFig.findLogic,
        }
def __init__(self, theFig, theLineNum):
    """Constructor.

    :param theFig: The file include graph, or None for the root node.
    :param theLineNum: The line number.
    """
    super(SVGTreeNodeMain, self).__init__(theFig, theLineNum)
    # PpTokenCount object for my children only, set on finalise.
    self._tokenCounterChildren = PpTokenCount.PpTokenCount()
    # Total number of significant tokens in all children, set on finalise.
    self._numChildSigTokens = 0
    # Mandatory override of the bounding box object.
    self._bb = PlotNode.PlotNodeBboxBoxy()
    if theFig is None:
        # Root node, children only - no per-file data.
        self._dataMap = None
    else:
        # Snapshot of the include graph data for this file.
        self._dataMap = {
            'numToks'   : theFig.numTokens,
            'condComp'  : theFig.condComp,
            # A PpTokenCount.PpTokenCount() object for this node only.
            'tokenCntr' : theFig.tokenCounter,
            'findLogic' : theFig.findLogic,
        }
    # Tuples of (Coord.Pt, Coord.Box, attributes) that are to be
    # written last as <rect class="invis" ...
    self._triggerS = []
    # Plotting myself takes two passes.
    self._numPassesToPlotSelf = 2
def finalise(self):
    """Called on finalisation: recursively finalises every child, then
    accumulates each child's own count plus its children's count into
    ``self._tokenCounterChildren``."""
    # Start from a fresh counter every time finalise runs.
    self._tokenCounterChildren = PpTokenCount.PpTokenCount()
    # First pass: let every child settle its own counters.
    for child in self._children:
        child.finalise()
    # Second pass: fold each child's totals into mine.
    for child in self._children:
        self._tokenCounterChildren += child.tokenCounter
        self._tokenCounterChildren += child.tokenCounterChildren
def __init__(self, theFpo, theDiag):
    """Constructor.

    theFpo - A FilePathOrigin object that identifies the file.
    theDiag - A CppDiagnostic object to give to the PpTokeniser."""
    self.fileName = theFpo.filePath
    # Where the file came from, as reported by the FilePathOrigin.
    self.origin = theFpo.origin
    # Per-file token counter.
    self.tokenCounter = PpTokenCount.PpTokenCount()
    # A fresh tokeniser bound to the supplied file object.
    self.ppt = PpTokeniser.PpTokeniser(
        theFileObj=theFpo.fileObj,
        theFileId=theFpo.filePath,
        theDiagnostic=theDiag,
    )
def __init__(self, theFpo, theDiag):
    """Constructor.

    :param theFpo: A FilePathOrigin object that identifies the file.
    :type theFpo: ``cpip.core.IncludeHandler.FilePathOrigin([_io.StringIO, str, NoneType, str]), cpip.core.IncludeHandler.FilePathOrigin([_io.TextIOWrapper, str, str, str])``

    :param theDiag: A CppDiagnostic object to give to the PpTokeniser.
    :type theDiag: ``cpip.core.CppDiagnostic.PreprocessDiagnosticStd``

    :returns: ``NoneType``
    """
    self.fileName = theFpo.filePath
    # Per-file token counter.
    self.tokenCounter = PpTokenCount.PpTokenCount()
    # A fresh tokeniser bound to the supplied file object.
    # NOTE(review): unlike the otherwise-similar constructor elsewhere in
    # this source this one does not record theFpo.origin - confirm intentional.
    self.ppt = PpTokeniser.PpTokeniser(
        theFileObj=theFpo.fileObj,
        theFileId=theFpo.filePath,
        theDiagnostic=theDiag,
    )
def processTuToHtml(theLex, theHtmlPath, theTitle, theCondLevel, theIdxPath, incItuAnchors=True):
    """Processes the PpLexer and writes the tokens to the HTML file.

    *theHtmlPath*
        The path to the HTML file to write.

    *theTitle*
        A string to go into the <title> element.

    *theCondLevel*
        The Conditional level to pass to theLex.ppTokens()

    *theIdxPath*
        Path to link back to the index page.

    *incItuAnchors*
        boolean, if True will write anchors for lines in the ITU that are
        in this TU. If True then setItuLineNumbers returned is likely to
        be non-empty.

    Returns a pair of (PpTokenCount.PpTokenCount(), set(int))
    The latter is a set of integer line numbers in the ITU that are in the
    TU, these line numbers with have anchors in this HTML file of the form:
    <a name="%d" />."""
    if not os.path.exists(os.path.dirname(theHtmlPath)):
        os.makedirs(os.path.dirname(theHtmlPath))
    # Layout constants: width of the line-number field and the column at
    # which an over-long output line is broken with a trailing backslash.
    LINE_FIELD_WIDTH = 8
    LINE_BREAK_LENGTH = 100
    # Make a global token counter (this could be got from the file include graph
    # but this is simpler.
    myTokCntr = PpTokenCount.PpTokenCount()
    # Write CSS
    TokenCss.writeCssToDir(os.path.dirname(theHtmlPath))
    # Set of active lines of the ITU (only) that made it into the TU
    setItuLineNumbers = set()
    # Process the TU
    with XmlWrite.XhtmlStream(theHtmlPath, mustIndent=cpip.INDENT_ML) as myS:
        with XmlWrite.Element(myS, 'head'):
            with XmlWrite.Element(
                    myS,
                    'link',
                    {
                        'href' : TokenCss.TT_CSS_FILE,
                        'type' : "text/css",
                        'rel'  : "stylesheet",
                    }
                ):
                pass
            with XmlWrite.Element(myS, 'title'):
                myS.characters(theTitle)
        myIntId = 0
        with XmlWrite.Element(myS, 'body'):
            with XmlWrite.Element(myS, 'h1'):
                myS.characters('Translation Unit: %s' % theLex.tuFileId)
            with XmlWrite.Element(myS, 'p'):
                myS.characters("""An annotated version of the translation unit with minimal whitespace. Indentation is according to the depth of the #include stack. Line numbers are linked to the original source code. 
""")
            with XmlWrite.Element(myS, 'p'):
                myS.characters("""Highlighted filenames take you forward to the next occasion in the include graph of the file being pre-processed, in this case: %s""" % theLex.tuFileId)
            linkToIndex(myS, theIdxPath)
            with XmlWrite.Element(myS, 'pre'):
                # My copy of the file stack for annotating the output
                myFileStack = []
                indentStr = ''
                colNum = 1
                for t in theLex.ppTokens(incWs=True, minWs=True, condLevel=theCondLevel):
                    #print t
                    logging.debug('Token: %s', str(t))
                    myTokCntr.inc(t, isUnCond=t.isUnCond, num=1)
                    # Only unconditional tokens are written to the output.
                    if t.isUnCond:
                        # Adjust the prefix depending on how deep we are in the file stack
                        myIntId = _adjustFileStack(myS, theLex.fileStack, myFileStack, myIntId)
                        indentStr = '.' * len(myFileStack)
                        # Write the token
                        if t.tt == 'whitespace':
                            if t.t != '\n' and colNum > LINE_BREAK_LENGTH:
                                # Break an over-long line; continuation is
                                # indented past the line-number field.
                                myS.characters(' \\\n')
                                myS.characters(indentStr)
                                myS.characters(' ' * (LINE_FIELD_WIDTH + 8))
                                colNum = 1
                            else:
                                # Line break
                                myS.characters(t.t)
                                ## NOTE: This is removed as the cost to the
                                ## browser is enormous.
                                ## Set a marker
                                #with XmlWrite.Element(myS,
                                #    'a',
                                #    {'name' : myTuI.add(theLex.tuIndex)}):
                                #    pass
                        else:
                            if colNum > LINE_BREAK_LENGTH:
                                # Force a break
                                myS.characters('\\\n')
                                myS.characters(indentStr)
                                myS.characters(' ' * (LINE_FIELD_WIDTH + 8))
                                colNum = 1
                            # Non-whitespace tokens get a CSS class per token type.
                            with XmlWrite.Element(myS, 'span',
                                    {'class' : TokenCss.retClass(t.tt)}):
                                myS.characters(t.t)
                                colNum += len(t.t)
                        if t.t == '\n' and len(myFileStack) != 0:
                            # Write an ID for the ITU only
                            if incItuAnchors and len(myFileStack) == 1:
                                with XmlWrite.Element(myS, 'a',
                                        {'name' : '%d' % theLex.lineNum}):
                                    setItuLineNumbers.add(theLex.lineNum)
                            # Write the line prefix
                            myS.characters(indentStr)
                            myS.characters('[')
                            myS.characters(' ' * \
                                (LINE_FIELD_WIDTH - len('%d' % theLex.lineNum)))
                            HtmlUtils.writeHtmlFileLink(
                                    myS,
                                    theLex.fileName,
                                    theLex.lineNum,
                                    '%d' % theLex.lineNum,
                                    theClass=None,
                                )
                            myS.characters(']: ')
                            colNum = 1
            linkToIndex(myS, theIdxPath)
    return myTokCntr, setItuLineNumbers
def _altTextsForTokenCount(self):
    """Returns a list of strings that are the alternate text for token counts.

    One row per token type plus a header and a 'Total' row. Each cell is
    written as: isAll=True count [isAll=False count]. When this node has
    children, extra 'Child' and 'All' (me plus children) columns are added.
    Must not be called on the root node.
    """
    assert (not self.isRoot)
    if len(self._children) > 0:
        # Combined counter: me plus all my descendents.
        myCounterTotal = PpTokenCount.PpTokenCount()
        myCounterTotal += self.tokenCounter
        myCounterTotal += self.tokenCounterChildren
    FIELD_WIDTH = 7
    # First element of each histogram entry is the token type name.
    myTokTypeS = [t[0] for t in self.HIST_PP_TOKEN_TYPES_COLOURS]
    typeLen = max([len(t) for t in myTokTypeS])
    altTextS = []
    # Header row; wider when child/total columns are present.
    if len(self._children) > 0:
        altTextS.append('%*s %*s [%*s] %*s [%*s] %*s [%*s]' \
            % (typeLen, 'Type',
               FIELD_WIDTH, 'Me', FIELD_WIDTH, 'Me',
               FIELD_WIDTH, 'Child', FIELD_WIDTH, 'Child',
               FIELD_WIDTH, 'All', FIELD_WIDTH, 'All',
               )
        )
    else:
        altTextS.append('%*s %*s [%*s]' \
            % (typeLen, 'Type',
               FIELD_WIDTH, 'Me', FIELD_WIDTH, 'Me',
               )
        )
    # cntrAll = cntrSig = 0
    # Running totals: [me/all, me/sig, child/all, child/sig, total/all, total/sig]
    cntrTotalS = [0, ] * 6
    for t in myTokTypeS:
        cntrTotalS[0] += self.tokenCounter.tokenCount(t, isAll=True)
        cntrTotalS[1] += self.tokenCounter.tokenCount(t, isAll=False)
        line = '%*s %*d [%*d]' \
            % (typeLen, t,
               FIELD_WIDTH, self.tokenCounter.tokenCount(t, isAll=True),
               FIELD_WIDTH, self.tokenCounter.tokenCount(t, isAll=False),
               )
        if len(self._children) > 0:
            line += ' %*d [%*d] %*d [%*d]' \
                % (FIELD_WIDTH, self.tokenCounterChildren.tokenCount(t, isAll=True),
                   FIELD_WIDTH, self.tokenCounterChildren.tokenCount(t, isAll=False),
                   FIELD_WIDTH, myCounterTotal.tokenCount(t, isAll=True),
                   FIELD_WIDTH, myCounterTotal.tokenCount(t, isAll=False),
                   )
            cntrTotalS[2] += self.tokenCounterChildren.tokenCount(t, isAll=True)
            cntrTotalS[3] += self.tokenCounterChildren.tokenCount(t, isAll=False)
            cntrTotalS[4] += myCounterTotal.tokenCount(t, isAll=True)
            cntrTotalS[5] += myCounterTotal.tokenCount(t, isAll=False)
        altTextS.append(line)
    # Final 'Total' row from the running sums.
    line = '%*s %*d [%*d]' \
        % (typeLen, 'Total',
           FIELD_WIDTH, cntrTotalS[0],
           FIELD_WIDTH, cntrTotalS[1],
           )
    if len(self._children) > 0:
        line += ' %*d [%*d] %*d [%*d]' \
            % (FIELD_WIDTH, cntrTotalS[2],
               FIELD_WIDTH, cntrTotalS[3],
               FIELD_WIDTH, cntrTotalS[4],
               FIELD_WIDTH, cntrTotalS[5],
               )
    altTextS.append(line)
    return altTextS
def tokenCounterTotal(self):
    """The computed PpTokenCount.PpTokenCount() for me plus my descendents.

    A fresh counter is built on every call so the caller may mutate it freely.
    """
    combined = PpTokenCount.PpTokenCount()
    combined += self.tokenCounter
    combined += self.tokenCounterChildren
    return combined
def tokenCounter(self):
    """The PpTokenCount.PpTokenCount() for this node only.

    The root node owns no file data so it reports an empty, freshly
    constructed counter; other nodes return the counter captured from
    the include graph.
    """
    if not self.isRoot:
        return self._dataMap['tokenCntr']
    return PpTokenCount.PpTokenCount()
def setUp(self):
    """Create a fresh, empty PpTokenCount fixture before each test."""
    self._ptc = PpTokenCount.PpTokenCount()
def setUp(self):
    """Create two fresh, empty PpTokenCount fixtures before each test
    (for tests that combine or compare counters)."""
    self._ptc_1 = PpTokenCount.PpTokenCount()
    self._ptc_2 = PpTokenCount.PpTokenCount()