Exemplo n.º 1
0
 def __init__(self, theFig, theLineNum):
     """Constructor.

     :param theFig: The file include graph node, or None for the root node.
     :param theLineNum: The line number of the ``#include`` directive.
     """
     super(SVGTreeNodeMain, self).__init__(theFig, theLineNum)
     # PpTokenCount object for my children only, populated on finalise
     self._tokenCounterChildren = PpTokenCount.PpTokenCount()
     # Total number of significant tokens in all children, set on finalise
     self._numChildSigTokens = 0
     # Mandatory override of the bounding box object
     self._bb = PlotNode.PlotNodeBboxBoxy()
     if theFig is None:
         # Root node carries no data of its own, children only
         self._dataMap = None
     else:
         # Copy the interesting include graph data for this node
         self._dataMap = {
             'numToks': theFig.numTokens,
             'condComp': theFig.condComp,
             # A PpTokenCount.PpTokenCount() object for this node only.
             'tokenCntr': theFig.tokenCounter,
             'findLogic': theFig.findLogic,
         }
     # Tuples of (Coord.Pt, Coord.Box, attributes) that are written last
     # as <rect class="invis" ...
     self._triggerS = []
     # Plotting self takes two passes
     self._numPassesToPlotSelf = 2
Exemplo n.º 2
0
    def __init__(self, theFig, theLineNum):
        """Constructor.

        :param theFig: The file include graph, None for the root node.
        :param theLineNum: The line number.
        """
        super(IncGraphXML, self).__init__(theLineNum)
        self._isRoot = theFig is None
        self._tokenCounterChildren = PpTokenCount.PpTokenCount()
        if self._isRoot:
            # Root node carries no data of its own, children only
            self._dataMap = None
        else:
            # Copy the interesting include graph data for this node
            self._dataMap = {
                'fileName': theFig.fileName,
                'numToks': theFig.numTokens,
                # This is a string - currently. See core.CppCond for how
                # this might change.
                'condComp': theFig.condComp,
                'condCompState': theFig.condCompState,
                # A PpTokenCount.PpTokenCount() object.
                'tokenCntr': theFig.tokenCounter,
                # Another string
                'findLogic': theFig.findLogic,
            }
Exemplo n.º 3
0
 def finalise(self):
     """Finalisation: sets up the bounding boxes and accumulated token
     counts of me and all my children, depth first."""
     # Children first so their boxes and counters are complete before we read them
     for child in self._children:
         child.finalise()
     # Accumulate my children's bounding boxes and token counts
     self._tokenCounterChildren = PpTokenCount.PpTokenCount()
     self._numChildSigTokens = 0
     for child in self._children:
         self._bb.extendChildBbox(child.bb.bbSigma)
         self._tokenCounterChildren += child.tokenCounterTotal
         self._numChildSigTokens += child.tokenCounterTotal.tokenCountNonWs(isAll=False)
     # Only non-root nodes get a box of their own
     if not self.isRoot:
         # Width scales with the number of significant (non-whitespace)
         # tokens in me plus my descendents
         sigTokens = self.tokenCounterTotal.tokenCountNonWs(isAll=False)
         self._bb.width = self.WIDTH_MINIMUM + self.WIDTH_PER_TOKEN.scale(sigTokens)
         self._bb.depth = self.FILE_DEPTH
         self._bb.bbSelfPadding = self.FILE_PADDING
         if self._children:
             self._bb.bbSpaceChildren = self.SPACE_PARENT_CHILD
Exemplo n.º 4
0
 def finalise(self):
     """Finalisation hook: accumulates the token counters of all my
     children (each child's own counter plus its accumulated child
     counter)."""
     # Fresh counter so repeated finalisation does not double count
     self._tokenCounterChildren = PpTokenCount.PpTokenCount()
     # Depth first: every child must be finalised before its counts are read
     for child in self._children:
         child.finalise()
     for child in self._children:
         self._tokenCounterChildren += child.tokenCounter
         self._tokenCounterChildren += child.tokenCounterChildren
Exemplo n.º 5
0
 def __init__(self, theFpo, theDiag):
     """Constructor
     theFpo     - A FilePathOrigin object that identifies the file.
     theDiag    - A CppDiagnostic object to give to the PpTokeniser."""
     # Path of the file this object represents
     self.fileName = theFpo.filePath
     # Create a new PpTokeniser over the file's contents
     self.ppt = PpTokeniser.PpTokeniser(
         theFileObj=theFpo.fileObj,
         theFileId=theFpo.filePath,
         theDiagnostic=theDiag,
     )
     # Per-file token counter, a PpTokenCount.PpTokenCount() object
     self.tokenCounter = PpTokenCount.PpTokenCount()
     # Origin of the file, copied from the FilePathOrigin object
     self.origin = theFpo.origin
Exemplo n.º 6
0
    def __init__(self, theFpo, theDiag):
        """Constructor.

        :param theFpo: A FilePathOrigin object that identifies the file.
        :type theFpo: ``cpip.core.IncludeHandler.FilePathOrigin``

        :param theDiag: A CppDiagnostic object to give to the PpTokeniser.
        :type theDiag: ``cpip.core.CppDiagnostic.PreprocessDiagnosticStd``

        :returns: ``NoneType``
        """
        # Path of the file this object represents
        self.fileName = theFpo.filePath
        # Create a new PpTokeniser over the file's contents
        self.ppt = PpTokeniser.PpTokeniser(
            theFileObj=theFpo.fileObj,
            theFileId=theFpo.filePath,
            theDiagnostic=theDiag,
        )
        # Per-file token counter, a PpTokenCount.PpTokenCount() object
        self.tokenCounter = PpTokenCount.PpTokenCount()
Exemplo n.º 7
0
def processTuToHtml(theLex, theHtmlPath, theTitle, theCondLevel, theIdxPath, incItuAnchors=True):
    """Processes the PpLexer and writes the tokens to the HTML file.

    *theLex*
        The PpLexer to read tokens from.

    *theHtmlPath*
        The path to the HTML file to write.

    *theTitle*
        A string to go into the <title> element.

    *theCondLevel*
        The Conditional level to pass to theLex.ppTokens()

    *theIdxPath*
        Path to link back to the index page.

    *incItuAnchors*
        boolean, if True will write anchors for lines in the ITU
        that are in this TU. If True then setItuLineNumbers returned is likely
        to be non-empty.

    Returns a pair of (PpTokenCount.PpTokenCount(), set(int))
    The latter is a set of integer line numbers in the ITU that are in the TU,
    these line numbers will have anchors in this HTML file of the form:
    <a name="%d" />."""
    # Ensure the output directory exists
    if not os.path.exists(os.path.dirname(theHtmlPath)):
        os.makedirs(os.path.dirname(theHtmlPath))
    # Layout constants: width of the line number field and the column at
    # which a long output line is forcibly broken
    LINE_FIELD_WIDTH = 8
    LINE_BREAK_LENGTH = 100
    # Make a global token counter (this could be got from the file include
    # graph but this is simpler).
    myTokCntr = PpTokenCount.PpTokenCount()
    # Write CSS
    TokenCss.writeCssToDir(os.path.dirname(theHtmlPath))
    # Set of active lines of the ITU (only) that made it into the TU
    setItuLineNumbers = set()
    # Process the TU
    with XmlWrite.XhtmlStream(theHtmlPath, mustIndent=cpip.INDENT_ML) as myS:
        with XmlWrite.Element(myS, 'head'):
            with XmlWrite.Element(
                myS,
                'link',
                {
                    'href'  : TokenCss.TT_CSS_FILE,
                    'type'  : "text/css",
                    'rel'   : "stylesheet",
                    }
                ):
                pass
            with XmlWrite.Element(myS, 'title'):
                myS.characters(theTitle)
        myIntId = 0
        with XmlWrite.Element(myS, 'body'):
            with XmlWrite.Element(myS, 'h1'):
                myS.characters('Translation Unit: %s' % theLex.tuFileId)
            with XmlWrite.Element(myS, 'p'):
                myS.characters("""An annotated version of the translation unit
with minimal whitespace. Indentation is according to the depth of the #include stack.
Line numbers are linked to the original source code.
""")
            with XmlWrite.Element(myS, 'p'):
                myS.characters("""Highlighted filenames take you forward to the
next occasion in the include graph of the file being pre-processed, in this case: %s""" % theLex.tuFileId)
            linkToIndex(myS, theIdxPath)
            with XmlWrite.Element(myS, 'pre'):
                # My copy of the file stack for annotating the output
                myFileStack = []
                indentStr = ''
                colNum = 1
                for t in theLex.ppTokens(incWs=True, minWs=True, condLevel=theCondLevel):
                    logging.debug('Token: %s', str(t))
                    # Count every token, conditional or not
                    myTokCntr.inc(t, isUnCond=t.isUnCond, num=1)
                    if t.isUnCond:
                        # Adjust the prefix depending on how deep we are in the file stack
                        myIntId = _adjustFileStack(myS, theLex.fileStack, myFileStack, myIntId)
                        indentStr = '.' * len(myFileStack)
                        # Write the token
                        if t.tt == 'whitespace':
                            if t.t != '\n' and colNum > LINE_BREAK_LENGTH:
                                # Force a continuation line at the break column
                                myS.characters(' \\\n')
                                myS.characters(indentStr)
                                myS.characters(' ' * (LINE_FIELD_WIDTH + 8))
                                colNum = 1
                            else:
                                # Emit the whitespace (including newlines) as-is
                                myS.characters(t.t)
                                ## NOTE: This is removed as the cost to the
                                ## browser is enormous.
                                ## Set a marker
                                #with XmlWrite.Element(myS,
                                #                      'a',
                                #                      {'name' : myTuI.add(theLex.tuIndex)}):
                                #    pass
                        else:
                            if colNum > LINE_BREAK_LENGTH:
                                # Force a break
                                myS.characters('\\\n')
                                myS.characters(indentStr)
                                myS.characters(' ' * (LINE_FIELD_WIDTH + 8))
                                colNum = 1
                            # Non-whitespace token gets a CSS class for its type
                            with XmlWrite.Element(myS, 'span',
                                            {'class' : TokenCss.retClass(t.tt)}):
                                myS.characters(t.t)
                                colNum += len(t.t)
                        if t.t == '\n' and len(myFileStack) != 0:
                            # Write an ID for the ITU only
                            if incItuAnchors and len(myFileStack) == 1:
                                with XmlWrite.Element(myS, 'a',
                                                {'name' : '%d' % theLex.lineNum}):
                                    setItuLineNumbers.add(theLex.lineNum)
                            # Write the line prefix
                            myS.characters(indentStr)
                            myS.characters('[')
                            myS.characters(' ' * \
                                    (LINE_FIELD_WIDTH - len('%d' % theLex.lineNum)))
                            HtmlUtils.writeHtmlFileLink(
                                    myS,
                                    theLex.fileName,
                                    theLex.lineNum,
                                    '%d' % theLex.lineNum,
                                    theClass=None,
                                )
                            myS.characters(']: ')
                            colNum = 1
            linkToIndex(myS, theIdxPath)
    return myTokCntr, setItuLineNumbers
Exemplo n.º 8
0
    def test_10(self):
        """TestIncGraphSVGVisitor: Two pre-includes and a graph."""
        # NOTE(review): this test is disabled by the early return below.
        # TODO: confirm whether it should be re-enabled or deleted.
        return
        # First create an include graph
        myFigr = FileIncludeGraph.FileIncludeGraphRoot()
        myTcs = PpTokenCount.PpTokenCountStack()
        myFs = []
        # push PreInclude_00
        myFigr.addGraph(
            FileIncludeGraph.FileIncludeGraph('PreInclude_00', True,
                                              'a >= b+2',
                                              'Forced PreInclude_00'))
        myTcs.push()
        myFs.append('PreInclude_00')
        myTcs.counter().inc(PpToken.PpToken('PreInclude_00', 'identifier'),
                            True, 8)
        myTcs.counter().inc(PpToken.PpToken('PreInclude_00', 'identifier'),
                            False, 148)
        # pop PreInclude_00
        myFigr.graph.retLatestNode(myFs).setTokenCounter(myTcs.pop())
        myFs.pop()
        self.assertEqual(len(myFs), 0)
        # push PreInclude_01
        myFigr.addGraph(
            FileIncludeGraph.FileIncludeGraph('PreInclude_01', True, 'x > 1',
                                              'Forced PreInclude_00'))
        myTcs.push()
        myFs.append('PreInclude_01')
        myTcs.counter().inc(PpToken.PpToken('PreInclude_01', 'identifier'),
                            True, 7)
        myTcs.counter().inc(PpToken.PpToken('PreInclude_01', 'identifier'),
                            False, 76)
        # pop PreInclude_01
        myFigr.graph.retLatestNode(myFs).setTokenCounter(myTcs.pop())
        myFs.pop()
        self.assertEqual(len(myFs), 0)
        # push ITU.h
        myFigr.addGraph(
            FileIncludeGraph.FileIncludeGraph('ITU.h', True, '', 'CP=.'))
        myTcs.push()
        myFs.append('ITU.h')
        self.assertEqual(3, myFigr.numTrees())
        # push ITU.h/a.h
        myFigr.graph.addBranch([
            'ITU.h',
        ], 15, 'a.h', True, '', 'CP=.')
        myTcs.push()
        myFs.append('a.h')
        myTcs.counter().inc(PpToken.PpToken('a.h', 'identifier'), True, 1)
        myTcs.counter().inc(PpToken.PpToken('a.h', 'identifier'), False, 1)
        # push ITU.h/a.h/aa.h
        myFigr.graph.addBranch(['ITU.h', 'a.h'], 17, 'aa.h', True, '', 'CP=.')
        myTcs.push()
        myFs.append('aa.h')
        myTcs.counter().inc(PpToken.PpToken('aa.h', 'identifier'), True, 2)
        myTcs.counter().inc(PpToken.PpToken('aa.h', 'identifier'), False, 2)
        # pop ITU.h/a.h/aa.h
        myFigr.graph.retLatestNode(myFs).setTokenCounter(myTcs.pop())
        myFs.pop()
        self.assertEqual(len(myFs), 2)
        # push ITU.h/a.h/ab.h
        myFigr.graph.addBranch(['ITU.h', 'a.h'], 19, 'ab.h', True, '', 'CP=.')
        myTcs.push()
        myFs.append('ab.h')
        myTcs.counter().inc(PpToken.PpToken('ab.h', 'identifier'), True, 4)
        myTcs.counter().inc(PpToken.PpToken('ab.h', 'identifier'), False, 4)
        # pop ITU.h/a.h/ab.h
        myFigr.graph.retLatestNode(myFs).setTokenCounter(myTcs.pop())
        myFs.pop()
        self.assertEqual(len(myFs), 2)
        # pop ITU.h/a.h
        myFigr.graph.retLatestNode(myFs).setTokenCounter(myTcs.pop())
        myFs.pop()
        self.assertEqual(len(myFs), 1)
        # push ITU.h/b.h
        myFigr.graph.addBranch([
            'ITU.h',
        ], 115, 'b.h', True, '', 'CP=.')
        myTcs.push()
        myFs.append('b.h')
        myTcs.counter().inc(PpToken.PpToken('b.h', 'identifier'), True, 8)
        myTcs.counter().inc(PpToken.PpToken('b.h', 'identifier'), False, 8)
        # push ITU.h/b.h/ba.h
        myFigr.graph.addBranch(['ITU.h', 'b.h'], 117, 'ba.h', True, '', 'CP=.')
        myTcs.push()
        myFs.append('ba.h')
        myTcs.counter().inc(PpToken.PpToken('ba.h', 'identifier'), True, 16)
        myTcs.counter().inc(PpToken.PpToken('ba.h', 'identifier'), False, 16)
        # pop ITU.h/b.h/ba.h
        myFigr.graph.retLatestNode(myFs).setTokenCounter(myTcs.pop())
        myFs.pop()
        self.assertEqual(len(myFs), 2)
        # push ITU.h/b.h/bb.h
        myFigr.graph.addBranch(['ITU.h', 'b.h'], 119, 'bb.h', True, '', 'CP=.')
        myTcs.push()
        myFs.append('bb.h')
        myTcs.counter().inc(PpToken.PpToken('bb.h', 'identifier'), True, 32)
        myTcs.counter().inc(PpToken.PpToken('bb.h', 'identifier'), False, 32)
        # pop ITU.h/b.h/bb.h
        myFigr.graph.retLatestNode(myFs).setTokenCounter(myTcs.pop())
        myFs.pop()
        self.assertEqual(len(myFs), 2)
        # pop ITU.h/b.h
        myFigr.graph.retLatestNode(myFs).setTokenCounter(myTcs.pop())
        myFs.pop()
        self.assertEqual(len(myFs), 1)
        # ITU.h
        myTcs.counter().inc(PpToken.PpToken('ITU.h', 'identifier'), True, 70)
        myTcs.counter().inc(PpToken.PpToken('ITU.h', 'identifier'), False, 70)
        myFigr.graph.retLatestNode(myFs).setTokenCounter(myTcs.pop())
        myFs.pop()
        self.assertEqual(len(myFs), 0)
        myTcs.close()
        # Expected dump of the include graph; the counts appear to be
        # [total, unconditional] - TODO confirm against FileIncludeGraph.__str__
        expGraph = """PreInclude_00 [156, 8]:  True "a >= b+2" "Forced PreInclude_00"
PreInclude_01 [83, 7]:  True "x > 1" "Forced PreInclude_00"
ITU.h [140, 70]:  True "" "CP=."
000015: #include a.h
        a.h [2, 1]:  True "" "CP=."
        000017: #include aa.h
                aa.h [4, 2]:  True "" "CP=."
        000019: #include ab.h
                ab.h [8, 4]:  True "" "CP=."
000115: #include b.h
        b.h [16, 8]:  True "" "CP=."
        000117: #include ba.h
                ba.h [32, 16]:  True "" "CP=."
        000119: #include bb.h
                bb.h [64, 32]:  True "" "CP=.\""""
        print()
        print(expGraph)
        print()
        print(str(myFigr))
        print()
        myFigr.dumpGraph()
        self.assertEqual(expGraph, str(myFigr))
        # Now visit the graph
        myVis = FileIncludeGraph.FigVisitorTree(IncGraphSVG.SVGTreeNode)
        myFigr.acceptVisitor(myVis)
        # Tree is now a graph of IncGraphSVG.SVGTreeNode
        myIgs = myVis.tree()
        print()
        print('myIgs')
        myIgs.dumpToStream()
        print()
        # Create a plot configuration
        myTpt = TreePlotTransform.TreePlotTransform(myIgs.plotCanvas, 'top',
                                                    '-')
        mySvg = io.StringIO()
        myIgs.plotToFileObj(mySvg, myTpt)
        print()
        print(mySvg.getvalue())
Exemplo n.º 9
0
    def _altTextsForTokenCount(self):
        """Returns a list of strings that are the alternate text for token
        counts: one header row, one row per token type and a total row.
        Rows show 'Me' counts and, when I have children, 'Child' and 'All'
        counts as well; the bracketed figure is the significant count."""
        assert (not self.isRoot)
        FIELD_WIDTH = 7
        hasChildren = len(self._children) > 0
        if hasChildren:
            # Combined counter: me plus all my children
            totalCounter = PpTokenCount.PpTokenCount()
            totalCounter += self.tokenCounter
            totalCounter += self.tokenCounterChildren
        tokenTypes = [pair[0] for pair in self.HIST_PP_TOKEN_TYPES_COLOURS]
        typeLen = max(len(tokType) for tokType in tokenTypes)
        altTextS = []
        # Header row
        if hasChildren:
            altTextS.append('%*s %*s [%*s] %*s [%*s] %*s [%*s]'
                            % (typeLen, 'Type',
                               FIELD_WIDTH, 'Me', FIELD_WIDTH, 'Me',
                               FIELD_WIDTH, 'Child', FIELD_WIDTH, 'Child',
                               FIELD_WIDTH, 'All', FIELD_WIDTH, 'All'))
        else:
            altTextS.append('%*s %*s [%*s]'
                            % (typeLen, 'Type',
                               FIELD_WIDTH, 'Me', FIELD_WIDTH, 'Me'))
        # Running totals: [meAll, meSig, childAll, childSig, allAll, allSig]
        totals = [0] * 6
        for tokType in tokenTypes:
            meAll = self.tokenCounter.tokenCount(tokType, isAll=True)
            meSig = self.tokenCounter.tokenCount(tokType, isAll=False)
            totals[0] += meAll
            totals[1] += meSig
            line = '%*s %*d [%*d]' % (typeLen, tokType,
                                      FIELD_WIDTH, meAll,
                                      FIELD_WIDTH, meSig)
            if hasChildren:
                childAll = self.tokenCounterChildren.tokenCount(tokType, isAll=True)
                childSig = self.tokenCounterChildren.tokenCount(tokType, isAll=False)
                allAll = totalCounter.tokenCount(tokType, isAll=True)
                allSig = totalCounter.tokenCount(tokType, isAll=False)
                line += ' %*d [%*d] %*d [%*d]' % (FIELD_WIDTH, childAll,
                                                  FIELD_WIDTH, childSig,
                                                  FIELD_WIDTH, allAll,
                                                  FIELD_WIDTH, allSig)
                totals[2] += childAll
                totals[3] += childSig
                totals[4] += allAll
                totals[5] += allSig
            altTextS.append(line)
        # Total row
        line = '%*s %*d [%*d]' % (typeLen, 'Total',
                                  FIELD_WIDTH, totals[0],
                                  FIELD_WIDTH, totals[1])
        if hasChildren:
            line += ' %*d [%*d] %*d [%*d]' % (FIELD_WIDTH, totals[2],
                                              FIELD_WIDTH, totals[3],
                                              FIELD_WIDTH, totals[4],
                                              FIELD_WIDTH, totals[5])
        altTextS.append(line)
        return altTextS
Exemplo n.º 10
0
 def tokenCounterTotal(self):
     """The computed PpTokenCount.PpTokenCount() for me plus all my
     descendents."""
     total = PpTokenCount.PpTokenCount()
     total += self.tokenCounter
     total += self.tokenCounterChildren
     return total
Exemplo n.º 11
0
 def tokenCounter(self):
     """The PpTokenCount.PpTokenCount() for me only."""
     if not self.isRoot:
         return self._dataMap['tokenCntr']
     # The root node owns no tokens of its own: return an empty counter
     return PpTokenCount.PpTokenCount()
Exemplo n.º 12
0
 def setUp(self):
     """Create a fresh PpTokenCount fixture for each test."""
     self._ptc = PpTokenCount.PpTokenCount()
Exemplo n.º 13
0
 def setUp(self):
     """Create a fresh PpTokenCountStack fixture for each test."""
     self._ptcs = PpTokenCount.PpTokenCountStack()
Exemplo n.º 14
0
 def setUp(self):
     """Create two fresh PpTokenCount fixtures for each test, e.g. for
     exercising counter arithmetic/comparison."""
     self._ptc_1 = PpTokenCount.PpTokenCount()
     self._ptc_2 = PpTokenCount.PpTokenCount()