Code example #1
 def testAddParagraph(self):
     entry = raw_doc.RawEntry()
     self.assertEqual(entry.body, raw_doc.RawBody())
     p = raw_doc.RawParagraph()
     entry.addParagraph(p)
     b = raw_doc.RawBody()
     b.addParagraph(p)
     self.assertEqual(entry.body, b)
Code example #2
def migratePages(node):
    """Migrate the legacy page tree below node into RawPage objects."""
    pages = []
    for name, child in node.children.iteritems():
        if name == 'Glossary':
            continue
        page = raw_doc.RawPage()
        page.title.tokens.append(lexer.Token('WORD', name, 0, 0, 0))
        page.name.tokens.append(
            lexer.Token('WORD', 'Page' + page.title.text, 0, 0, 0))
        if child.children.get('summary'):
            for text in child.children['summary'].texts:
                if text.startswith('type=text:'):
                    t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
                else:
                    t = lexer.Token('WORD', text, 0, 0, 0)
                raw_text = raw_doc.RawText(translateTokens([t]))
                raw_brief = raw_doc.RawBrief(raw_text)
                page.briefs.append(raw_brief)
        if child.children.get('description'):
            for text in child.children['description'].texts:
                if text.startswith('type=text:'):
                    t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
                    raw_par = raw_doc.RawParagraph(
                        raw_doc.RawText(translateTokens([t])))
                elif text.startswith('type=section:#'):
                    t = lexer.Token('WORD', text[len('type=section:#'):], 0, 0,
                                    0)
                    raw_par = raw_doc.RawSection(
                        raw_doc.RawText(translateTokens([t])))
                elif text.startswith('type=subsection:#.#'):
                    t = lexer.Token('WORD', text[len('type=subsection:#.#'):],
                                    0, 0, 0)
                    raw_par = raw_doc.RawSection(
                        raw_doc.RawText(translateTokens([t])), 1)
                else:
                    t = lexer.Token('WORD', text, 0, 0, 0)
                    raw_par = raw_doc.RawParagraph(
                        raw_doc.RawText(translateTokens([t])))
                page.body.addParagraph(raw_par)
        pages.append(page)
    print 'RESULTING PAGES %s' % [p.title.text for p in pages]
    return pages
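
To make the expected input concrete, here is a minimal sketch of driving migratePages with a hand-built tree. The Node class is a hypothetical stand-in for the real parse-tree type; the function only relies on a children dict and a texts list, plus the raw_doc and lexer modules from the surrounding migration package.

# Node is a hypothetical stand-in for the legacy parse-tree type; only
# .children (dict of name -> node) and .texts (list of strings) are used.
class Node(object):
    def __init__(self, texts=None, children=None):
        self.texts = texts or []
        self.children = children or {}

root = Node(children={
    'Installation': Node(children={
        'summary': Node(texts=['type=text:How to install SeqAn.']),
        'description': Node(texts=['type=section:#Getting Started',
                                   'type=text:Download the sources.']),
    }),
})
pages = migratePages(root)  # prints: RESULTING PAGES ['Installation']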
Code example #3
 def _processTextNode(self, node):
     """Convert the texts of node into a list of raw paragraph objects."""
     pars = []
     for text in node.texts:
         if text.startswith('type=text:'):
             t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
             raw_par = raw_doc.RawParagraph(raw_doc.RawText([t]))
         elif text.startswith('type=section:#'):
             t = lexer.Token('WORD', text[len('type=section:#'):], 0, 0, 0)
             raw_par = raw_doc.RawSection(raw_doc.RawText([t]))
         elif text.startswith('type=subsection:#.#'):
             t = lexer.Token('WORD', text[len('type=subsection:#.#'):], 0,
                             0, 0)
             raw_par = raw_doc.RawSection(raw_doc.RawText([t]), 1)
         else:
             t = lexer.Token('WORD', text, 0, 0, 0)
             raw_par = raw_doc.RawParagraph(raw_doc.RawText([t]))
         pars.append(raw_par)
     return pars
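
For reference, the dispatch above maps each legacy marker to a paragraph class: 'type=text:' becomes a RawParagraph, 'type=section:#' a RawSection, 'type=subsection:#.#' a RawSection at level 1, and unmarked text falls back to a plain RawParagraph.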
Code example #4
 def processDescription(self, entry, node):
     for text in node.texts:
         # Strip the legacy 'type=text:' marker if present.
         if text.startswith('type=text:'):
             text = text[len('type=text:'):]
         t = lexer.Token('WORD', text, 0, 0, 0)
         raw_text = raw_doc.RawText(translateTokens([t]))
         entry.addParagraph(raw_doc.RawParagraph(raw_text))
Code example #5
 def processExample(self, entry, node):
     # Add example header.
     raw_text = raw_doc.RawText([lexer.Token('WORD', 'Examples', 0, 0, 0)])
     raw_section = raw_doc.RawSection(raw_text)
     entry.addParagraph(raw_section)
     for text in node.texts:
         if text.startswith('type=text:'):
             t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
             raw_par = raw_doc.RawParagraph(raw_text)
         elif text.startswith('type=code:'):
             t = lexer.Token('WORD',
                             '{.cpp}\n' + text[len('type=code:'):] + '\n',
                             0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
             raw_par = raw_doc.RawCode(raw_text)
         else:
             t = lexer.Token('WORD', text, 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
             raw_par = raw_doc.RawParagraph(raw_text)
         entry.addParagraph(raw_par)
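
A hedged sketch of the effect of a 'type=code:' text, reusing the hypothetical Node stub from the migratePages sketch; 'migration' stands for an instance of the surrounding class and is likewise an assumption.

node = Node(texts=['type=code:int main() { return 0; }'])
entry = raw_doc.RawEntry()
migration.processExample(entry, node)
# entry.body now holds an 'Examples' RawSection followed by one RawCode
# paragraph whose text starts with the '{.cpp}' marker added above.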
Code example #6
 def processRemarks(self, entry, node, is_first=True):
     if is_first:
         raw_text = raw_doc.RawText(
             [lexer.Token('WORD', 'Remarks', 0, 0, 0)])
         raw_section = raw_doc.RawSection(raw_text)
         entry.addParagraph(raw_section)
     for text in node.texts:
         # Strip the legacy 'type=text:' marker if present.
         if text.startswith('type=text:'):
             text = text[len('type=text:'):]
         t = lexer.Token('WORD', text, 0, 0, 0)
         raw_text = raw_doc.RawText(translateTokens([t]))
         entry.addParagraph(raw_doc.RawParagraph(raw_text))
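
The is_first flag lets several remark-like nodes (remarks, remark, text in example #7 below) share one 'Remarks' header. A sketch of the assumed call pattern, with migration and the node variables hypothetical:

migration.processRemarks(entry, remarks_node)                  # emits the header
migration.processRemarks(entry, more_remarks, is_first=False)  # header suppressed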
Code example #7
    def processTags(self, entry, node, tag_nodes):
        """Build a RawTag for each tag node grouped below node."""
        result = []
        group_name = translate(node.key)
        for value in tag_nodes.children.itervalues():
            raw_tag = raw_doc.RawTag()
            tag_name = value.key
            t = lexer.Token('WORD', group_name + '#' + tag_name, 0, 0, 0)
            raw_tag.name = raw_doc.RawText([t])

            processed = set()
            if value.children.get('summary'):
                self.processSummary(raw_tag, value.children['summary'])
                processed.add('summary')
            if value.children.get('remarks'):
                self.processRemarks(raw_tag, value.children['remarks'])
                processed.add('remarks')
            if value.children.get('remark'):
                self.processRemarks(raw_tag, value.children['remark'])
                processed.add('remark')
            if value.children.get('text'):
                self.processRemarks(raw_tag, value.children['text'])
                processed.add('text')
            if value.children.get('type'):
                ts = []
                if value.children['type'].texts:
                    ts.append(lexer.Token('WORD', ' Types: ', 0, 0, 0))
                    ts.append(
                        lexer.Token('WORD', value.children['type'].texts[0], 0,
                                    0, 0))
                for txt in value.children['type'].texts[1:]:
                    ts.append(lexer.Token('WORD', ', ' + txt, 0, 0, 0))
                raw_text = raw_doc.RawText(ts)
                entry.addParagraph(raw_doc.RawParagraph(raw_text))
                processed.add('type')

            processed.add('signature')  # Ignore.
            processed.add('include')  # TODO(holtgrew): Required here?
            processed.add('see')  # TODO(holtgrew): Required here?
            # We do not have double-linking any more.
            processed.add('function')

            unhandled = set(value.children.keys()) - processed
            if unhandled:
                print 'Missed %s in %s processed: %s' % (unhandled, node,
                                                         processed)
                sys.exit(1)

            result.append(raw_tag)
        return result
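
Note the naming scheme: a tag's display name is the translated group key joined to the tag key with '#', so a tag 'TSpec' under a group translated as 'Index' would be named 'Index#TSpec' (illustrative values only).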
Code example #8
 def processStatus(self, entry, node):
     # TODO(holtgrew): Add support for @deprecated.
     for text in node.texts:
         t = lexer.Token('WORD', 'Status: ' + text, 0, 0, 0)
         raw_text = raw_doc.RawText(translateTokens([t]))
         entry.addParagraph(raw_doc.RawParagraph(raw_text))
Code example #9
 def processCite(self, entry, node):
     for text in node.texts:
         t = lexer.Token('WORD', text, 0, 0, 0)
         raw_text = raw_doc.RawText(translateTokens([t]))
         entry.addParagraph(raw_doc.RawParagraph(raw_text))
Code example #10
 def processWiki(self, entry, node):
     for text in node.texts:
         t = lexer.Token('WORD', 'http://trac.seqan.de/wiki/' + text, 0, 0,
                         0)
         raw_text = raw_doc.RawText([t])
         entry.addParagraph(raw_doc.RawParagraph(raw_text))
Code example #11
 def processDemo(self, entry, node):
     for text in node.texts:
         t1 = lexer.Token('WORD', 'Demo: ', 0, 0, 0)
         t2 = lexer.Token('WORD', text, 0, 0, 0)
         raw_text = raw_doc.RawText(translateTokens([t1, t2]))
         entry.addParagraph(raw_doc.RawParagraph(raw_text))
Code example #12
 def setUp(self):
     self.t = lexer.Token('WORD', 'aword', 0, 0, 0)
     self.p = raw_doc.RawParagraph(raw_doc.RawText([self.t]))
     self.formatter = raw_doc.DoxFormatter()
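
A sketch of a test that could build on this fixture; it assumes RawParagraph exposes the same getFormatted(formatter) method that example #7 uses on RawTag.

 def testGetFormatted(self):
     # Hypothetical test: render the fixture paragraph and check that the
     # token text survives formatting (getFormatted on RawParagraph is an
     # assumption, mirrored from its use on RawTag in example #7).
     formatted = self.p.getFormatted(self.formatter)
     self.assertTrue('aword' in formatted)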