コード例 #1
0
def migratePages(node):
    #print >>sys.stderr, 'Migrating pages...'
    pages = []
    #print node
    for name, child in node.children.iteritems():
        if name == 'Glossary':
            continue
        page = raw_doc.RawPage()
        page.title.tokens.append(lexer.Token('WORD', name, 0, 0, 0))
        s = 'Page'
        page.name.tokens.append(
            lexer.Token('WORD', s + page.title.text, 0, 0, 0))
        if child.children.get('summary'):
            for text in child.children['summary'].texts:
                if text.startswith('type=text:'):
                    t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
                else:
                    t = lexer.Token('WORD', text, 0, 0, 0)
                raw_text = raw_doc.RawText(translateTokens([t]))
                raw_brief = raw_doc.RawBrief(raw_text)
                page.briefs.append(raw_brief)
        if child.children.get('description'):
            for text in child.children['description'].texts:
                if text.startswith('type=text:'):
                    t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
                    raw_par = raw_doc.RawParagraph(
                        raw_doc.RawText(translateTokens([t])))
                elif text.startswith('type=section:#'):
                    t = lexer.Token('WORD', text[len('type=section:#'):], 0, 0,
                                    0)
                    raw_par = raw_doc.RawSection(
                        raw_doc.RawText(translateTokens([t])))
                elif text.startswith('type=subsection:#.#'):
                    t = lexer.Token('WORD', text[len('type=subsection:#.#'):],
                                    0, 0, 0)
                    raw_par = raw_doc.RawSection(
                        raw_doc.RawText(translateTokens([t])), 1)
                else:
                    t = lexer.Token('WORD', text, 0, 0, 0)
                    raw_par = raw_doc.RawParagraph(
                        raw_doc.RawText(translateTokens([t])))
                page.body.addParagraph(raw_par)
        pages.append(page)
    print 'RESULTING PAGES %s' % [p.title.text for p in pages]
    return pages
コード例 #2
0
 def _processTextNode(self, node):
     """Translate the texts of *node* into paragraph-level raw_doc objects.

     'type=text:' entries become RawParagraph, 'type=section:#' entries
     become RawSection, 'type=subsection:#.#' entries become a level-1
     RawSection; anything else is kept verbatim as a RawParagraph.
     Returns the list of resulting objects.

     Fixes two defects in the previous version: the accumulator was
     initialized under the wrong name (``texts`` instead of ``pars``,
     raising NameError on the first append), and the first three branches
     assigned ``raw_text`` while ``raw_par`` was appended, so those
     branches appended an undefined or stale object.
     """
     pars = []
     for text in node.texts:
         if text.startswith('type=text:'):
             t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
             raw_par = raw_doc.RawParagraph(raw_doc.RawText([t]))
         elif text.startswith('type=section:#'):
             t = lexer.Token('WORD', text[len('type=section:#'):], 0, 0, 0)
             raw_par = raw_doc.RawSection(raw_doc.RawText([t]))
         elif text.startswith('type=subsection:#.#'):
             t = lexer.Token('WORD', text[len('type=subsection:#.#'):], 0,
                             0, 0)
             raw_par = raw_doc.RawSection(raw_doc.RawText([t]), 1)
         else:
             t = lexer.Token('WORD', text, 0, 0, 0)
             raw_par = raw_doc.RawParagraph(raw_doc.RawText([t]))
         pars.append(raw_par)
     return pars
コード例 #3
0
 def processRemarks(self, entry, node, is_first=True):
     """Append the remark texts of *node* to *entry* as paragraphs.

     When *is_first* is true, a 'Remarks' section heading is added
     before the paragraphs.
     """
     if is_first:
         heading = raw_doc.RawText(
             [lexer.Token('WORD', 'Remarks', 0, 0, 0)])
         entry.addParagraph(raw_doc.RawSection(heading))
     prefix = 'type=text:'
     for text in node.texts:
         # Both branches of the original translate the same way; only the
         # optional 'type=text:' marker is stripped first.
         content = text[len(prefix):] if text.startswith(prefix) else text
         tok = lexer.Token('WORD', content, 0, 0, 0)
         entry.addParagraph(
             raw_doc.RawParagraph(raw_doc.RawText(translateTokens([tok]))))
コード例 #4
0
 def processExample(self, entry, node):
     """Append the example texts of *node* to *entry*.

     An 'Examples' section heading is added first.  'type=code:' entries
     become RawCode blocks marked up as C++; 'type=text:' entries and
     anything else become ordinary paragraphs.
     """
     header = raw_doc.RawText([lexer.Token('WORD', 'Examples', 0, 0, 0)])
     entry.addParagraph(raw_doc.RawSection(header))
     for text in node.texts:
         if text.startswith('type=code:'):
             # Wrap the snippet so the formatter renders it as C++ code.
             tok = lexer.Token('WORD',
                               '{.cpp}\n' + text[len('type=code:'):] + '\n',
                               0, 0, 0)
             par = raw_doc.RawCode(raw_doc.RawText(translateTokens([tok])))
         else:
             if text.startswith('type=text:'):
                 content = text[len('type=text:'):]
             else:
                 content = text
             tok = lexer.Token('WORD', content, 0, 0, 0)
             par = raw_doc.RawParagraph(
                 raw_doc.RawText(translateTokens([tok])))
         entry.addParagraph(par)
コード例 #5
0
ファイル: dox_parser.py プロジェクト: zihua/seqan
 def getEntry(self):
     """Returns the Entry for the template parameter."""
     # NOTE(review): elsewhere in these examples RawSection is constructed
     # as RawSection(text[, level]); here it receives self.first_token as
     # an extra leading argument.  Presumably raw_doc.RawSection has a
     # (first_token, heading, level) constructor form — confirm against
     # the raw_doc module before touching this call.
     return raw_doc.RawSection(self.first_token,
                               raw_doc.RawText(self.tokens), self.level)
コード例 #6
0
 def testGetFormatted(self):
     """A level-1 section formats as a @subsection command."""
     sec = raw_doc.RawSection(self.txt, 1)
     formatted = sec.getFormatted(self.formatter)
     self.assertEqual(formatted, '@subsection aword\n')
コード例 #7
0
 def testGetCommand(self):
     """getCommand() maps level 0 to 'section' and level 1 to 'subsection'."""
     for level, expected in [(0, 'section'), (1, 'subsection')]:
         sec = raw_doc.RawSection(self.txt, level)
         self.assertEqual(sec.getCommand(), expected)
コード例 #8
0
 def testCreationWithLevel(self):
     """The constructor stores the heading text and the given level."""
     sec = raw_doc.RawSection(self.txt, 1)
     self.assertEqual(sec.heading, self.txt)
     self.assertEqual(sec.level, 1)
コード例 #9
0
 def testGetType(self):
     """A RawSection reports its type as 'section'."""
     sec = raw_doc.RawSection(self.txt, 1)
     self.assertEqual(sec.getType(), 'section')