Example #1
    def processValues(self, entry, node, value_nodes):
        result = []
        enum_name = translate(node.key)
        #print value_nodes
        for key, value in value_nodes.children.iteritems():
            raw_var = raw_doc.RawVariable()
            #print value
            var_name = value.key
            if '::' in enum_name:
                var_name = enum_name.split('::')[0] + '::' + var_name
            t = lexer.Token('WORD', enum_name, 0, 0, 0)
            raw_var.type = raw_doc.RawText([t])
            t = lexer.Token('WORD', enum_name + '#' + var_name, 0, 0, 0)
            raw_var.name = raw_doc.RawText([t])

            processed = set()
            if value.children.get('summary'):
                self.processSummary(raw_var, value.children['summary'])
                processed.add('summary')

            unhandled = set(value.children.keys()) - processed
            if unhandled:
                print 'Missed %s in %s' % (unhandled, node)
                sys.exit(1)

            #print raw_var.getFormatted(formatter)
            result.append(raw_var)
        return result
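
The naming scheme produced above (an enum value keyed as EnumName#VALUE, optionally prefixed by its namespace) can be reproduced in isolation. A minimal sketch with hypothetical names, assuming lexer and raw_doc are importable as in the examples on this page and that DoxFormatter renders a RawVariable as shown in Example #9:

import lexer
import raw_doc

def make_enum_value(enum_name, value_name):
    # Mirror processValues: the variable's type is the enum name and its own
    # name is qualified with '#', e.g. 'Direction#FORWARD'.
    var = raw_doc.RawVariable()
    var.type = raw_doc.RawText([lexer.Token('WORD', enum_name, 0, 0, 0)])
    var.name = raw_doc.RawText(
        [lexer.Token('WORD', enum_name + '#' + value_name, 0, 0, 0)])
    return var

formatter = raw_doc.DoxFormatter()
print make_enum_value('Direction', 'FORWARD').getFormatted(formatter)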
Example #2
File: dox_parser.py  Project: zihua/seqan
 def handle(self, token):
     # Handle first state here and the remaining in the parent class.
     #print >>sys.stderr, token.type, repr(token.val), self.type_read
     if self.substate == 'first_line':
         # If we have a line break in the first line then we go to the body
         # of the class documentation.
         if token.type in dox_tokens.LINE_BREAKS or token.type == 'EOF':
             #print >>sys.stderr, [v.val for v in self.name_tokens], [v.val for v in self.type_tokens]
             normalizeWhitespaceTokens(self.name_tokens)
             normalizeWhitespaceTokens(self.type_tokens)
             self.entry.name = raw_doc.RawText(self.name_tokens)
             if self.entry.name.tokens[-1].val.endswith(
                     ';'):  # remove semicolon
                 self.entry.name.tokens[-1].val = self.entry.name.tokens[
                     -1].val[:-1]
             self.entry.type = raw_doc.RawText(self.type_tokens)
             self.substate = 'body'
             return
         # Skip space at the beginning of the type.
         if not self.type_tokens and token.type == 'SPACE':
             return
         # Otherwise, we collect the token's value.
         if self.type_read:
             #print >>sys.stderr, 'NAME', token.type, repr(token.val)
             self.name_tokens.append(token)
         else:
             if token.type == 'SPACE':
                 self.type_read = True
             else:
                 #print >>sys.stderr, 'TYPE', token.type, repr(token.val)
                 self.type_tokens.append(token)
     else:
         GenericDocState.handle(self, token)
Example #3
 def setUp(self):
     self.tok_name = lexer.Token('WORD', 'name', 0, 0, 0)
     self.tok_text = lexer.Token('WORD', 'text', 0, 0, 0)
     self.tok_inout = lexer.Token('PARAM_IN_OUT', '[in,out]', 0, 0, 0)
     self.txt_name = raw_doc.RawText([self.tok_name])
     self.txt_text = raw_doc.RawText([self.tok_text])
     self.formatter = raw_doc.DoxFormatter()
Example #4
 def processReturns(self, entry, node):
     ts = []
     if node.children.get('summary'):
         for text in node.children['summary'].texts:
             t = lexer.Token('WORD', text, 0, 0, 0)
             ts.append(t)
     if node.children.get('text'):
         for text in node.children['text'].texts:
             t = lexer.Token('WORD', text, 0, 0, 0)
             ts.append(t)
     if node.children.get('remarks'):
         for text in node.children['remarks'].texts:
             t = lexer.Token('WORD', text, 0, 0, 0)
             ts.append(t)
     if node.children.get('note'):
         for text in node.children['note'].texts:
             t = lexer.Token('WORD', text, 0, 0, 0)
             ts.append(t)
     if node.children.get('type'):
         if node.children['type'].texts:
             ts.append(lexer.Token('WORD', ' Types: ', 0, 0, 0))
             ts.append(
                 lexer.Token(
                     'WORD',
                     translateTypename(node.children['type'].texts[0]), 0,
                     0, 0))
         for txt in node.children['type'].texts[1:]:
             ts.append(
                 lexer.Token('WORD', ', ' + translateTypename(txt), 0, 0,
                             0))
     if node.children.get('metafunction'):
         if node.children['metafunction'].texts:
             ts.append(lexer.Token('WORD', ' Metafunctions: ', 0, 0, 0))
             ts.append(
                 lexer.Token('WORD', node.children['metafunction'].texts[0],
                             0, 0, 0))
         for txt in node.children['metafunction'].texts[1:]:
             ts.append(lexer.Token('WORD', ', ' + txt, 0, 0, 0))
     if node.children.get('default'):
         if node.children['default'].texts:
             ts.append(lexer.Token('WORD', ' Default: ', 0, 0, 0))
             ts.append(
                 lexer.Token('WORD', node.children['default'].texts[0], 0,
                             0, 0))
         for txt in node.children['default'].texts[1:]:
             ts.append(lexer.Token('WORD', ', ' + txt, 0, 0, 0))
     ts = translateTokens(ts)
     name = raw_doc.RawText([lexer.Token('WORD', 'TReturn', 0, 0, 0)])
     raw_text = raw_doc.RawText(ts)
     raw_return = raw_doc.RawReturn(name, raw_text)
     entry.addReturn(raw_return)
     # Check that we processed all attributes.
     unhandled = set(node.children.keys()) - set([
         'summary', 'remarks', 'type', 'metafunction', 'text', 'param',
         'note', 'default'
     ])
     if unhandled:
         print 'Missed %s in %s' % (unhandled, node)
         sys.exit(1)
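
processReturns flattens several child attributes (summary, text, remarks, note, type, metafunction, default) into one token list and wraps it as a RawReturn named TReturn. A reduced sketch of that final construction step, with hypothetical token values and the lexer/raw_doc API used above:

import lexer
import raw_doc

ts = [lexer.Token('WORD', 'The resulting score.', 0, 0, 0),
      lexer.Token('WORD', ' Types: ', 0, 0, 0),
      lexer.Token('WORD', 'int', 0, 0, 0)]
name = raw_doc.RawText([lexer.Token('WORD', 'TReturn', 0, 0, 0)])
raw_return = raw_doc.RawReturn(name, raw_doc.RawText(ts))
# entry.addReturn(raw_return)  # as done at the end of processReturns above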
Example #5
 def processDescription(self, entry, node):
     for text in node.texts:
         if text.startswith('type=text:'):
             t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
         else:
             t = lexer.Token('WORD', text, 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
         entry.addParagraph(raw_doc.RawParagraph(raw_text))
Example #6
 def processSummary(self, entry, node):
     for text in node.texts:
         if text.startswith('type=text:'):
             t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
         else:
             t = lexer.Token('WORD', text, 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
         entry.addBrief(raw_doc.RawBrief(raw_text))
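
Examples #5, #6, #10, #18, #20, and #22 all strip the same legacy 'type=text:' marker before building a token. A small helper capturing that shared step might look like this (a sketch, not part of the original module):

def stripTextPrefix(text, prefix='type=text:'):
    # Return text without the legacy 'type=text:' marker, if present.
    if text.startswith(prefix):
        return text[len(prefix):]
    return text

# Usage mirroring processSummary above:
#   t = lexer.Token('WORD', stripTextPrefix(text), 0, 0, 0)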
Example #7
 def setUp(self):
     self.path_t = lexer.Token('WORD', 'apath', 0, 0, 0)
     self.path = raw_doc.RawText([self.path_t])
     self.snippet_t0 = lexer.Token('WORD', 'The', 0, 0, 0)
     self.snippet_t1 = lexer.Token('SPACE', ' ', 0, 0, 0)
     self.snippet_t2 = lexer.Token('WORD', 'snippet', 0, 0, 0)
     self.snippet = raw_doc.RawText(
         [self.snippet_t0, self.snippet_t1, self.snippet_t2])
     self.formatter = raw_doc.DoxFormatter()
Example #8
 def testGetFormatted(self):
     b = raw_doc.RawBrief(raw_doc.RawText([self.brief_tok]))
     entry = raw_doc.RawEntry([b])
     entry.name = raw_doc.RawText([self.name_tok])
     entry.title = raw_doc.RawText([self.title_tok])
     entry.sees = [raw_doc.RawSee(raw_doc.RawText([self.tok_see]))]
     formatter = raw_doc.DoxFormatter()
     msg = ('@<entry> Concept Concept Title\n\n'
            '@brief This is brief.\n\n'
            '@see See\n\n')
     self.assertMultiLineEqual(entry.getFormatted(formatter), msg)
Example #9
 def testGetFormatted(self):
     b = raw_doc.RawBrief(raw_doc.RawText([self.brief_tok]))
     code_entry = raw_doc.RawVariable([b])
     code_entry.name = raw_doc.RawText([self.name_tok])
     code_entry.type = raw_doc.RawText([self.type_tok])
     code_entry.sees = [raw_doc.RawSee(raw_doc.RawText([self.tok_see]))]
     s = raw_doc.RawSignature(raw_doc.RawText([self.tok_sig]))
     code_entry.addSignature(s)
     txt = ('@var int var\n\n'
            '@brief This is brief.\n\n'
            '@signature payload\n\n'
            '@see See\n\n')
     self.assertMultiLineEqual(code_entry.getFormatted(self.formatter), txt)
Example #10
 def processRemarks(self, entry, node, is_first=True):
     if is_first:
         raw_text = raw_doc.RawText(
             [lexer.Token('WORD', 'Remarks', 0, 0, 0)])
         raw_section = raw_doc.RawSection(raw_text)
         entry.addParagraph(raw_section)
     for text in node.texts:
         if text.startswith('type=text:'):
             t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
         else:
             t = lexer.Token('WORD', text, 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
         entry.addParagraph(raw_doc.RawParagraph(raw_text))
Example #11
    def processTags(self, entry, node, tag_nodes):
        result = []
        group_name = translate(node.key)
        for key, value in tag_nodes.children.iteritems():
            raw_tag = raw_doc.RawTag()
            tag_name = value.key
            t = lexer.Token('WORD', group_name + '#' + tag_name, 0, 0, 0)
            raw_tag.name = raw_doc.RawText([t])

            processed = set()
            if value.children.get('summary'):
                self.processSummary(raw_tag, value.children['summary'])
                processed.add('summary')
            if value.children.get('remarks'):
                self.processRemarks(raw_tag, value.children['remarks'])
                processed.add('remarks')
            if value.children.get('remark'):
                self.processRemarks(raw_tag, value.children['remark'])
                processed.add('remark')
            if value.children.get('text'):
                self.processRemarks(raw_tag, value.children['text'])
                processed.add('text')
            if value.children.get('type'):
                ts = []
                if value.children['type'].texts:
                    ts.append(lexer.Token('WORD', ' Types: ', 0, 0, 0))
                    ts.append(
                        lexer.Token('WORD', value.children['type'].texts[0], 0,
                                    0, 0))
                for txt in value.children['type'].texts[1:]:
                    ts.append(lexer.Token('WORD', ', ' + txt, 0, 0, 0))
                raw_text = raw_doc.RawText(ts)
                entry.addParagraph(raw_doc.RawParagraph(raw_text))
                processed.add('type')

            processed.add('signature')  # Ignore.
            processed.add('include')  # TODO(holtgrew): Required here?
            processed.add('see')  # TODO(holtgrew): Required here?
            # We do not have double-linking any more.
            processed.add('function')

            unhandled = set(value.children.keys()) - processed
            if unhandled:
                print 'Missed %s in %s processed: %s' % (unhandled, node,
                                                         processed)
                sys.exit(1)

            #print raw_tag.getFormatted(formatter)
            result.append(raw_tag)
        return result
Example #12
 def testGetFormatted(self):
     b = raw_doc.RawBrief(raw_doc.RawText([self.brief_tok]))
     code_entry = raw_doc.RawCodeEntry([b])
     code_entry.name = raw_doc.RawText([self.name_tok])
     code_entry.title = raw_doc.RawText([self.title_tok])
     code_entry.sees = [raw_doc.RawSee(raw_doc.RawText([self.tok_see]))]
     s = raw_doc.RawSignature(raw_doc.RawText([self.tok_sig]))
     code_entry.addSignature(s)
     formatter = raw_doc.DoxFormatter()
     txt = ('@<code entry> Concept Concept Title\n\n'
            '@brief This is brief.\n\n'
            '@signature payload\n\n'
            '@see See\n\n')
     self.assertMultiLineEqual(code_entry.getFormatted(formatter), txt)
Example #13
 def processFiles(self, entry, node):
     for text in node.texts:
         if text.startswith('../'):
             text = text[len('../'):]
         t = lexer.Token('WORD', text, 0, 0, 0)
         raw_text = raw_doc.RawText(translateTokens([t]))
         entry.addParagraph(raw_doc.RawInclude(raw_text))
Example #14
 def testAddSee(self):
     entry = raw_doc.RawEntry()
     self.assertEqual(entry.sees, [])
     s = raw_doc.RawSee(raw_doc.RawText())
     entry.addSee(s)
     self.assertEqual(len(entry.sees), 1)
     self.assertEqual(entry.sees, [s])
Example #15
 def testAddBrief(self):
     entry = raw_doc.RawEntry()
     self.assertEqual(entry.briefs, [])
     b = raw_doc.RawBrief(raw_doc.RawText())
     entry.addBrief(b)
     self.assertEqual(len(entry.briefs), 1)
     self.assertEqual(entry.briefs, [b])
Example #16
 def testInitialization(self):
     entry = raw_doc.RawEntry()
     self.assertEqual(entry.name.text, raw_doc.RawText().text)
     self.assertEqual(len(entry.briefs), 0)
     self.assertEqual(entry.body, raw_doc.RawBody())
     self.assertEqual(entry.sees, [])
     self.assertEqual(entry.command, '<entry>')
Example #17
    def handleCommandClosing(self):
        """Handle closing of current command."""
        assert self.current_cmd == 'COMMAND_LINK', 'Only known command.'
        if self.current_cmd == 'COMMAND_LINK':
            # Trim leading/trailing whitespace tokens
            def isWhitespace(t):
                return t.type in dox_tokens.WHITESPACE

            while self.tokens_cmd and isWhitespace(self.tokens_cmd[0]):
                self.tokens_cmd.pop(0)
            while self.tokens_cmd and isWhitespace(self.tokens_cmd[-1]):
                self.tokens_cmd.pop(-1)
            if not self.tokens_cmd:
                print >> sys.stderr, 'WARNING: Empty @link @endlink.'
                return
            # Get link target.
            target_token = self.tokens_cmd.pop(0)
            # Trim leading whitespace again.
            while self.tokens_cmd and isWhitespace(self.tokens_cmd[0]):
                self.tokens_cmd.pop(0)
            # Translate any remaining non-whitespace tokens.
            title_tokens = self.tokens_cmd
            link_text = raw_doc.RawText(title_tokens)
            conv = RawTextToTextNodeConverter()
            link_text_node = conv.run(link_text)
            link_text_node.type = 'a'
            link_text_node.attrs = {'href': 'seqan:' + target_token.val}
            self.current.addChild(link_text_node)
        self.tokens_cmd = []
        self.current_cmd = None
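
The leading/trailing whitespace trimming in handleCommandClosing is self-contained and can be factored out. A standalone sketch, assuming dox_tokens.WHITESPACE is a collection of token type names as it is used above:

import dox_tokens

def trimWhitespaceTokens(tokens):
    # Drop leading and trailing whitespace tokens in place and return the list.
    while tokens and tokens[0].type in dox_tokens.WHITESPACE:
        tokens.pop(0)
    while tokens and tokens[-1].type in dox_tokens.WHITESPACE:
        tokens.pop(-1)
    return tokens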
Example #18
def migratePages(node):
    #print >>sys.stderr, 'Migrating pages...'
    pages = []
    #print node
    for name, child in node.children.iteritems():
        if name == 'Glossary':
            continue
        page = raw_doc.RawPage()
        page.title.tokens.append(lexer.Token('WORD', name, 0, 0, 0))
        s = 'Page'
        page.name.tokens.append(
            lexer.Token('WORD', s + page.title.text, 0, 0, 0))
        if child.children.get('summary'):
            for text in child.children['summary'].texts:
                if text.startswith('type=text:'):
                    t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
                else:
                    t = lexer.Token('WORD', text, 0, 0, 0)
                raw_text = raw_doc.RawText(translateTokens([t]))
                raw_brief = raw_doc.RawBrief(raw_text)
                page.briefs.append(raw_brief)
        if child.children.get('description'):
            for text in child.children['description'].texts:
                if text.startswith('type=text:'):
                    t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
                    raw_par = raw_doc.RawParagraph(
                        raw_doc.RawText(translateTokens([t])))
                elif text.startswith('type=section:#'):
                    t = lexer.Token('WORD', text[len('type=section:#'):], 0, 0,
                                    0)
                    raw_par = raw_doc.RawSection(
                        raw_doc.RawText(translateTokens([t])))
                elif text.startswith('type=subsection:#.#'):
                    t = lexer.Token('WORD', text[len('type=subsection:#.#'):],
                                    0, 0, 0)
                    raw_par = raw_doc.RawSection(
                        raw_doc.RawText(translateTokens([t])), 1)
                else:
                    t = lexer.Token('WORD', text, 0, 0, 0)
                    raw_par = raw_doc.RawParagraph(
                        raw_doc.RawText(translateTokens([t])))
                page.body.addParagraph(raw_par)
        pages.append(page)
    print 'RESULTING PAGES %s' % [p.title.text for p in pages]
    return pages
Example #19
 def testConstructionWithTokens(self):
     tokens = [
         lexer.Token('WORD', 'test', 0, 0, 0),
         lexer.Token('SPACE', ' ', 0, 0, 0),
         lexer.Token('WORD', 'foo', 0, 0, 0)
     ]
     text = raw_doc.RawText(tokens)
     self.assertEqual(text.tokens, tokens)
     self.failIf(text.empty)
Example #20
 def _processTextNode(self, node):
     pars = []
     for text in node.texts:
         if text.startswith('type=text:'):
             t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
             raw_par = raw_doc.RawParagraph(raw_doc.RawText([t]))
         elif text.startswith('type=section:#'):
             t = lexer.Token('WORD', text[len('type=section:#'):], 0, 0, 0)
             raw_par = raw_doc.RawSection(raw_doc.RawText([t]))
         elif text.startswith('type=subsection:#.#'):
             t = lexer.Token('WORD', text[len('type=subsection:#.#'):], 0,
                             0, 0)
             raw_par = raw_doc.RawSection(raw_doc.RawText([t]), 1)
         else:
             t = lexer.Token('WORD', text, 0, 0, 0)
             raw_par = raw_doc.RawParagraph(raw_doc.RawText([t]))
         pars.append(raw_par)
     return pars
Example #21
 def processExtends(self, entry, node):
     for text in node.texts:
         if '\u0001' in text:
             continue  # do not add inherited
         if text.startswith('Class.'):
             t = lexer.Token('WORD', text[len('Class.'):], 0, 0, 0)
             raw_text = raw_doc.RawText([t])
             entry.addExtends(raw_doc.RawExtends(raw_text))
         elif text.startswith('Spec.'):
             t = lexer.Token('WORD', text[len('Spec.'):], 0, 0, 0)
             raw_text = raw_doc.RawText([t])
             entry.addExtends(raw_doc.RawExtends(raw_text))
         elif text.startswith('Concept.'):
             t = lexer.Token('WORD', text[len('Concept.'):], 0, 0, 0)
             raw_text = raw_doc.RawText([t])
             if hasattr(entry, 'addImplements'):
                 entry.addImplements(raw_doc.RawImplements(raw_text))
             else:
                 entry.addExtends(raw_doc.RawExtends(raw_text))
         else:
             assert False, str(node)
Example #22
 def processExample(self, entry, node):
     # Add example header.
     raw_text = raw_doc.RawText([lexer.Token('WORD', 'Examples', 0, 0, 0)])
     raw_section = raw_doc.RawSection(raw_text)
     entry.addParagraph(raw_section)
     for text in node.texts:
         if text.startswith('type=text:'):
             t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
             raw_par = raw_doc.RawParagraph(raw_text)
         elif text.startswith('type=code:'):
             t = lexer.Token('WORD',
                             '{.cpp}\n' + text[len('type=code:'):] + '\n',
                             0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
             raw_par = raw_doc.RawCode(raw_text)
         else:
             t = lexer.Token('WORD', text, 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
             raw_par = raw_doc.RawParagraph(raw_text)
         entry.addParagraph(raw_par)
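
The 'type=code:' branch above prefixes the snippet with '{.cpp}' so the resulting RawCode block carries a language hint. A reduced sketch of just that conversion, with a hypothetical input string (the original additionally runs the token through translateTokens):

import lexer
import raw_doc

text = 'type=code:int main() { return 0; }'
code_body = '{.cpp}\n' + text[len('type=code:'):] + '\n'
raw_code = raw_doc.RawCode(
    raw_doc.RawText([lexer.Token('WORD', code_body, 0, 0, 0)]))
# entry.addParagraph(raw_code)  # 'entry' as in processExample above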
Example #23
def addGroups(raw_entries):
    """Add implicitely given groups to raw_entries."""
    # Collect known names.
    names = set()
    for e in raw_entries:
        name = e.name.text
        if '#' not in name and '::' not in name:
            names.add(name)
    # Collect unknown names.
    unknown = set()
    for e in raw_entries:
        name = e.name.text
        if '#' in name and '::' not in name:
            prefix = name.split('#', 1)[0]
            if prefix not in names:
                unknown.add(prefix)
    # Add groups.
    for name in unknown:
        group = raw_doc.RawGroup()
        t = lexer.Token('WORD', name, 0, 0, 0)
        group.name = raw_doc.RawText(translateTokens([t]))
        raw_entries.append(group)
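
A usage illustration of addGroups with a hypothetical entry name: an entry called 'StringSet#append' whose prefix 'StringSet' has no entry of its own causes a RawGroup named 'StringSet' to be appended (raw_doc API as used in the other examples):

import lexer
import raw_doc

e = raw_doc.RawEntry()
e.name = raw_doc.RawText([lexer.Token('WORD', 'StringSet#append', 0, 0, 0)])
entries = [e]
addGroups(entries)
# entries now also contains a RawGroup whose name text is 'StringSet'.
print [x.name.text for x in entries]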
Example #24
    def processParams(self, entry, node):
        for name, child in node.children.iteritems():
            ts = []
            if child.children.get('summary'):
                for summary in child.children['summary'].texts:
                    ts.append(lexer.Token('WORD', translate(summary), 0, 0, 0))
            if child.children.get('text'):
                for remark in child.children['text'].texts:
                    ts.append(lexer.Token('WORD', remark, 0, 0, 0))
            if child.children.get('tableheader'):
                for line in child.children['tableheader'].texts:
                    ts.append(lexer.Token('WORD', line, 0, 0, 0))
            if child.children.get('table'):
                for line in child.children['table'].texts:
                    ts.append(lexer.Token('WORD', line, 0, 0, 0))
            if child.children.get('remarks'):
                for remark in child.children['remarks'].texts:
                    ts.append(lexer.Token('WORD', remark, 0, 0, 0))
            if child.children.get('remark'):
                for remark in child.children['remark'].texts:
                    ts.append(lexer.Token('WORD', remark, 0, 0, 0))
            if child.children.get('note'):
                for note in child.children['note'].texts:
                    ts.append(lexer.Token('WORD', note, 0, 0, 0))
            if child.children.get('type'):
                if child.children['type'].texts:
                    ts.append(lexer.Token('WORD', ' Types: ', 0, 0, 0))
                    # TODO(holtgrew): Add @link?
                    ts.append(
                        lexer.Token(
                            'WORD',
                            translateTypename(child.children['type'].texts[0]),
                            0, 0, 0))
                for txt in child.children['type'].texts[1:]:
                    # TODO(holtgrew): Add @link?
                    ts.append(
                        lexer.Token('WORD', ', ' + translateTypename(txt), 0,
                                    0, 0))
            if child.children.get('concept'):
                if child.children['concept'].texts:
                    ts.append(lexer.Token('WORD', ' Concepts: ', 0, 0, 0))
                    ts.append(
                        lexer.Token('WORD', child.children['concept'].texts[0],
                                    0, 0, 0))
                for txt in child.children['concept'].texts[1:]:
                    ts.append(lexer.Token('WORD', ', ' + txt, 0, 0, 0))
            if child.children.get('class'):
                if child.children['class'].texts:
                    ts.append(lexer.Token('WORD', ' Classes: ', 0, 0, 0))
                    ts.append(
                        lexer.Token('WORD', child.children['class'].texts[0],
                                    0, 0, 0))
                for txt in child.children['class'].texts[1:]:
                    ts.append(lexer.Token('WORD', ', ' + txt, 0, 0, 0))
            if child.children.get('default'):
                if child.children['default'].texts:
                    ts.append(lexer.Token('WORD', ' Default: ', 0, 0, 0))
                    ts.append(
                        lexer.Token('WORD', child.children['default'].texts[0],
                                    0, 0, 0))
                for txt in child.children['default'].texts[1:]:
                    ts.append(lexer.Token('WORD', ', ' + txt, 0, 0, 0))
            if child.children.get('value'):
                if child.children['value'].texts:
                    ts.append(lexer.Token('WORD', ' Values: ', 0, 0, 0))
                    ts.append(
                        lexer.Token('WORD', child.children['value'].texts[0],
                                    0, 0, 0))
                for txt in child.children['value'].texts[1:]:
                    ts.append(lexer.Token('WORD', ', ' + txt, 0, 0, 0))
            if child.children.get('metafunction'):
                if child.children['metafunction'].texts:
                    ts.append(lexer.Token('WORD', ' Metafunctions: ', 0, 0, 0))
                    ts.append(
                        lexer.Token('WORD',
                                    child.children['metafunction'].texts[0],
                                    0, 0, 0))
                for txt in child.children['metafunction'].texts[1:]:
                    ts.append(lexer.Token('WORD', ', ' + txt, 0, 0, 0))
            if child.children.get('see'):
                if child.children['see'].texts:
                    ts.append(lexer.Token('WORD', ' Sees: ', 0, 0, 0))
                    ts.append(
                        lexer.Token('WORD', child.children['see'].texts[0], 0,
                                    0, 0))
                for txt in child.children['see'].texts[1:]:
                    ts.append(lexer.Token('WORD', ', ' + txt, 0, 0, 0))
            ts = translateTokens(ts)
            raw_text = raw_doc.RawText(ts)
            name_text = raw_doc.RawText([lexer.Token('WORD', name, 0, 0, 0)])
            if hasattr(entry, 'addParam'):
                raw_param = raw_doc.RawParam(name_text, raw_text)
                entry.addParam(raw_param)
            else:
                raw_param = raw_doc.RawTParam(name_text, raw_text)
                entry.addTParam(raw_param)

            # Check that we processed all attributes.
            unhandled = set(child.children.keys()) - set([
                'summary', 'remarks', 'type', 'default', 'text', 'concept',
                'metafunction', 'remark', 'note', 'see', 'class', 'value',
                'nowarn', 'tableheader', 'table'
            ])
            if unhandled:
                print 'Missed %s in %s' % (unhandled, node)
                sys.exit(1)
Example #25
 def processSignatures(self, entry, node):
     for text in node.texts:
         t = lexer.Token('WORD', text, 0, 0, 0)
         raw_text = raw_doc.RawText(translateTokens([t]))
         raw_sig = raw_doc.RawSignature(raw_text)
         entry.addSignature(raw_sig)
Example #26
 def processStatus(self, entry, node):
     # TODO(holtgrew): Add support for @deprecated.
     for text in node.texts:
         t = lexer.Token('WORD', 'Status: ' + text, 0, 0, 0)
         raw_text = raw_doc.RawText(translateTokens([t]))
         entry.addParagraph(raw_doc.RawParagraph(raw_text))
Example #27
 def processCite(self, entry, node):
     for text in node.texts:
         t = lexer.Token('WORD', text, 0, 0, 0)
         raw_text = raw_doc.RawText(translateTokens([t]))
         entry.addParagraph(raw_doc.RawParagraph(raw_text))
Example #28
 def processWiki(self, entry, node):
     for text in node.texts:
         t = lexer.Token('WORD', 'http://trac.seqan.de/wiki/' + text, 0, 0,
                         0)
         raw_text = raw_doc.RawText([t])
         entry.addParagraph(raw_doc.RawParagraph(raw_text))
Example #29
 def processInclude(self, entry, node):
     for text in node.texts:
         t = lexer.Token('WORD', text, 0, 0, 0)
         raw_text = raw_doc.RawText(translateTokens([t]))
         raw_headerfile = raw_doc.RawHeaderfile(raw_text)
         entry.addHeaderfile(raw_headerfile)
Example #30
 def processDemo(self, entry, node):
     for text in node.texts:
         t1 = lexer.Token('WORD', 'Demo: ', 0, 0, 0)
         t2 = lexer.Token('WORD', text, 0, 0, 0)
         raw_text = raw_doc.RawText(translateTokens([t1, t2]))
         entry.addParagraph(raw_doc.RawParagraph(raw_text))