Example #1
def process_expressions(tree):
    node = tree.children[0]
    while id(node) != id(tree):
        if len(node.children) > 0:
            if node.rule == u'ASSIGNMENT':
                process_expr_tree(node)
                remove_parentheses(node)
                aux_node = ParseTree('VARLIST')
                aux_node.parent = node
                aux_node2 = ParseTree('EXPRLIST')
                aux_node2.parent = node
                aux_node.value = lexer.Token('DEV', u'1')
                aux_node2.value = lexer.Token('DEV', u'2')
                node.children[0].parent = aux_node
                node.children[0].next = None
                node.children[1].next = None
                node.children[1].parent = aux_node2
                aux_node.children.append(node.children[0])
                aux_node2.children.append(node.children[1])
                node.children = [aux_node, aux_node2]
                comma_detection(node)
                node = node.up_node()
            elif node.rule == u'EXPR' or node.rule == u'READ':
                process_expr_tree(node)
                remove_parentheses(node)
                comma_detection(node)
                node = node.up_node()
            else:
                node = node.children[0]
        else:
            node = node.up_node()
Example #2
 def setUp(self):
     self.brief_tok = lexer.Token('WORD', 'This is brief.', 0, 0, 0)
     self.name_tok = lexer.Token('WORD', 'var', 0, 0, 0)
     self.type_tok = lexer.Token('WORD', 'int', 0, 0, 0)
     self.tok_see = lexer.Token('WORD', 'See', 0, 0, 0)
     self.tok_sig = lexer.Token('WORD', 'payload', 0, 0, 0)
     self.formatter = raw_doc.DoxFormatter()
Example #3
 def setUp(self):
     self.tok_name = lexer.Token('WORD', 'name', 0, 0, 0)
     self.tok_text = lexer.Token('WORD', 'text', 0, 0, 0)
     self.tok_inout = lexer.Token('PARAM_IN_OUT', '[in,out]', 0, 0, 0)
     self.txt_name = raw_doc.RawText([self.tok_name])
     self.txt_text = raw_doc.RawText([self.tok_text])
     self.formatter = raw_doc.DoxFormatter()
Example #4
    def processValues(self, entry, node, value_nodes):
        result = []
        enum_name = translate(node.key)
        #print value_nodes
        for key, value in value_nodes.children.iteritems():
            raw_var = raw_doc.RawVariable()
            #print value
            var_name = value.key
            if '::' in enum_name:
                var_name = enum_name.split('::')[0] + '::' + var_name
            t = lexer.Token('WORD', enum_name, 0, 0, 0)
            raw_var.type = raw_doc.RawText([t])
            t = lexer.Token('WORD', enum_name + '#' + var_name, 0, 0, 0)
            raw_var.name = raw_doc.RawText([t])

            processed = set()
            if value.children.get('summary'):
                self.processSummary(raw_var, value.children['summary'])
                processed.add('summary')

            unhandled = set(value.children.keys()) - processed
            if unhandled:
                print 'Missed %s in %s' % (unhandled, node)
                sys.exit(1)

            #print raw_var.getFormatted(formatter)
            result.append(raw_var)
        return result
Example #5
 def setUp(self):
     self.brief_tok = lexer.Token('WORD', 'This is brief.', 0, 0, 0)
     self.name_tok = lexer.Token('WORD', 'Adaption', 0, 0, 0)
     self.title_tok = lexer.Token('WORD', 'Adaption Title', 0, 0, 0)
     self.tok_see = lexer.Token('WORD', 'See', 0, 0, 0)
     self.tok_sig = lexer.Token('WORD', 'payload', 0, 0, 0)
     self.formatter = raw_doc.DoxFormatter()
Example #6
 def processDescription(self, entry, node):
     for text in node.texts:
         if text.startswith('type=text:'):
             t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
         else:
             t = lexer.Token('WORD', text, 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
         entry.addParagraph(raw_doc.RawParagraph(raw_text))
Example #7
 def testConstructionWithTokens(self):
     tokens = [
         lexer.Token('WORD', 'test', 0, 0, 0),
         lexer.Token('SPACE', ' ', 0, 0, 0),
         lexer.Token('WORD', 'foo', 0, 0, 0)
     ]
     text = raw_doc.RawText(tokens)
     self.assertEqual(text.tokens, tokens)
     self.assertFalse(text.empty)
Example #8
 def processSummary(self, entry, node):
     for text in node.texts:
         if text.startswith('type=text:'):
             t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
         else:
             t = lexer.Token('WORD', text, 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
         entry.addBrief(raw_doc.RawBrief(raw_text))
Example #9
 def setUp(self):
     self.path_t = lexer.Token('WORD', 'apath', 0, 0, 0)
     self.path = raw_doc.RawText([self.path_t])
     self.snippet_t0 = lexer.Token('WORD', 'The', 0, 0, 0)
     self.snippet_t1 = lexer.Token('SPACE', ' ', 0, 0, 0)
     self.snippet_t2 = lexer.Token('WORD', 'snippet', 0, 0, 0)
     self.snippet = raw_doc.RawText(
         [self.snippet_t0, self.snippet_t1, self.snippet_t2])
     self.formatter = raw_doc.DoxFormatter()
Example #10
def passage(tokenQueue):
    """
	This function takes a token queue as input and returns a tuple containing
	a token and a token queue.
	>>> tokenQueue = make_test_queue("passage")
	>>> _passage, tokenQueue = passage(tokenQueue)
	>>> tokenQueue
	[Token('EOF', 'EOF', 'None')]
	>>> _passage
	Token('None', 'PASSAGE', 'None')

	Recall that the first component of a passage is the passage command.
	>>> _passage.children[0]
	Token('passage', 'PASSAGECOMMAND', '\passage')

	Passages can be followed by other passages.
	>>> tokenQueue = make_test_queue("passage2")
	>>> _passage, tokenQueue = passage(tokenQueue)

	Second passage should remain in tokenQueue, looping happens in story function.
	>>> tokenQueue
	[Token('passage', 'PASSAGECOMMAND', '\passage'), Token('{', 'LEFTCURLY', '{'), Token('second', 'CHARACTER', 'second'), Token('}', 'RIGHTCURLY', '}')]
	>>> _passage
	Token('None', 'PASSAGE', 'None')
	"""
    token = next_token(tokenQueue)
    #print("tokenQueue after getting next token in passage:", tokenQueue)
    _passage = lexer.Token(token_type="PASSAGE")
    _passage.children.append(token)
    _argument, tokenQueue = argument(
        tokenQueue)  #get arguments (right now just psgtitle)
    #print("tokenQueue after running argument in passage:", tokenQueue)
    if _argument == False:
        return (False, None)
    elif len(_argument) == 1:
        _passage.children.append(_argument[0])
    else:
        for i in _argument:
            _passage.children.append(i)
    token = lexer.Token()
    while token.token_type != "EOF" and token.token_type != "PASSAGECOMMAND":
        _text, tokenQueue = text(tokenQueue)
        #print("tokenQueue in while loop of passage:", tokenQueue)
        if _text:
            _passage.children.append(_text)
            token = next_token(tokenQueue)
            #print(token)
        else:
            print("Parsing error: text in passage not correctly formatted")
            return (False, None)
    #print(token)
    tokenQueue.put(token)
    return (_passage, tokenQueue)
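The doctests above rely on two helpers, make_test_queue and next_token, that are not part of this example. A minimal sketch of what they might look like, assuming a queue.Queue-based token stream (the TokenQueue class and the TEST_FIXTURES table are illustrative assumptions, not taken from the source):

import queue

class TokenQueue(queue.Queue):
    # FIFO of lexer.Token objects with a list-like repr, matching the doctest output.
    def __repr__(self):
        return repr(list(self.queue))

def next_token(tokenQueue):
    # Pop and return the token at the front of the queue.
    return tokenQueue.get()

def make_test_queue(name):
    # Build a TokenQueue for a named test input. In a real test the tokens would
    # come from the project's lexer; TEST_FIXTURES is a hypothetical dict mapping
    # test names to pre-lexed token lists.
    q = TokenQueue()
    for tok in TEST_FIXTURES[name]:
        q.put(tok)
    q.put(lexer.Token(token_type="EOF"))
    return q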
Example #11
    def processTags(self, entry, node, tag_nodes):
        result = []
        group_name = translate(node.key)
        for key, value in tag_nodes.children.iteritems():
            raw_tag = raw_doc.RawTag()
            tag_name = value.key
            t = lexer.Token('WORD', group_name + '#' + tag_name, 0, 0, 0)
            raw_tag.name = raw_doc.RawText([t])

            processed = set()
            if value.children.get('summary'):
                self.processSummary(raw_tag, value.children['summary'])
                processed.add('summary')
            if value.children.get('remarks'):
                self.processRemarks(raw_tag, value.children['remarks'])
                processed.add('remarks')
            if value.children.get('remark'):
                self.processRemarks(raw_tag, value.children['remark'])
                processed.add('remark')
            if value.children.get('text'):
                self.processRemarks(raw_tag, value.children['text'])
                processed.add('text')
            if value.children.get('type'):
                ts = []
                if value.children['type'].texts:
                    ts.append(lexer.Token('WORD', ' Types: ', 0, 0, 0))
                    ts.append(
                        lexer.Token('WORD', value.children['type'].texts[0], 0,
                                    0, 0))
                for txt in value.children['type'].texts[1:]:
                    ts.append(lexer.Token('WORD', ', ' + txt, 0, 0, 0))
                raw_text = raw_doc.RawText(ts)
                entry.addParagraph(raw_doc.RawParagraph(raw_text))
                processed.add('type')

            processed.add('signature')  # Ignore.
            processed.add('include')  # TODO(holtgrew): Required here?
            processed.add('see')  # TODO(holtgrew): Required here?
            # We do not have double-linking any more.
            processed.add('function')

            unhandled = set(value.children.keys()) - processed
            if unhandled:
                print 'Missed %s in %s processed: %s' % (unhandled, node,
                                                         processed)
                sys.exit(1)

            #print raw_tag.getFormatted(formatter)
            result.append(raw_tag)
        return result
Example #12
 def processRemarks(self, entry, node, is_first=True):
     if is_first:
         raw_text = raw_doc.RawText(
             [lexer.Token('WORD', 'Remarks', 0, 0, 0)])
         raw_section = raw_doc.RawSection(raw_text)
         entry.addParagraph(raw_section)
     for text in node.texts:
         if text.startswith('type=text:'):
             t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
         else:
             t = lexer.Token('WORD', text, 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
         entry.addParagraph(raw_doc.RawParagraph(raw_text))
Example #13
def story(tokenQueue):
    """
	The story function takes a token queue as input and returns a token if parsing
	is successful or False otherwise.
	>>> tokenQueue = make_test_queue("storypass")
	>>> _story = story(tokenQueue)
	>>> _story
	Token('None', 'STORY', 'None')

	"""
    _story = lexer.Token(token_type="STORY")
    _preamble, tokenQueue = preamble(tokenQueue)  #get the preamble
    #debug... print(tokenQueue)

    if _preamble == False:  # if the preamble is NOT correctly formatted...
        #stop right here!
        print("Parsing Error: Preamble not formatted correctly.")
        return False
    else:  # if preamble is correctly formatted,
        _story.children.append(
            _preamble)  # append the preamble to the story tree.
        passages = []  # used to store all passages
        while tokenQueue.queue[0].token_type != "EOF":
            print('passages:', passages)
            _passage, tokenQueue = passage(tokenQueue)  # get passage
            passages.append(_passage)  # add passage to passage list
        for pas in passages:  # for each passage...
            _story.children.append(pas)  # append passage to story tree
    return _story
Example #14
 def _parse(self, lr_table, token_list, code_parser):
     code_parser.on_parse_start()
     cursor = 0
     state_stack = [0]
     token_stack = [SYMBOL_END]
     for token in token_list:
         action, value = lr_table.get_action(state_stack[-1],
                                             token.get_symbol())
         while action == ACTION_R:
             p = self.grammar.get_context().get_production(value)
             prod_list = []
             for _ in xrange(len(p.get_production())):
                 prod_list.insert(0, token_stack.pop())
                 state_stack.pop()
             token_stack.append(lexer.Token(p.get_name()))
             state_stack.append(
                 lr_table.get_goto(state_stack[-1],
                                   token_stack[-1].get_symbol()))
             action, value = lr_table.get_action(state_stack[-1],
                                                 token.get_symbol())
             code_parser.do_semantics(p, token_stack[-1], prod_list)
         if action == ACTION_S:
             token_stack.append(token)
             state_stack.append(value)
             cursor += 1
         elif action == ACTION_ACC:
             print "==============================lr parse acc=============================="
         else:
             print "error"
             return
Example #15
def unary_prefix_evaluator(parser, sym):
    arg = parser.parse_to(sym.rprio)
    if arg is None:
        return CompositeNode(sym.token,
                             [Node(lexer.Token('ERROR', 'MISSING VALUE'))])
    else:
        return CompositeNode(sym.token, [arg])
Example #16
 def processFiles(self, entry, node):
     for text in node.texts:
         if text.startswith('../'):
             text = text[len('../'):]
         t = lexer.Token('WORD', text, 0, 0, 0)
         raw_text = raw_doc.RawText(translateTokens([t]))
         entry.addParagraph(raw_doc.RawInclude(raw_text))
Example #17
def binary_evaluator(parser, left_arg, sym):
    right_arg = parser.parse_to(sym.rprio)
    if right_arg is None:
        return CompositeNode(
            sym.token,
            [left_arg, Node(lexer.Token('ERROR', 'MISSING VALUE'))])
    else:
        return CompositeNode(sym.token, [left_arg, right_arg])
Example #18
 def match(self, nodes):
     types = [node.type for node in nodes]
     trail = ['expression', 'colon', 'expression', 'comma'
              ] * (len(types) // 4 + 1)
     if all(a in b.split('/')
            for a, b in zip(trail, types)) and len(types) % 4 in [0, 3]:
         children = []
         for i in range(0, len(types), 4):
             children.append(
                 ASTNode(lexer.Token('mapping/expression', ''),
                         [nodes[i], nodes[i + 2]]))
         return (len(nodes),
                 ASTNode(lexer.Token('bracket_expr/list/expression', ''), [
                     ASTNode(
                         lexer.Token('comma/comma_expr/expression', ','),
                         children)
                 ]))
Example #19
def needLeft(tok, toks):
    if (tok.tT.tType not in ['Identifier', 'OperatorOnly']) or \
            (tok.tT.text not in op.withLeft):
        # insert space or adjacency subop
        return (L.Token(tT=L.TokTT(text=(" " if tok.whiteB4 else ""),
                                   tType='OperatorOnly'),
                        indent=-1, whiteB4=False, location=tok.location),
                U.prependGen(tok, toks))
    return tok, toks
Example #20
    def _parse_array_offset(self, expr, env):
        """
        Parses the array offset.

        """
        self._match('[')
        num = self._parse_logical_expression(env)
        self._match(']')
        of_type = expr.get_type().get_of_type()
        width = ast.Constant(lexer.Num(of_type.get_width()), ty.Type.Int)
        expr_id = expr
        offset = ast.Binary(lexer.Token('*'), num, width)
        if type(expr) is ast.Access:
            expr_id = expr.get_access_id()
            offset = ast.Binary(lexer.Token('+'), expr.get_offset(), offset)
        return ast.Access(lexer.Word('[]', lexer.Tag.INDEX), of_type, expr_id,
                          offset)
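For concreteness, the offset built above is the index expression times the element width, and for a chained access the partial offsets are summed. A minimal, self-contained sketch of the same arithmetic with plain integers instead of AST nodes (the helper name and example widths are hypothetical):

def flat_offset(indices, widths):
    # widths[k] is the byte width of the type that remains after k+1 subscripts,
    # e.g. for int a[10][10] with 4-byte ints: widths = [40, 4].
    return sum(i * w for i, w in zip(indices, widths))

assert flat_offset([2, 3], [40, 4]) == 2 * 40 + 3 * 4  # a[2][3] -> byte offset 92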
Example #21
    def _get_next_token(self):
        try:
            self.cur_token = self.lexer.token()

            if self.cur_token is None:
                self.cur_token = lexer.Token(None, None, None)
        except lexer.LexerError as e:
            self._error('Lexer error at position %d' % e.pos)
Example #22
def argument(tokenQueue):
    """
	This function takes a token queue as input and returns a tuple containing a list of
	tokens (which may be length 1) and a token queue.

	>>> _argument, tokenQueue = argument(make_test_queue("argumentpass"))
	>>> _argument
	[Token('None', 'ARGUMENT', 'None')]

	>>> tokenQueue
	[Token('EOF', 'EOF', 'None')]

	The list _argument contains a list of the ARGUMENT type tokens.
	>>> _argument[0]
	Token('None', 'ARGUMENT', 'None')

	Each token in this list should have children, which are the actual arguments
	contained in curly braces.
	>>> _argument[0].children[0]
	Token('argument', 'CHARACTER', 'argument')

	A preamble macro cannot be an argument.
	>>> _argument, tokenQueue = argument(make_test_queue("argumentfail"))
	Parsing Error: argument provided is not a valid argument type.
	"""

    _argument = lexer.Token(token_type="ARGUMENT")
    token = next_token(tokenQueue)
    arguments = []

    if token.token_type == "LEFTCURLY":
        pass
    else:
        return (False, None)

    while token.token_type == "LEFTCURLY":
        anothertoken = next_token(tokenQueue)
        if anothertoken.token_type == "CHARACTER":
            _argument.children.append(anothertoken)
            anothertoken = next_token(tokenQueue)
            if anothertoken.token_type == "RIGHTCURLY":
                arguments.append(_argument)
                token = next_token(tokenQueue)
            else:
                return (False, None)
        elif anothertoken.token_type == "MACROCOMMAND":
            _macro, tokenQueue = macro(token, tokenQueue)
            #print(_macro.children)
            _argument.children.append(_macro)
            if _argument:
                return (_argument, tokenQueue)
        else:
            print(
                "Parsing Error: argument provided is not a valid argument type."
            )
            return (False, None)
    tokenQueue.put(token)
    return (arguments, tokenQueue)
Example #23
 def _processTextNode(self, node):
     pars = []
     for text in node.texts:
         if text.startswith('type=text:'):
             t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
             raw_par = raw_doc.RawParagraph(raw_doc.RawText([t]))
         elif text.startswith('type=section:#'):
             t = lexer.Token('WORD', text[len('type=section:#'):], 0, 0, 0)
             raw_par = raw_doc.RawSection(raw_doc.RawText([t]))
         elif text.startswith('type=subsection:#.#'):
             t = lexer.Token('WORD', text[len('type=subsection:#.#'):], 0,
                             0, 0)
             raw_par = raw_doc.RawSection(raw_doc.RawText([t]), 1)
         else:
             t = lexer.Token('WORD', text, 0, 0, 0)
             raw_par = raw_doc.RawParagraph(raw_doc.RawText([t]))
         pars.append(raw_par)
     return pars
Example #24
    def jumping(self, true_label, false_label, frame):
        """ Generate Expression's Jumping Code

        Arguments:
            true_label: The true branch.
            false_label: The false branch.
        """
        const = Constant(lexer.Num(0), ty.Type.Int)
        self.emitjumps(Rel(lexer.Token('>'), self.reduce(frame), const),
                       true_label, false_label, frame)
Example #25
 def parse_for_value(self, tk):
     if tk.kind == 'NUMBER' or tk.kind == 'ID':
         self.values_stack.append(Node(tk))
         self.waiting_value = False
     elif tk.lexem in self.prefix_actions:
         self.prefix_actions[tk.lexem](self)
     elif tk.lexem in self.prefix_operators:
         self.push_operator(self.prefix_operators[tk.lexem])
     else:
         self.values_stack.append(Node(lexer.Token('', 'ERROR')))
         self.parse_for_operator(tk)
Example #26
 def processExtends(self, entry, node):
     for text in node.texts:
         if '\u0001' in text:
             continue  # do not add inherited
         if text.startswith('Class.'):
             t = lexer.Token('WORD', text[len('Class.'):], 0, 0, 0)
             raw_text = raw_doc.RawText([t])
             entry.addExtends(raw_doc.RawExtends(raw_text))
         elif text.startswith('Spec.'):
             t = lexer.Token('WORD', text[len('Spec.'):], 0, 0, 0)
             raw_text = raw_doc.RawText([t])
             entry.addExtends(raw_doc.RawExtends(raw_text))
         elif text.startswith('Concept.'):
             t = lexer.Token('WORD', text[len('Concept.'):], 0, 0, 0)
             raw_text = raw_doc.RawText([t])
             if hasattr(entry, 'addImplements'):
                 entry.addImplements(raw_doc.RawImplements(raw_text))
             else:
                 entry.addExtends(raw_doc.RawExtends(raw_text))
         else:
             assert False, str(node)
Example #27
 def processExample(self, entry, node):
     # Add example header.
     raw_text = raw_doc.RawText([lexer.Token('WORD', 'Examples', 0, 0, 0)])
     raw_section = raw_doc.RawSection(raw_text)
     entry.addParagraph(raw_section)
     for text in node.texts:
         if text.startswith('type=text:'):
             t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
             raw_par = raw_doc.RawParagraph(raw_text)
         elif text.startswith('type=code:'):
             t = lexer.Token('WORD',
                             '{.cpp}\n' + text[len('type=code:'):] + '\n',
                             0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
             raw_par = raw_doc.RawCode(raw_text)
         else:
             t = lexer.Token('WORD', text, 0, 0, 0)
             raw_text = raw_doc.RawText(translateTokens([t]))
             raw_par = raw_doc.RawParagraph(raw_text)
         entry.addParagraph(raw_par)
Example #28
    def _get_next_token(self):
        """Advances the parser's internal lexer to the next token.

        This method doesn't return anything; it assigns self.cur_token to the
        next token in the input stream.
        """
        try:
            self.cur_token = self.lexer.token()

            if self.cur_token is None:
                self.cur_token = lexer.Token(None, None, None)
        except lexer.LexerError as e:
            self._error('Lexer error at position {}: {}'.format(e.pos, e))
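The None token assigned above acts as an end-of-input sentinel, so callers never have to handle a missing cur_token. A minimal sketch of a driver loop that relies on it (the method names and the cur_token.type attribute are assumptions for illustration, not taken from the source):

    def _parse_program(self):
        # Hypothetical driver: _get_next_token leaves a Token whose type is None
        # once the lexer is exhausted, so that is the only end-of-input check needed.
        statements = []
        self._get_next_token()
        while self.cur_token.type is not None:
            statements.append(self._parse_statement())
        return statements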
Example #29
def needNoLeft(tok, toks, opCtx, noneOK):
    if (tok.tT.tType in ['Identifier', 'OperatorOnly']) and \
            (posSubop(tok.tT, opCtx) or
             (tok.tT.text not in op.noLeft and tok.tT.text in op.withLeft)):
        # should have had a left or operand preceding subop
        if noneOK:  # none it is then
            return None, U.prependGen(tok, toks)
        # better insert defaultOperand
        defOperandTok = L.Token(tT=L.TokTT(text='!!defaultOperand',
                                           tType='OperatorOnly'),
                                indent=None,
                                whiteB4=False,
                                location=tok.location)
        return defOperandTok, U.prependGen(tok, toks)  # backup a bit
    return tok, toks
Example #30
def migratePages(node):
    #print >>sys.stderr, 'Migrating pages...'
    pages = []
    #print node
    for name, child in node.children.iteritems():
        if name == 'Glossary':
            continue
        page = raw_doc.RawPage()
        page.title.tokens.append(lexer.Token('WORD', name, 0, 0, 0))
        s = 'Page'
        page.name.tokens.append(
            lexer.Token('WORD', s + page.title.text, 0, 0, 0))
        if child.children.get('summary'):
            for text in child.children['summary'].texts:
                if text.startswith('type=text:'):
                    t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
                else:
                    t = lexer.Token('WORD', text, 0, 0, 0)
                raw_text = raw_doc.RawText(translateTokens([t]))
                raw_brief = raw_doc.RawBrief(raw_text)
                page.briefs.append(raw_brief)
        if child.children.get('description'):
            for text in child.children['description'].texts:
                if text.startswith('type=text:'):
                    t = lexer.Token('WORD', text[len('type=text:'):], 0, 0, 0)
                    raw_par = raw_doc.RawParagraph(
                        raw_doc.RawText(translateTokens([t])))
                elif text.startswith('type=section:#'):
                    t = lexer.Token('WORD', text[len('type=section:#'):], 0, 0,
                                    0)
                    raw_par = raw_doc.RawSection(
                        raw_doc.RawText(translateTokens([t])))
                elif text.startswith('type=subsection:#.#'):
                    t = lexer.Token('WORD', text[len('type=subsection:#.#'):],
                                    0, 0, 0)
                    raw_par = raw_doc.RawSection(
                        raw_doc.RawText(translateTokens([t])), 1)
                else:
                    t = lexer.Token('WORD', text, 0, 0, 0)
                    raw_par = raw_doc.RawParagraph(
                        raw_doc.RawText(translateTokens([t])))
                page.body.addParagraph(raw_par)
        pages.append(page)
    print 'RESULTING PAGES %s' % [p.title.text for p in pages]
    return pages