Example #1
        # First rule: the parse result arrives as a cons-style list of
        # (rulename, alternation) pairs; handle the head item here.
        rulename, alternation = item
        for elements in alternation:
            elements = process_elements(grammar, rulename, elements)
            grammar.add_rule(rulename, *elements)
        # Remaining rules: walk the (item, tail) cons list, skipping
        # empty entries (e.g. blank or comment lines in the source).
        while tail:
            item, tail = tail
            if item is None:
                continue
            rulename, alternation = item
            for elements in alternation:
                elements = process_elements(grammar, rulename, elements)
                grammar.add_rule(rulename, *elements)
        return grammar
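
For context: the while-tail loop above consumes a Lisp-style cons list, where each node is an (item, tail) pair and a falsy tail ends the list. Below is a minimal standalone sketch of that traversal; the rule names are invented for illustration, not taken from the library.

# Standalone sketch of the (item, tail) cons-list walk used above.
# The rule data here is invented for illustration only.
rules = ("rule-a",
         ("rule-b",
          (None,                # empty entry, skipped by the loop
           ("rule-c",
            None))))            # falsy tail terminates the walk

item, tail = rules
collected = [item]
while tail:
    item, tail = tail
    if item is None:
        continue
    collected.append(item)

print(collected)  # ['rule-a', 'rule-b', 'rule-c']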



# Bootstrap the ABNF parser itself:
# (1) get the tokenizer, (2) compile the grammar, (3) build a parser
# whose start rule is 'rulelist', the top-level rule of ABNF (RFC 5234)
abnf_grammar.get_tokenizer()
abnf_grammar.compile_grammar(Context)
abnf_parser = get_parser(abnf_grammar, 'rulelist')


###########################################################################
# The Public API
###########################################################################
def build_grammar(data, context_class=None):
    """Parse the ABNF source in *data* and return a compiled grammar."""
    context = Context(data)
    grammar = abnf_parser.run(data, context)
    grammar.get_tokenizer()
    grammar.compile_grammar(context_class)
    return grammar
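
A hypothetical usage sketch: the module name abnf_builder and the ABNF rules below are assumptions made for illustration, not part of the library.

# Hypothetical usage; module name and ABNF source are assumptions.
from abnf_builder import build_grammar   # assumed module name

abnf_source = (
    'greeting = "Hello" SP name CRLF\r\n'   # ABNF lines end in CRLF
    'name     = 1*ALPHA\r\n'
)

grammar = build_grammar(abnf_source)
# grammar now has its tokenizer and compiled rules ready for use.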
