Beispiel #1
0
    DIV = Series(Text("/"), dwsp__)
    MUL = Series(Text("*"), dwsp__)
    MINUS = Series(Text("-"), dwsp__)
    PLUS = Series(Text("+"), dwsp__)
    group = Series(Series(Drop(Text("(")), dwsp__), expression,
                   Series(Drop(Text(")")), dwsp__))
    sign = Alternative(POSITIVE, NEGATIVE)
    factor = Series(Option(sign), Alternative(NUMBER, VARIABLE, group),
                    ZeroOrMore(Alternative(VARIABLE, group)))
    term = Series(factor, ZeroOrMore(Series(Alternative(DIV, MUL), factor)))
    expression.set(
        Series(term, ZeroOrMore(Series(Alternative(PLUS, MINUS), term))))
    root__ = expression


# Per-thread singleton factory: yields one ArithmeticGrammar instance per thread.
_raw_grammar = ThreadLocalSingletonFactory(ArithmeticGrammar, ident=1)


def get_grammar() -> ArithmeticGrammar:
    """Return the thread-local ArithmeticGrammar singleton.

    Before handing the grammar out, tracing is configured from the active
    configuration: 'resume_notices' takes precedence over
    'history_tracking'.
    """
    parser = _raw_grammar()
    if get_config_value('resume_notices'):
        resume_notices_on(parser)
        return parser
    if get_config_value('history_tracking'):
        set_tracer(parser, trace_history)
    return parser


def parse_Arithmetic(document,
                     start_parser="root_parser__",
                     *,
                     complete_match=True):
Beispiel #2
0
def LyrikTokenizer(original_text) -> Tuple[str, List[Error]]:
    """Pass-through tokenizer: return the text unchanged and no errors.

    A real implementation could insert preprocessor tokens into the
    source text before it is handed to the parser.
    """
    errors = []  # no preprocessing performed, hence no errors to report
    return original_text, errors


def preprocessor_factory() -> PreprocessorFunc:
    """Assemble the Lyrik preprocessing chain.

    Include directives are expanded first, then the tokenizer runs over
    the expanded source.
    """
    # below, the second parameter must always be the same as LyrikGrammar.COMMENT__!
    include_finder = gen_find_include_func(RE_INCLUDE, '#.*')
    include_stage = partial(preprocess_includes,
                            find_next_include=include_finder)
    token_stage = make_preprocessor(LyrikTokenizer)
    return chain_preprocessors(include_stage, token_stage)


# Per-thread singleton: the fully assembled Lyrik preprocessing chain.
get_preprocessor = ThreadLocalSingletonFactory(preprocessor_factory, ident=1)

#######################################################################
#
# PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!
#
#######################################################################


class LyrikGrammar(Grammar):
    r"""Parser for a Lyrik source file.
    """
    source_hash__ = "fd6b4bce06103ceaab2b3ae06128cc6e"
    disposable__ = re.compile(
        'JAHRESZAHL$|ZEICHENFOLGE$|ENDE$|LEERRAUM$|ziel$|wortfolge$')
    static_analysis_pending__ = []  # type: List[bool]
Beispiel #3
0
    source_hash__ = "4ae7748240bfd917fc406c93eb9214f7"
    disposable__ = re.compile('..(?<=^)')
    static_analysis_pending__ = []  # type: List[bool]
    parser_initialization__ = ["upon instantiation"]
    COMMENT__ = r'#.*'
    comment_rx__ = re.compile(COMMENT__)
    WHITESPACE__ = r'\s*'
    WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wsp__ = Whitespace(WSP_RE__)
    EOF = NegativeLookahead(RegExp('.'))
    WORD = Series(RegExp('\\w+'), wsp__)
    document = Series(wsp__, ZeroOrMore(WORD), EOF, mandatory=2)
    root__ = document


# Per-thread singleton factory: yields one atfGrammar instance per thread.
_raw_grammar = ThreadLocalSingletonFactory(atfGrammar, ident=1)


def get_grammar() -> atfGrammar:
    """Return the thread-local atfGrammar singleton with tracing configured.

    If 'resume_notices' is set in the configuration, resume notices are
    switched on; otherwise, if 'history_tracking' is set, a history tracer
    is attached to the grammar.
    """
    grammar = _raw_grammar()
    if get_config_value('resume_notices'):
        resume_notices_on(grammar)
    elif get_config_value('history_tracking'):
        set_tracer(grammar, trace_history)
    return grammar


def parse_atf(document, start_parser="root_parser__", *, complete_match=True):
    """Parse *document* with the atf grammar, starting at *start_parser*."""
    grammar = get_grammar()
    return grammar(document, start_parser, complete_match)

Beispiel #4
0
                       ZeroOrMore(Series(Series(Text(","), dwsp__),
                                         component)),
                       Lookahead(FOLLOW_UP),
                       mandatory=1)
    element.set(
        Alternative(Series(Option(retrieveop), symbol,
                           NegativeLookahead(DEF)), literal, plaintext, regexp,
                    Series(character, dwsp__), any_char, whitespace, group))
    countable.set(Alternative(option, oneormore, element))
    expression.set(Series(sequence, ZeroOrMore(Series(OR, dwsp__, sequence))))
    syntax = Series(dwsp__, ZeroOrMore(Alternative(definition, directive)),
                    EOF)
    root__ = syntax


# Per-thread singleton factory: yields one FixedEBNFGrammar instance per thread.
_raw_grammar = ThreadLocalSingletonFactory(FixedEBNFGrammar, ident=1)


def get_grammar() -> FixedEBNFGrammar:
    """Return the thread-local FixedEBNFGrammar singleton with tracing configured.

    If 'resume_notices' is set in the configuration, resume notices are
    switched on; otherwise, if 'history_tracking' is set, a history tracer
    is attached to the grammar.
    """
    grammar = _raw_grammar()
    if get_config_value('resume_notices'):
        resume_notices_on(grammar)
    elif get_config_value('history_tracking'):
        set_tracer(grammar, trace_history)
    return grammar


def parse_FixedEBNF(document,
                    start_parser="root_parser__",
                    *,
                    complete_match=True):
    NEGATIVE = RegExp('[-]')
    POSITIVE = RegExp('[+]')
    group = Series(Series(Drop(Text("(")), dwsp__), expression,
                   Series(Drop(Text(")")), dwsp__))
    sign = Alternative(POSITIVE, NEGATIVE)
    factor = Series(Option(sign), Alternative(NUMBER, VARIABLE, group))
    div = Series(factor, Series(Drop(Text("/")), dwsp__), term)
    mul = Series(factor, Series(Drop(Text("*")), dwsp__), term)
    add = Series(term, Series(Drop(Text("+")), dwsp__), expression)
    sub = Series(term, Series(Drop(Text("-")), dwsp__), expression)
    term.set(Alternative(mul, div, factor))
    expression.set(Alternative(add, sub, term))
    root__ = expression


# Per-thread singleton factory: yields one ArithmeticRightRecursiveGrammar
# instance per thread.
_raw_grammar = ThreadLocalSingletonFactory(ArithmeticRightRecursiveGrammar,
                                           ident=1)


def get_grammar() -> ArithmeticRightRecursiveGrammar:
    """Return the thread-local ArithmeticRightRecursiveGrammar singleton.

    If 'resume_notices' is set in the configuration, resume notices are
    switched on; otherwise, if 'history_tracking' is set, a history tracer
    is attached to the grammar.
    """
    grammar = _raw_grammar()
    if get_config_value('resume_notices'):
        resume_notices_on(grammar)
    elif get_config_value('history_tracking'):
        set_tracer(grammar, trace_history)
    return grammar


def parse_ArithmeticRightRecursive(document,
                                   start_parser="root_parser__",
                                   *,
                                   complete_match=True):
Beispiel #6
0
    # Here, a function body can be filled in that adds preprocessor tokens
    # to the source code and returns the modified source.
    return original_text, []


def preprocessor_factory() -> PreprocessorFunc:
    """Assemble the ts2dataclass preprocessing chain.

    Include directives are expanded first, then the tokenizer runs over
    the expanded source.
    """
    # below, the second parameter must always be the same as ts2dataclassGrammar.COMMENT__!
    include_finder = gen_find_include_func(
        RE_INCLUDE, '(?:\\/\\/.*)|(?:\\/\\*(?:.|\\n)*?\\*\\/)')
    include_stage = partial(preprocess_includes,
                            find_next_include=include_finder)
    token_stage = make_preprocessor(ts2dataclassTokenizer)
    return chain_preprocessors(include_stage, token_stage)


# Per-thread singleton: the fully assembled ts2dataclass preprocessing chain.
get_preprocessor = ThreadLocalSingletonFactory(preprocessor_factory, ident=1)

#######################################################################
#
# PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!
#
#######################################################################


class ts2dataclassGrammar(Grammar):
    r"""Parser for a ts2dataclass source file.
    """
    _literal = Forward()
    _type = Forward()
    declaration = Forward()
    declarations_block = Forward()
Beispiel #7
0

def LaTeXTokenizer(original_text) -> Tuple[str, List[Error]]:
    """Pass-through tokenizer: return the text unchanged and no errors."""
    errors = []  # no preprocessing performed, hence no errors to report
    return original_text, errors


def preprocessor_factory() -> PreprocessorFunc:
    """Assemble the LaTeX preprocessing chain.

    Include directives are expanded first (using the grammar's own comment
    regex to skip commented-out includes), then the tokenizer runs.
    """
    include_finder = gen_find_include_func(RE_INCLUDE,
                                           LaTeXGrammar.comment_rx__)
    include_stage = partial(preprocess_includes,
                            find_next_include=include_finder)
    token_stage = make_preprocessor(LaTeXTokenizer)
    return chain_preprocessors(include_stage, token_stage)


# Per-thread singleton: the fully assembled LaTeX preprocessing chain.
get_preprocessor = ThreadLocalSingletonFactory(preprocessor_factory, ident=1)

#######################################################################
#
# PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!
#
#######################################################################


class LaTeXGrammar(Grammar):
    r"""Parser for a LaTeX source file.
    """
    _block_environment = Forward()
    _text_element = Forward()
    block = Forward()
    paragraph = Forward()
Beispiel #8
0
                    _element,
                    mandatory=1)
    object = Series(Series(Drop(Text("{")), dwsp__),
                    member,
                    ZeroOrMore(
                        Series(Series(Drop(Text(",")), dwsp__),
                               member,
                               mandatory=1)),
                    Series(Drop(Text("}")), dwsp__),
                    mandatory=3)
    _element.set(Alternative(object, array, string, number, _bool, null))
    json = Series(dwsp__, _element, _EOF)
    root__ = json


# Per-thread singleton factory: yields one jsonGrammar instance per thread.
_raw_grammar = ThreadLocalSingletonFactory(jsonGrammar, ident=1)


def get_grammar() -> jsonGrammar:
    """Return the thread-local jsonGrammar singleton.

    Tracing is configured from the active configuration before the
    grammar is handed out: 'resume_notices' takes precedence over
    'history_tracking'.
    """
    parser = _raw_grammar()
    if get_config_value('resume_notices'):
        resume_notices_on(parser)
        return parser
    if get_config_value('history_tracking'):
        set_tracer(parser, trace_history)
    return parser


def parse_json(document, start_parser="root_parser__", *, complete_match=True):
    """Parse *document* with the json grammar, starting at *start_parser*."""
    grammar = get_grammar()
    return grammar(document, start_parser, complete_match)

Beispiel #9
0
                    Series(Drop(Text('"')), EncName, Drop(Text('"')))))
    VersionInfo = Series(
        dwsp__, Drop(Text('version')), dwsp__, Drop(Text('=')), dwsp__,
        Alternative(Series(Drop(Text("\'")), VersionNum, Drop(Text("\'"))),
                    Series(Drop(Text('"')), VersionNum, Drop(Text('"')))))
    XMLDecl = Series(Drop(Text('<?xml')), VersionInfo, Option(EncodingDecl),
                     Option(SDDecl), dwsp__, Drop(Text('?>')))
    prolog = Series(Option(Series(dwsp__, XMLDecl)), Option(Misc),
                    Option(Series(doctypedecl, Option(Misc))))
    element.set(
        Alternative(emptyElement, Series(STag, content, ETag, mandatory=1)))
    document = Series(prolog, element, Option(Misc), EOF)
    root__ = document


# Per-thread singleton factory: yields one XMLSnippetGrammar instance per thread.
_raw_grammar = ThreadLocalSingletonFactory(XMLSnippetGrammar, ident=1)


def get_grammar() -> XMLSnippetGrammar:
    """Return the thread-local XMLSnippetGrammar singleton with tracing configured.

    If 'resume_notices' is set in the configuration, resume notices are
    switched on; otherwise, if 'history_tracking' is set, a history tracer
    is attached to the grammar.
    """
    grammar = _raw_grammar()
    if get_config_value('resume_notices'):
        resume_notices_on(grammar)
    elif get_config_value('history_tracking'):
        set_tracer(grammar, trace_history)
    return grammar


def parse_XMLSnippet(document,
                     start_parser="root_parser__",
                     *,
                     complete_match=True):
Beispiel #10
0
def LyrikTokenizer(original_text) -> Tuple[str, List[Error]]:
    """Pass-through tokenizer: return the text unchanged with an empty error list."""
    # Here, a function body can be filled in that adds preprocessor tokens
    # to the source code and returns the modified source.
    return original_text, []


def preprocessor_factory() -> PreprocessorFunc:
    """Assemble the Lyrik preprocessing chain: include expansion, then tokenizing."""
    # below, the second parameter must always be the same as LyrikGrammar.COMMENT__!
    find_next_include = gen_find_include_func(RE_INCLUDE, '#.*')
    include_prep = partial(preprocess_includes, find_next_include=find_next_include)
    tokenizing_prep = make_preprocessor(LyrikTokenizer)
    return chain_preprocessors(include_prep, tokenizing_prep)


# Per-thread singleton: the fully assembled Lyrik preprocessing chain.
get_preprocessor = ThreadLocalSingletonFactory(preprocessor_factory, ident=1)


#######################################################################
#
# PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!
#
#######################################################################

class LyrikGrammar(Grammar):
    r"""Parser for a Lyrik source file.
    """
    source_hash__ = "26385fa0fbbe6e28b8b15d563a5407c9"
    disposable__ = re.compile('JAHRESZAHL$|ZEICHENFOLGE$|ENDE$|LEERRAUM$|ziel$|wortfolge$')
    static_analysis_pending__ = []  # type: List[bool]
    parser_initialization__ = ["upon instantiation"]