Example #1
def run(token_cls):
    print('== {} =='.format(token_cls.__name__))

    class FooNode(ASTNode):
        pass

    class Identifier(FooNode):
        token_node = True

    class Number(FooNode):
        token_node = True

    foo_lexer = Lexer(token_cls)
    foo_lexer.add_rules(
        (Pattern('[0-9]+'), token_cls.Number),
        (Pattern('[a-zA-Z_][a-zA-Z0-9_]*'), token_cls.Identifier),
    )

    emit_and_print_errors(lkt_file='foo.lkt',
                          lexer=foo_lexer,
                          generate_unparser=True)

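    # Reset the names that Lexer() assigned to these token actions so that
    # BaseToken can be reused for the next call to run()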
    BaseToken.Number.name = None
    BaseToken.Identifier.name = None
    print('')
Example #2
    Number = WithText()
    Identifier = WithSymbol()
    String = WithText()

    Comment = WithTrivia()
    Whitespace = WithTrivia()

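    # Token families group related tokens; the unparser uses them (through
    # Lexer.add_spacing) to decide which adjacent token pairs need whitespace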
    Alphanumericals = TokenFamily(Def, Error, Example, Null, Number,
                                  Identifier)
    Punctuation = TokenFamily(Comma, Dot, Semicolon, LPar, RPar, LBrace,
                              RBrace, Equal, Plus)
    Comments = TokenFamily(Comment)


foo_lexer = Lexer(Token)
foo_lexer.add_rules(
    (Pattern(r'[ \n\r\t]+'), Token.Whitespace),
    (Literal('def'), Token.Def),
    (Literal('var'), Token.Var),
    (Literal('error'), Token.Error),
    (Literal('example'), Token.Example),
    (Literal('null'), Token.Null),
    (Literal(','), Token.Comma),
    (Literal('.'), Token.Dot),
    (Literal(';'), Token.Semicolon),
    (Literal('('), Token.LPar),
    (Literal(')'), Token.RPar),
    (Literal('{'), Token.LBrace),
    (Literal('}'), Token.RBrace),
    (Literal('='), Token.Equal),
Example #3
from langkit.dsl import ASTNode
from langkit.lexer import Ignore, Lexer, LexerToken, Pattern, WithText
from langkit.parsers import Grammar

from utils import emit_and_print_errors


class BaseToken(LexerToken):
    Example = WithText()
    Whitespace = Ignore()


class FooNode(ASTNode):
    pass


class Example(FooNode):
    token_node = True


foo_lexer = Lexer(BaseToken)
foo_lexer.add_rules((Pattern('[ \t]+'), BaseToken.Whitespace),
                    (Pattern('example'), BaseToken.Example))

g = Grammar('main_rule')
g.add_rules(main_rule=Example(BaseToken.Example))
emit_and_print_errors(g, foo_lexer, generate_unparser=True)

print('Done')
Example #4
        Package,
        Renames,
        Type,
        Use,
        Pragma,
        When,
        With,
        Extends,
        String,
        Number,
        Label,
        Char,
    )


gpr_lexer = Lexer(Token)

gpr_lexer.add_patterns(
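    # Named patterns can be referenced as {name} in later patterns and rules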
    ("p_string", r"\"(\"\"|[^\n\"])*\""),
    ("digit", r"[0-9]"),
    ("integer", r"({digit}(_?{digit})*)"),
)

gpr_lexer.add_rules(
    (Pattern(r"[ \t\r\n]+"), Token.Whitespace),
    (Pattern(r"--(.?)+"), Token.Comment),
    (NoCaseLit("all"), Token.All),
    (NoCaseLit("abstract"), Token.Abstract),
    (NoCaseLit("at"), Token.At),
    (NoCaseLit("case"), Token.Case),
    (NoCaseLit("end"), Token.End),
Example #5
    Null = WithText()

    Comma = WithText()
    Dot = WithText()
    LPar = WithText()
    RPar = WithText()
    LBrace = WithText()
    RBrace = WithText()
    Equal = WithText()
    Plus = WithText()

    Number = WithText()
    Identifier = WithSymbol()


foo_lexer = Lexer(Token)
foo_lexer.add_rules(
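    # Whitespace is discarded; end of input maps to the predefined
    # Termination token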
    (Pattern(r'[ \n\r\t]+'), Ignore()),
    (Eof(), Token.Termination),
    (Literal('def'), Token.Def),
    (Literal('error'), Token.Error),
    (Literal('example'), Token.Example),
    (Literal('null'), Token.Null),
    (Literal(','), Token.Comma),
    (Literal('.'), Token.Dot),
    (Literal('('), Token.LPar),
    (Literal(')'), Token.RPar),
    (Literal('{'), Token.LBrace),
    (Literal('}'), Token.RBrace),
    (Literal('='), Token.Equal),
    (Literal('+'), Token.Plus),
Example #6
    LBrack = WithText()
    RBrack = WithText()
    LCurl = WithText()
    RCurl = WithText()
    At = WithText()
    Pipe = WithText()
    LArrow = WithText()
    BigRArrow = WithText()
    Box = WithText()
    SubBlockLiteral = WithText()

    Comment = WithTrivia()
    Whitespace = WithTrivia()


lkql_lexer = Lexer(Token)
lkql_lexer.add_rules(
    (Pattern(r"[ \t\n\r]"), Token.Whitespace), (Literal("."), Token.Dot),
    (Literal("?."), Token.QuestionDot), (Literal("?["), Token.QuestionBrack),
    (Literal("?"), Token.Question), (Literal(","), Token.Coma),
    (Literal(";"), Token.SemiCol), (Literal(":"), Token.Colon),
    (Literal("_"), Token.UnderScore), (Literal("="), Token.Eq),
    (Literal("=="), Token.EqEq), (Literal("!="), Token.Neq),
    (Literal("!!"), Token.ExclExcl), (Literal("<"), Token.Lt),
    (Literal("<="), Token.LEq), (Literal(">"), Token.Gt),
    (Literal(">="), Token.GEq), (Literal("and"), Token.And),
    (Literal("or"), Token.Or), (Literal("+"), Token.Plus),
    (Literal("-"), Token.Minus), (Literal("*"), Token.Mul),
    (Literal("/"), Token.Div), (Literal("&"), Token.Amp),
    (Literal("("), Token.LPar), (Literal(")"), Token.RPar),
    (Literal("{"), Token.LCurl), (Literal("}"), Token.RCurl),
Example #7
    RBrack = WithText()
    ListPlus = WithText()
    ListStar = WithText()
    Equal = WithText()

    # Keywords
    GrammarKw = WithText()
    OrKw = WithText()
    ClassKw = WithText()
    FunKw = WithText()
    PublicKw = WithText()
    PrivateKw = WithText()
    NullKw = WithText()


lkt_lexer = Lexer(Token)

lkt_lexer.add_patterns(
    ("STRING_DBQ", r'\"(\\\"|[^\n\"])*\"'),
    ("STRING_SQ", r"'(\\'|[^\n'])*'"),
)

lkt_lexer.add_rules(
    # Whitespace & EOF
    (Pattern(r"[ \t\r\n\f]+"), Ignore()),

    # Operators
    (Literal('!'), Token.ExclMark),
    (Literal(':'), Token.Colon),
    (Literal('?'), Token.IntMark),
    (Literal('/'), Token.Div),
Example #8
    Tick = NoText()
    Pipe = NoText()
    Assign = NoText()

    # String and char literals
    String = WithText()
    Char = WithSymbol()

    With = NoText()
    Decimal = WithText()
    Integer = WithText()

    # Trivia
    Comment = WithTrivia()

ada_lexer = Lexer(Token)

ada_lexer.add_patterns(
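    # 'bracket_char' matches the ["..."] bracket encoding for wide characters
    # (2 to 4 pairs of hex digits)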
    ('bracket_char', r'(\[\"([0-9A-F][0-9A-F]){2,4}\"\])'),
    ('p_string', r'\"(\"\"|{bracket_char}|[^\n\"])*\"'),
    ('p_percent_string', r'%(%%|{bracket_char}|[^\n%])*%'),

    ('digit', r"[0-9]"),
    ('extended_digit', r"[0-9a-zA-Z]"),
    ('integer', r"({digit}(_?{digit})*)"),
    ('exponent', r"([eE](\+?|-){integer})"),

    ('decimal_literal', r"{integer}(\.?{integer})?{exponent}?"),
    ('integer_literal', r"{integer}{exponent}?"),

    ('base', r"{integer}"),
Example #9
def create_lexer(ctx, lkt_units):
    """
    Create and populate a lexer from a Lktlang unit.

    :param list[liblktlang.AnalysisUnit] lkt_units: Non-empty list of analysis
        units where to look for the lexer.
    :rtype: langkit.lexer.Lexer
    """
    import liblktlang

    # Look for the LexerDecl node in top-level lists
    full_lexer = find_toplevel_decl(ctx, lkt_units, liblktlang.LexerDecl,
                                    'lexer')
    with ctx.lkt_context(full_lexer):
        lexer_annot = parse_annotations(ctx, lexer_annotations, full_lexer)

    patterns = {}
    """
    Mapping from pattern names to the corresponding regular expression.

    :type: dict[names.Name, str]
    """

    token_family_sets = {}
    """
    Mapping from token family names to the corresponding sets of tokens that
    belong to each family.

    :type: dict[names.Name, set[TokenAction]]
    """

    token_families = {}
    """
    Mapping from token family names to the corresponding token families.  We
    build this late, once we know all tokens and all families.

    :type: dict[names.Name, TokenFamily]
    """

    tokens = {}
    """
    Mapping from token names to the corresponding tokens.

    :type: dict[names.Name, Token]
    """

    rules = []
    pre_rules = []
    """
    Lists of regular and pre lexing rules for this lexer.

    :type: list[(langkit.lexer.Matcher, langkit.lexer.Action)]
    """

    newline_after = []
    """
    List of tokens after which we must introduce a newline during unparsing.

    :type: list[Token]
    """

    def ignore_constructor(start_ignore_layout, end_ignore_layout):
        """
        Adapter to build an Ignore instance with the same API as WithText
        constructors.
        """
        del start_ignore_layout, end_ignore_layout
        return Ignore()

    def process_family(f):
        """
        Process a LexerFamilyDecl node. Register the token family and process
        the rules it contains.

        :type f: liblktlang.LexerFamilyDecl
        """
        with ctx.lkt_context(f):
            # Create the token family, if needed
            name = names.Name.from_lower(text_as_str(f.f_syn_name))
            token_set = token_family_sets.setdefault(name, set())

            for r in f.f_rules:
                check_source_language(
                    isinstance(r.f_decl, liblktlang.GrammarRuleDecl),
                    'Only lexer rules allowed in family blocks')
                process_token_rule(r, token_set)

    def process_token_rule(r, token_set=None):
        """
        Process the full declaration of a GrammarRuleDecl node: create the
        token it declares and lower the optional associated lexing rule.

        :param liblktlang.FullDecl r: Full declaration for the GrammarRuleDecl
            to process.
        :param None|set[TokenAction] token_set: If this declaration appears in
            the context of a token family, this adds the new token to this set.
            Must be left to None otherwise.
        """
        with ctx.lkt_context(r):
            rule_annot = parse_annotations(ctx, token_annotations, r)

            # Gather token action info from the annotations. If absent,
            # fallback to WithText.
            token_cons = None
            start_ignore_layout = False
            end_ignore_layout = False
            if 'ignore' in rule_annot:
                token_cons = ignore_constructor
            for name in ('text', 'trivia', 'symbol'):
                try:
                    start_ignore_layout, end_ignore_layout = rule_annot[name]
                except KeyError:
                    continue

                check_source_language(token_cons is None,
                                      'At most one token action allowed')
                token_cons = token_cls_map[name]
            is_pre = rule_annot.get('pre_rule', False)
            if token_cons is None:
                token_cons = WithText

            # Create the token and register it where needed: the global token
            # mapping, its token family (if any) and the "newline_after" group
            # if the corresponding annotation is present.
            token_lower_name = text_as_str(r.f_decl.f_syn_name)
            token_name = names.Name.from_lower(token_lower_name)

            check_source_language(
                token_lower_name not in ('termination', 'lexing_failure'),
                '{} is a reserved token name'.format(token_lower_name))
            check_source_language(token_name not in tokens,
                                  'Duplicate token name')

            token = token_cons(start_ignore_layout, end_ignore_layout)
            tokens[token_name] = token
            if token_set is not None:
                token_set.add(token)
            if 'newline_after' in rule_annot:
                newline_after.append(token)

            # Lower the lexing rule, if present
            matcher_expr = r.f_decl.f_expr
            if matcher_expr is not None:
                rule = (lower_matcher(matcher_expr), token)
                if is_pre:
                    pre_rules.append(rule)
                else:
                    rules.append(rule)

    def process_pattern(full_decl):
        """
        Process a pattern declaration.

        :param liblktlang.FullDecl full_decl: Full declaration for the ValDecl
            to process.
        """
        parse_annotations(ctx, [], full_decl)
        decl = full_decl.f_decl
        lower_name = text_as_str(decl.f_syn_name)
        name = names.Name.from_lower(lower_name)

        with ctx.lkt_context(decl):
            check_source_language(name not in patterns,
                                  'Duplicate pattern name')
            check_source_language(
                decl.f_decl_type is None,
                'Patterns must have automatic types in lexers')
            check_source_language(
                isinstance(decl.f_val, liblktlang.StringLit)
                and decl.f_val.p_is_regexp_literal,
                'Pattern string literal expected')
            # TODO: use StringLit.p_denoted_value when properly implemented
            patterns[name] = pattern_as_str(decl.f_val)

    def lower_matcher(expr):
        """
        Lower a token matcher to our internals.

        :type expr: liblktlang.GrammarExpr
        :rtype: langkit.lexer.Matcher
        """
        with ctx.lkt_context(expr):
            if isinstance(expr, liblktlang.TokenLit):
                return Literal(json.loads(text_as_str(expr)))
            elif isinstance(expr, liblktlang.TokenNoCaseLit):
                return NoCaseLit(json.loads(text_as_str(expr)))
            elif isinstance(expr, liblktlang.TokenPatternLit):
                return Pattern(pattern_as_str(expr))
            else:
                check_source_language(False, 'Invalid lexing expression')

    def lower_token_ref(ref):
        """
        Return the Token that `ref` refers to.

        :type ref: liblktlang.RefId
        :rtype: Token
        """
        with ctx.lkt_context(ref):
            token_name = names.Name.from_lower(text_as_str(ref))
            check_source_language(token_name in tokens,
                                  'Unknown token: {}'.format(token_name.lower))
            return tokens[token_name]

    def lower_family_ref(ref):
        """
        Return the TokenFamily that `ref` refers to.

        :type ref: liblktlang.RefId
        :rtype: TokenFamily
        """
        with ctx.lkt_context(ref):
            name_lower = text_as_str(ref)
            name = names.Name.from_lower(name_lower)
            check_source_language(
                name in token_families,
                'Unknown token family: {}'.format(name_lower))
            return token_families[name]

    def lower_case_alt(alt):
        """
        Lower the alternative of a case lexing rule.

        :type alt: liblktlang.BaseLexerCaseRuleAlt
        :rtype: Alt
        """
        prev_token_cond = None
        if isinstance(alt, liblktlang.LexerCaseRuleCondAlt):
            prev_token_cond = [
                lower_token_ref(ref) for ref in alt.f_cond_exprs
            ]
        return Alt(prev_token_cond=prev_token_cond,
                   send=lower_token_ref(alt.f_send.f_sent),
                   match_size=int(alt.f_send.f_match_size.text))

    # Go through all rules to register tokens, their token families and lexing
    # rules.
    for full_decl in full_lexer.f_decl.f_rules:
        with ctx.lkt_context(full_decl):
            if isinstance(full_decl, liblktlang.LexerFamilyDecl):
                # This is a family block: go through all declarations inside it
                process_family(full_decl)

            elif isinstance(full_decl, liblktlang.FullDecl):
                # There can be various types of declarations in lexers...
                decl = full_decl.f_decl

                if isinstance(decl, liblktlang.GrammarRuleDecl):
                    # Here, we have a token declaration, potentially associated
                    # with a lexing rule.
                    process_token_rule(full_decl)

                elif isinstance(decl, liblktlang.ValDecl):
                    # This is the declaration of a pattern
                    process_pattern(full_decl)

                else:
                    check_source_language(False,
                                          'Unexpected declaration in lexer')

            elif isinstance(full_decl, liblktlang.LexerCaseRule):
                syn_alts = list(full_decl.f_alts)

                # This is a rule for conditional lexing: lower its matcher and
                # its alternative rules.
                matcher = lower_matcher(full_decl.f_expr)
                check_source_language(
                    len(syn_alts) == 2 and isinstance(
                        syn_alts[0], liblktlang.LexerCaseRuleCondAlt)
                    and isinstance(syn_alts[1],
                                   liblktlang.LexerCaseRuleDefaultAlt),
                    'Invalid case rule topology')
                rules.append(
                    Case(matcher, lower_case_alt(syn_alts[0]),
                         lower_case_alt(syn_alts[1])))

            else:
                # The grammar should make the following dead code
                assert False, 'Invalid lexer rule: {}'.format(full_decl)

    # Create the LexerToken subclass to define all tokens and token families
    items = {}
    for name, token in tokens.items():
        items[name.camel] = token
    for name, token_set in token_family_sets.items():
        tf = TokenFamily(*list(token_set))
        token_families[name] = tf
        items[name.camel] = tf
    token_class = type('Token', (LexerToken, ), items)

    # Create the Lexer instance and register all patterns and lexing rules
    result = Lexer(token_class, 'track_indent' in lexer_annot, pre_rules)
    for name, regexp in patterns.items():
        result.add_patterns((name.lower, regexp))
    result.add_rules(*rules)

    # Register spacing/newline rules
    for tf1, tf2 in lexer_annot.get('spacing', []):
        result.add_spacing((lower_family_ref(tf1), lower_family_ref(tf2)))
    result.add_newline_after(*newline_after)

    return result
Example #10
    ImplementsKw = WithText()
    TraitKw = WithText()

    # Trivia
    Comment = WithTrivia()

    # Literals
    String = WithText()
    PString = WithText()
    Char = WithText()
    DocComment = WithText()
    Number = WithText()
    BigNumber = WithText()


lkt_lexer = Lexer(Token)

lkt_lexer.add_patterns(
    ("HEX_DIGIT", r'[0-9a-fA-F]'),
    ("HEX_DIGITS_2", r'{HEX_DIGIT}{HEX_DIGIT}'),
    ("HEX_DIGITS_4", r'{HEX_DIGITS_2}{HEX_DIGITS_2}'),
    ("HEX_DIGITS_8", r'{HEX_DIGITS_4}{HEX_DIGITS_4}'),
    ("STRING_LIT", r'\"(\\\"|[^\n\"])*\"'),
    ("CHAR_LIT",
     r"'(\\'|[^\n']|\\x{HEX_DIGITS_2}|\\u{HEX_DIGITS_4}|\\U{HEX_DIGITS_8})'"),
)

lkt_lexer.add_rules(
    # Whitespace & EOF
    (Pattern(r"[ \t\r\n\f]+"), Ignore()),
Example #11
class Token(LexerToken):
    Example = WithText()
    Null = WithText()

    Comma = WithText()
    LPar = WithText()
    RPar = WithText()
    LBrace = WithText()
    RBrace = WithText()
    Plus = WithText()

    Number = WithText()
    Identifier = WithSymbol()


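# track_indent=True makes the lexer emit synthetic Indent/Dedent/Newline
# tokens, which is why the whitespace rule below does not match '\n'.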
foo_lexer = Lexer(Token, track_indent=True)
foo_lexer.add_rules(
    (Pattern(r'[ \r\t]+'), Ignore()),
    (Eof(), Token.Termination),
    (Literal("example"), Token.Example),
    (Literal("null"), Token.Null),
    (Literal(','), Token.Comma),
    (Literal('('), Token.LPar),
    (Literal(')'), Token.RPar),
    (Literal('{'), Token.LBrace),
    (Literal('}'), Token.RBrace),
    (Literal('+'), Token.Plus),
    (Pattern('[0-9]+'), Token.Number),
    (Pattern('[a-zA-Z_][a-zA-Z0-9_]*'), Token.Identifier),
)
L = foo_lexer
Example #12
from __future__ import absolute_import, division, print_function

from langkit.dsl import ASTNode
from langkit.lexer import (Ignore, Lexer, LexerToken, Literal, Pattern,
                           WithText, WithTrivia)
from langkit.parsers import Grammar, List

from utils import build_and_run


class Token(LexerToken):
    Example = WithText()
    Comment = WithTrivia()


foo_lexer = Lexer(Token)
foo_lexer.add_rules(
    (Pattern(r'[ \n\r\t]+'), Ignore()),
    (Literal('example'), Token.Example),
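    # Comments are kept as trivia (WithTrivia) rather than discarded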
    (Pattern('#(.?)+'), Token.Comment),
)


class FooNode(ASTNode):
    pass


class Example(FooNode):
    pass

Example #13
class Token(LexerToken):
    Ident = WithSymbol()

    Colon = WithText()
    Arrow = WithText()
    Equal = WithText()
    ParOpen = WithText()
    ParClose = WithText()
    Backslash = WithText()
    Dot = WithText()

    Comment = WithTrivia()
    Whitespace = WithTrivia()
    Newlines = WithText()


dependz_lexer = Lexer(Token)

dependz_lexer.add_rules(
    # Blanks and trivia
    (Pattern(r"[ \r\t]+"), Token.Whitespace),
    (Pattern(r"[\n]+"), Token.Newlines),
    (Pattern(r"#(.?)+"), Token.Comment),
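    # Identifiers and punctuation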
    (Pattern('[a-zA-Z_][a-zA-Z0-9_]*'), Token.Ident),
    (Literal(':'), Token.Colon),
    (Literal('->'), Token.Arrow),
    (Literal('='), Token.Equal),
    (Literal('('), Token.ParOpen),
    (Literal(')'), Token.ParClose),
    (Literal('\\'), Token.Backslash),
    (Literal('.'), Token.Dot))
Example #14
                           WithSymbol, WithText, WithTrivia)
from langkit.parsers import Grammar, List, Or, _

from utils import build_and_run


class Token(LexerToken):
    Def = WithText()
    LPar = WithText()
    RPar = WithText()
    Comma = WithText()
    Identifier = WithSymbol()
    Comment = WithTrivia()


foo_lexer = Lexer(Token, track_indent=True)
foo_lexer.add_rules(
    (Pattern(r'[ \r\t]+'), Ignore()),
    (Literal('def'), Token.Def),
    (Literal(','), Token.Comma),
    (Literal('('), Token.LPar),
    (Literal(')'), Token.RPar),
    (Pattern('[a-zA-Z_][a-zA-Z0-9_]*'), Token.Identifier),
    (Pattern('#.*'), Token.Comment),
)
L = foo_lexer


class FooNode(ASTNode):
    pass
Example #15
    Target = WithText()

    # String and char literals
    String = WithText()
    Char = WithSymbol()

    With = WithText()
    Decimal = WithText()
    Integer = WithText()

    # Trivia
    Comment = WithTrivia()
    PrepLine = WithTrivia()


ada_lexer = Lexer(Token)

ada_lexer.add_patterns(
    ('bracket_char', r'(\[\"([0-9A-F][0-9A-F]){2,4}\"\])'),
    ('p_string', r'\"(\"\"|{bracket_char}|[^\n\"])*\"'),
    ('p_percent_string', r'%(%%|{bracket_char}|[^\n%])*%'),

    ('digit', r"[0-9]"),
    ('extended_digit', r"[0-9a-zA-Z]"),
    ('integer', r"({digit}(_?{digit})*)"),
    ('exponent', r"([eE](\+?|-){integer})"),

    ('decimal_literal', r"{integer}(\.?{integer})?{exponent}?"),
    ('integer_literal', r"{integer}{exponent}?"),

    ('base', r"{integer}"),
Example #16
    Semicolon = WithText()
    DoubleColon = WithText()
    Assignment = WithText()
    Colon = WithText()

    # Comment
    Comment = WithTrivia()

    # Numeric
    Numeral = WithText()

    # String
    StringLiteral = WithText()


rflx_lexer = Lexer(Token)

rflx_lexer.add_rules(
    (Pattern(r"[ \t\r\n]+"), Ignore()),
    (Pattern(r"--.*"), Token.Comment),
)

# Hack to support keywords that equal attributes
# Inspired by Libadalang grammar (ada/language/lexer.py)
rflx_lexer.add_rules(*[
    Case(
        Literal(text),
        Alt(
            prev_token_cond=(Token.Tick, ),
            send=token,
            match_size=len(text),
Example #17
    Pass = WithText()
    LshAssign = WithText()
    BinOr = WithText()
    Rcurl = WithText(end_ignore_layout=True)
    With = WithText()
    Plus = WithText()
    Lt = WithText()
    Number = WithText()
    String = WithText()
    Comment = WithTrivia()

    Identifier = WithSymbol()


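# The pre-rule runs before the regular rules: backslash-newline continuations
# (and the blanks that follow them) are dropped so they do not produce
# indentation tokens.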
python_lexer = Lexer(Token,
                     track_indent=True,
                     pre_rules=[(Pattern(r'\\\n[ \r\t]*'), Ignore())])

python_lexer.add_patterns(
    ("STRING_DBQ", r'\"(\\\"|[^\n\"])*\"'),
    ("STRING_SQ", r"'(\\'|[^\n'])*'"),
    ("MLSTRING_DBQ", r'\"\"\"([^"]|("[^"])|(""[^"])|\n)*\"\"\"'),
    ("MLSTRING_SQ", r"'''([^']|('[^'])|(''[^'])|\n)*'''"),
)

python_lexer.add_rules(
    (Pattern('(u|U)?(r|R)?'
             '({MLSTRING_SQ}|{MLSTRING_DBQ}'
             '|{STRING_SQ}|{STRING_DBQ})'), Token.String),
    (Pattern(r'[ \r\t]+'), Ignore()),
    (Pattern(r"#(.?)+"), Token.Comment),
Example #18
def test_invalid_rule_2(l):
    l.add_rules(
        Case(
            Literal('def'),
            Alt(prev_token_cond=(Token.Var, ),
                send=Token.LexingFailure,
                match_size=3), Alt(send=Token.Def, match_size=3)))


for name, value in sorted(locals().items()):
    if not name.startswith('test_'):
        continue

    print('== {} =='.format(name))

    lexer = Lexer(Token)
    value(lexer)

    class FooNode(ASTNode):
        pass

    class Example(FooNode):
        pass

    grammar = Grammar('main_rule')
    grammar.add_rules(main_rule=Example('example'), )

    emit_and_print_errors(grammar, lexer)
    langkit.reset()
    print('')
Example #19
    # Trivia
    Comment = WithTrivia()
    PrepLine = WithTrivia()
    Whitespace = WithTrivia()

    Alphanumericals = TokenFamily(
        Identifier, All, Abort, Else, New, Return, Abs, Elsif, Not, Reverse,
        End, Null, Accept, Entry, Select, Access, Exception, Of, Separate,
        Exit, Or, Others, Subtype, And, For, Out, Array, Function, At, Generic,
        Package, Task, Begin, Goto, Pragma, Terminate, Body, Private, Then, If,
        Procedure, Type, Case, In, Constant, Is, Raise, Use, Declare, Range,
        Delay, Limited, Record, When, Delta, Loop, Rem, While, Digits, Renames,
        Do, Mod, Xor, With, Decimal, Integer)


ada_lexer = Lexer(Token)

ada_lexer.add_patterns(
    ('bracket_char', r'(\[\"[0-9a-fA-F]+\"\])'),
    ('p_string', r'\"(\"\"|{bracket_char}|[^\n\"])*\"'),
    ('p_percent_string', r'%(%%|{bracket_char}|[^\n%])*%'),
    ('digit', r"[0-9]"),
    ('extended_digit', r"[0-9a-zA-Z]"),
    ('integer', r"({digit}(_?{digit})*)"),
    ('exponent', r"([eE](\+?|-){integer})"),
    ('decimal_literal', r"{integer}(\.?{integer})?{exponent}?"),
    ('integer_literal', r"{integer}{exponent}?"),
    ('base', r"{integer}"),
    ('based_integer', r"{extended_digit}(_?{extended_digit})*"),
    ('based_decimal_literal',
     r"{base}[#:]{based_integer}(\.{based_integer})?[#:]{exponent}?"),
Example #20
    Number = WithSymbol()
    HexNumber = WithSymbol()
    Yes = WithSymbol()
    No = WithSymbol()
    Module = WithSymbol()

    LPar = WithText()
    RPar = WithText()
    Equal = WithText()
    Different = WithText()
    Not = WithText()
    Or = WithText()
    And = WithText()


kconfig_lexer = Lexer(Token)
kconfig_lexer.add_rules(
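    # Whitespace and '#' comments are discarded outright rather than kept as
    # trivia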
    (Pattern(r"[ \t\r\n]+"), Ignore()),
    (Pattern(r"#.*"), Ignore()),

    # Keywords
    (Literal("config"), Token.Config),
    (Literal("menuconfig"), Token.Menuconfig),
    (Literal("choice"), Token.Choice),
    (Literal("endchoice"), Token.Endchoice),
    (Literal("comment"), Token.Comment),
    (Literal("menu"), Token.Menu),
    (Literal("endmenu"), Token.Endmenu),
    (Literal("if"), Token.If),
    (Literal("endif"), Token.Endif),
    (Literal("source"), Token.Source),