def test_iter(self):
    """Keys iterate in insertion order; assigning a duplicate key collects values."""
    node = AST()
    for key, value in [('name', 'hello'), ('name', 'world'), ('value', 1)]:
        node[key] = value
    self.assertEqual(['name', 'value'], list(node))
    self.assertEqual([['hello', 'world'], 1], list(node.values()))
def test_init(self):
    """items() preserves the exact insertion order, even for descending keys."""
    node = AST()
    # Same pairs as reversed([(0, 0), (1, 2), ..., (5, 10)]).
    data = [(k, 2 * k) for k in range(5, -1, -1)]
    for key, value in data:
        node[key] = value
    self.assertEqual(data, list(node.items()))
def elements(self, ast):
    """Collapse the non-None parsed elements into Void, a single node, or a Sequence."""
    kept = [item for item in ast if item is not None]
    if len(kept) > 1:
        return model.Sequence(AST(sequence=kept))
    if kept:
        return kept[0]
    return model.Void()
def __init__(self, ast=None, exp=None, **kwargs):
    """Initialize a decorator node from either an explicit ``exp`` or an ``ast``.

    If ``exp`` is given it is assigned directly (before the base-class
    constructor runs, so Model's attribute handling does not overwrite it).
    Otherwise a non-AST ``ast`` is wrapped so the base class can pick the
    subexpression out of the ``exp`` key.
    """
    if exp is not None:
        self.exp = exp
    elif not isinstance(ast, AST):
        # Patch to avoid bad interactions with attribute setting in Model.
        # Also a shortcut for subexpressions that are not ASTs.
        ast = AST(exp=ast)
    super(Decorator, self).__init__(ast)
    # A decorator must always end up wrapping a Model subexpression.
    assert isinstance(self.exp, Model)
def test_add(self):
    """A second assignment to a key turns the attribute into a list of values."""
    node = AST()
    node['name'] = 'hello'
    self.assertIsNotNone(node.name)
    self.assertEqual('hello', node.name)
    node['name'] = 'world'
    self.assertEqual(['hello', 'world'], node.name)
    node['value'] = 1
    self.assertEqual(1, node.value)
def _initialize_caches(self):
    """Reset all parser bookkeeping stacks and per-parse scratch state."""
    # Stacks tracking the parse in progress.
    self._ast_stack = [AST()]
    self._concrete_stack = [None]
    self._rule_stack = []
    self._cut_stack = [False]
    # Per-parse scratch state.
    self._last_node = None
    self._state = None
    self._lookahead = 0
    self._recursive_rules = set()
    # NOTE(review): "memoizetion" is misspelled, but the method is defined
    # elsewhere under that name, so the call must keep the spelling.
    self._clear_memoizetion_caches()
def __init__(self, ast, name, exp, base, params, kwparams, decorators=None):
    """A rule extending a base rule: its rhs is the base's exp followed by its own.

    Missing params/kwparams are inherited from the base rule.
    """
    super(BasedRule, self).__init__(
        ast,
        name,
        exp,
        params or base.params,
        kwparams or base.kwparams,
        decorators=decorators,
    )
    self.base = base
    combined = AST(sequence=[self.base.exp, self.exp])
    # Attribute the combined sequence to the base rule's source location.
    combined.set_parseinfo(self.base.parseinfo)
    self.rhs = Sequence(combined)
class TOKEN_TREE:
    """A token tree: a terminal token, or a delimited group of nested token trees."""

    tokens: list

    # Grammar: LITERAL | IDENT | LIFETIME | PUNCT | "(" TOKEN_TREE* ")"
    # | "{" TOKEN_TREE* "}" | "[" TOKEN_TREE* "]"
    RULE = grammars.Rule(
        ast=None,
        name="TOKEN_TREE",
        exp=grammars.Choice(
            [
                grammars.RuleRef("LITERAL"),
                grammars.RuleRef("IDENT"),
                grammars.RuleRef("LIFETIME"),
                grammars.RuleRef("PUNCT"),
            ]
            + [
                # One bracketed alternative per delimiter pair.
                grammars.Sequence(
                    AST(sequence=[
                        grammars.Token(opening),
                        grammars.Closure(grammars.RuleRef("TOKEN_TREE")),
                        grammars.Token(closing),
                    ])
                )
                for (opening, closing) in [("(", ")"), ("{", "}"), ("[", "]")]
            ]
        ),
        params=None,
        kwparams=None,
    )

    @classmethod
    def from_ast(cls, ast) -> TOKEN_TREE:
        """Build a TOKEN_TREE directly from its parsed AST."""
        return cls(ast)
def __init__(self, ast=None, **kwargs):
    """Store the raw comment text under the ``comment`` key of a fresh AST."""
    wrapped = AST(comment=ast)
    super(Comment, self).__init__(ast=wrapped)
def __init__(self, ast=None, **kwargs):
    """Wrap the expression as the override name ``'@'`` before delegating."""
    wrapped = AST(name='@', exp=ast)
    super().__init__(ast=wrapped)
def __init__(self, ast=None, **kwargs):
    """Pre-seed ``options`` so the base class can populate it from the AST."""
    self.options = []
    wrapped = AST(options=ast)
    super().__init__(ast=wrapped)
    assert isinstance(self.options, list), repr(self.options)
def __init__(self, ast=None, **kwargs):
    """Pre-seed ``comment`` so the base class can fill it from the AST."""
    self.comment = None
    wrapped = AST(comment=ast)
    super().__init__(ast=wrapped)
def to_ast(x: str) -> AST:
    """Return an AST whose ``String`` field is *x* wrapped in double quotes."""
    return AST(String=f'"{x}"')
def test_empty(self):
    """Attribute access on a missing key reads as None."""
    self.assertIsNone(AST().name)
def _push_ast(self):
    """Push a fresh AST frame onto the AST stack (pushing a CST frame first)."""
    self._push_cst()
    self._ast_stack.append(AST())
def __init__(self, ast=None, **kwargs):
    """Build a choice over the given list of option expressions."""
    wrapped = AST(options=ast)
    super(Choice, self).__init__(ast=wrapped)
    assert isinstance(self.options, list), urepr(self.options)
def __init__(self, ast=None, **kwargs):
    """Wrap the expression as the override name ``'@'`` before delegating."""
    wrapped = AST(name='@', exp=ast)
    super(OverrideList, self).__init__(ast=wrapped)
def test_ast(self):
    """A freshly constructed AST is empty and exposes a __json__ hook."""
    node = AST()
    self.assertEqual([], list(node.items()))
    self.assertTrue(hasattr(node, '__json__'))
def negative(self, ast):
    """Translate a negative lookahead into a sequence: reject *ast*, then match any char.

    Renamed the local previously called ``any`` — it shadowed the builtin.
    """
    lookahead = model.NegativeLookahead(ast)
    any_char = model.Pattern('.')
    return model.Sequence(AST(sequence=[lookahead, any_char]))
def node_to_tatsu(node: gll_grammar.RuleNode): match node: case gll_grammar.Empty(): return tatsu_grammars.EmptyClosure() case gll_grammar.LabeledNode(name, item): return tatsu_grammars.Named(AST(name=name, exp=node_to_tatsu(item))) case gll_grammar.StringLiteral(s): return tatsu_grammars.Token(ast=s) case gll_grammar.CharacterRange(_, _): raise NotImplementedError("character ranges") case gll_grammar.SymbolName(name): return tatsu_grammars.RuleRef(ast=name) case gll_grammar.Concatenation(items): return tatsu_grammars.Sequence( ast=AST(sequence=list(map(node_to_tatsu, items))) ) case gll_grammar.Alternation(items): return tatsu_grammars.Choice(ast=list(map(node_to_tatsu, items))) case gll_grammar.Option(item): return tatsu_grammars.Optional(exp=node_to_tatsu(item)) case gll_grammar.Repeated(False, item, separator=None, allow_trailing=False): return tatsu_grammars.Closure(exp=node_to_tatsu(item)) case gll_grammar.Repeated(True, item, separator=None, allow_trailing=False): return tatsu_grammars.PositiveClosure(exp=node_to_tatsu(item)) case gll_grammar.Repeated(False, item, separator, allow_trailing=False): return tatsu_grammars.Join( ast=AST(exp=node_to_tatsu(item), sep=tatsu_grammars.Token(ast=separator)) ) case gll_grammar.Repeated(True, item, separator, allow_trailing=False): return tatsu_grammars.PositiveJoin( ast=AST(exp=node_to_tatsu(item), sep=tatsu_grammars.Token(ast=separator)) ) case gll_grammar.Repeated(False, item, separator, allow_trailing=True): item = node_to_tatsu(item) separator = tatsu_grammars.Token(ast=separator) return tatsu_grammars.Sequence( ast=AST( sequence=[ tatsu_grammars.Closure( exp=tatsu_grammars.Sequence( ast=AST( sequence=[ item, separator, ] ) ), ), tatsu_grammars.Optional(exp=item), ] ) ) case gll_grammar.Repeated(True, item, separator, allow_trailing=True): item = node_to_tatsu(item) separator = tatsu_grammars.Token(ast=separator) return tatsu_grammars.Sequence( ast=AST( sequence=[ item, tatsu_grammars.Closure( 
exp=tatsu_grammars.Sequence( ast=AST( sequence=[ separator, item, ] ) ), ), tatsu_grammars.Optional(exp=separator), ] ) ) case _: # should be unreachable assert False, repr(node)