def test_incparser_ast():
    # IncParser.check() is no longer part of the API, so this test is
    # skipped up front; the body below never executes and is kept only
    # as a record of what the test used to verify.
    pytest.skip("lrp.check is deprecated")
    lrp = IncParser(grammar)
    lrp.check("1 + 2 * 3")
    first_ast = lrp.get_ast()
    lrp.check("1 + 2 * 1")
    second_ast = lrp.get_ast()
    # reparsing should reuse the parent node
    assert first_ast.parent is second_ast.parent
def load(self):
    """Build (or fetch from cache) the incremental parser/lexer pair.

    Results are memoised in the module-level ``_cache`` keyed on
    ``self.name``; on a cache hit only the grammar rules are re-parsed
    (the incremental parser needs them to detect comments), on a miss
    the full bootstrap pipeline runs.

    Returns:
        tuple: ``(incparser, inclexer)`` ready for use by the editor.
    """
    from grammar_parser.bootstrap import BootstrapParser
    from jsonmanager import JsonManager
    # `in` replaces the Python-2-only dict.has_key(), matching the
    # membership test used by the other load() variant in this file.
    if self.name + "::parser" in _cache:
        root, language, whitespaces = _cache[self.name + "::json"]
        # parse rules as they are needed by the incremental parser to
        # detect comments
        manager = JsonManager(unescape=True)
        root, language, whitespaces = manager.load(self.filename)[0]
        pickle_id = hash(self)
        bootstrap = BootstrapParser(lr_type=1, whitespaces=whitespaces)
        bootstrap.ast = root
        bootstrap.parse_rules(root.children[1].children[1].children[0])
        pickle_id, whitespace = _cache[self.name + "::parser"]
        from incparser.incparser import IncParser
        incparser = IncParser()
        incparser.from_dict(bootstrap.rules, None, None, whitespace,
                            pickle_id, None)
        incparser.init_ast()
        inclexer = _cache[self.name + "::lexer"]
        # give parser a reference to its lexer (needed for multiline comments)
        incparser.lexer = inclexer
        return (incparser, inclexer)
    else:
        manager = JsonManager(unescape=True)
        root, language, whitespaces = manager.load(self.filename)[0]
        pickle_id = hash(self)
        bootstrap = BootstrapParser(lr_type=1, whitespaces=whitespaces)
        bootstrap.ast = root
        bootstrap.extra_alternatives = self.alts
        bootstrap.change_startrule = self.extract
        bootstrap.read_options()
        bootstrap.parse_both()
        bootstrap.create_parser(pickle_id)
        bootstrap.create_lexer()
        whitespace = bootstrap.implicit_ws()
        # populate the cache so the next load() takes the fast path
        _cache[self.name + "::lexer"] = bootstrap.inclexer
        _cache[self.name + "::json"] = (root, language, whitespaces)
        _cache[self.name + "::parser"] = (pickle_id, whitespace)
        bootstrap.incparser.lexer = bootstrap.inclexer
        return (bootstrap.incparser, bootstrap.inclexer)
def set_language(self, lang, whitespace):
    """Install *lang* as the editor's main language.

    Accepts a Language, an EcoGrammar or an EcoFile, builds the
    matching incremental parser/lexer pair, and hands both to the
    editor together with the language name.
    """
    if isinstance(lang, Language):
        parser = IncParser(str(lang.grammar), 1, whitespace)
        parser.init_ast()
        lexer = IncrementalLexer(str(lang.priorities))
        self.editor.set_mainlanguage(parser, lexer, lang.name)
    elif isinstance(lang, EcoGrammar):
        bootstrap = BootstrapParser(lr_type=1, whitespaces=whitespace)
        bootstrap.parse(lang.grammar)
        self.editor.set_mainlanguage(bootstrap.incparser,
                                     bootstrap.inclexer, lang.name)
    elif isinstance(lang, EcoFile):
        parser, lexer = lang.load()
        self.editor.set_mainlanguage(parser, lexer, lang.name)
def parse(self, ecogrammar):
    """Bootstrap a parser and lexer from an Eco Grammar source string.

    Parses *ecogrammar* with the built-in eco_grammar meta-grammar,
    then derives options, rules, parser and lexer from the resulting
    tree via read_options/parse_both/create_parser/create_lexer.

    Raises:
        Exception: if importing the grammar hit syntax errors.
    """
    # this is only called for grammars based on Eco Grammar (not Eco Grammar (Eco))
    from grammars.eco_grammar import eco_grammar as grammar
    self.lexer = IncrementalLexer(grammar.priorities)
    self.parser = IncParser(grammar.grammar, 1, True)
    self.parser.init_ast()
    self.ast = self.parser.previous_version.parent
    self.treemanager = TreeManager()
    self.treemanager.add_parser(self.parser, self.lexer, grammar.name)
    self.treemanager.import_file(ecogrammar)
    # last_status is set by the parser during import_file; False means
    # the grammar text did not parse
    if self.parser.last_status == False:
        raise Exception("Invalid input grammar due to syntax errors")
    self.read_options()
    self.parse_both()
    self.create_parser()
    self.create_lexer()
def load(self, buildlexer=True):
    """Return an ``(incparser, inclexer)`` pair for this grammar file.

    A module-level ``_cache`` keyed on the grammar name makes repeated
    loads cheap; on a miss the grammar is bootstrapped from its JSON
    description and the cache is populated for next time.
    """
    from grammar_parser.bootstrap import BootstrapParser
    from jsonmanager import JsonManager
    from incparser.incparser import IncParser

    parser_key = self.name + "::parser"
    if parser_key in _cache:
        # Fast path: rebuild a parser from the cached syntax table.
        syntaxtable, whitespaces = _cache[parser_key]
        parser = IncParser()
        parser.syntaxtable = syntaxtable
        parser.whitespaces = whitespaces
        parser.init_ast()
        parser.lang = self.name
        lexer = _cache[self.name + "::lexer"]
        # give parser a reference to its lexer (needed for multiline comments)
        parser.lexer = lexer
        parser.previous_version.parent.name = self.name
        return (parser, lexer)

    # Slow path: bootstrap everything from the grammar's JSON file.
    manager = JsonManager(unescape=True)
    root, language, whitespaces = manager.load(self.filename)[0]
    bootstrap = BootstrapParser(lr_type=1, whitespaces=whitespaces)
    bootstrap.ast = root
    bootstrap.extra_alternatives = self.alts
    bootstrap.change_startrule = self.extract
    bootstrap.read_options()
    whitespace = bootstrap.implicit_ws()
    pickle_id = self.pickleid(whitespace)
    bootstrap.parse_both()
    bootstrap.create_parser(pickle_id)
    bootstrap.create_lexer(buildlexer)
    _cache[self.name + "::lexer"] = bootstrap.inclexer
    _cache[self.name + "::json"] = (root, language, whitespaces)
    _cache[parser_key] = (bootstrap.incparser.syntaxtable, whitespace)
    bootstrap.incparser.lang = self.name
    bootstrap.incparser.previous_version.parent.name = self.name
    bootstrap.incparser.lexer = bootstrap.inclexer
    return (bootstrap.incparser, bootstrap.inclexer)
def create_parser(self, pickle_id=None):
    """Finalise the grammar rules and build the incremental parser.

    Synthesises the special ``*match_until`` helper rules, the implicit
    whitespace rule (optionally admitting comments and newlines) and a
    wrapping start rule, then instantiates ``IncParser`` from the
    result and stores it on ``self.incparser``.
    """
    self.all_terminals.update(self.terminals)
    # Build a rule for every *match_until helper: it greedily consumes
    # any terminal that is not one of its stop terminals.
    for fname, terminals, parentrule in self.functions:
        if fname.startswith("*match_until"):
            if Nonterminal(fname) not in self.rules:
                r = Rule(Nonterminal(fname))
                for t in self.all_terminals:
                    if t not in terminals:
                        r.add_alternative(
                            [Nonterminal(fname), Terminal(t)], None, t)
                r.add_alternative([])
                self.rules[r.symbol] = r
            # remove whitespace before special rule from parent rule, e.g.
            # multistring ::= "MLS" WS *match_until "MLS" WS
            #                       ^ this WS causes shift/reduce conflicts
            prule = self.rules[Nonterminal(parentrule)]
            for a in prule.alternatives:
                for i in range(len(a)):
                    sym = a[i]
                    if sym.name == "WS":
                        if len(a) > i + 1 and a[i + 1].name.startswith(
                                "*match_until"):
                            a.pop(i)
                            break
    if self.implicit_ws():
        # WS ::= WS <ws> | WS comment | WS <return> | ... | (empty)
        ws_rule = Rule()
        ws_rule.symbol = Nonterminal("WS")
        ws_rule.add_alternative([Nonterminal("WS"), Terminal("<ws>")])
        # get comment rule
        # `in` replaces the Python-2-only dict.has_key(), matching the
        # membership tests used elsewhere in this file.
        if 'comment_rule' in self.options:
            cmt_rules = self.options['comment_rule']
            for cmt_rule in cmt_rules:
                if Nonterminal(cmt_rule) in self.rules:
                    ws_rule.add_alternative(
                        [Nonterminal("WS"), Nonterminal("comment")])
        if self.implicit_newlines():
            ws_rule.add_alternative(
                [Nonterminal("WS"), Terminal("<return>")])
            ws_rule.add_alternative([
                Nonterminal("WS"),
                Terminal("<backslash>"),
                Terminal("<return>")
            ])
        ws_rule.add_alternative([])  # or empty
        self.rules[ws_rule.symbol] = ws_rule
        for a in ws_rule.alternatives:
            self.prod_ids[Production(ws_rule.symbol,
                                     a)] = len(self.prod_ids)

        # allow whitespace/comments at beginning of file
        start_rule = Rule()
        start_rule.symbol = Nonterminal("Startrule")
        start_rule.add_alternative([Nonterminal("WS"), self.start_symbol])
        self.rules[start_rule.symbol] = start_rule
        self.prod_ids[Production(start_rule.symbol,
                                 start_rule.alternatives[0])] = len(
                                     self.prod_ids)
        self.start_symbol = start_rule.symbol
    incparser = IncParser()
    incparser.from_dict(self.rules, self.start_symbol, self.lr_type,
                        self.implicit_ws(), pickle_id,
                        self.precedences, self.prod_ids)
    incparser.init_ast()
    self.incparser = incparser
def setup_class(cls):
    """Build one calc1 parser/lexer pair shared by all tests in the class."""
    lexer = IncrementalLexer(calc1.priorities)
    parser = IncParser(calc1.grammar, 1, True)
    parser.init_ast()
    cls.lexer = lexer
    cls.parser = parser
    cls.ast = parser.previous_version