def parse(input_expr, context, debug=False):
    """Parse a matrip expression and build its base table for *context*."""
    grammar_path = os.path.join(os.path.dirname(__file__), 'matrip.peg')
    with open(grammar_path, 'r') as grammar_file:
        grammar_text = grammar_file.read()

    peg_parser = ParserPEG(grammar_text, "matrip", debug=debug)
    tree = peg_parser.parse(input_expr)

    matrip_visitor = MatripVisitor(debug=debug)
    visit_parse_tree(tree, matrip_visitor)
    return generate_base_table(matrip_visitor.measures,
                               matrip_visitor.expressions, context)
def main():
    """Run the Z interpreter on the file named in argv[1], or on stdin."""
    functions = {
        "add": zadd,
        "get": zget,
        "not": znot,
        "print": zprint,
    }
    visitor = ZVisitor(functions)
    parser = ParserPython(program, comment)
    if len(sys.argv) == 1:
        prog = sys.stdin.read()
    else:
        # Fix: close the source file instead of leaking the handle
        # (the original used a bare open(sys.argv[1]).read()).
        with open(sys.argv[1]) as source:
            prog = source.read()
    parse_tree = parser.parse(prog)
    visit_parse_tree(parse_tree, visitor)
def solve(file, verbose):
    """Evaluate every line of *file* with both grammars; print the two sums."""
    simple_parser = ParserPEG(simple_grammar, root_rule_name='expr')
    advanced_parser = ParserPEG(advanced_grammar, root_rule_name='expr')

    totals = {'simple': 0, 'advanced': 0}
    for line in file:
        # Each line is parsed twice: once per grammar variant.
        totals['simple'] += visit_parse_tree(simple_parser.parse(line),
                                             SimpleVisitor())
        totals['advanced'] += visit_parse_tree(advanced_parser.parse(line),
                                               AdvancedVisitor())
    print('Part 1:', totals['simple'])
    print('Part 2:', totals['advanced'])
def parse(input_expr):
    """Parse *input_expr* with the calc grammar.

    @param input_expr type str
    @return calcVisitor type CalcVisitor
    """
    tree = ParserPEG(calc_grammar, "calc").parse(input_expr)
    visitor = CalcVisitor()
    visit_parse_tree(tree, visitor)
    return visitor
def parse(content: str) -> 'Atom':
    """Parse *content* as a single atom of the foil language."""
    from foil.language.grammar import atom
    from foil.language.grammar import comment

    atom_parser = ParserPython(atom, comment_def=comment)
    tree = atom_parser.parse(content)
    return visit_parse_tree(tree, FoilVisitor())
def parse(content: str) -> 'Literal':
    """Parse *content* as a single literal of the foil language."""
    from foil.language.grammar import literal
    from foil.language.grammar import comment

    literal_parser = ParserPython(literal, comment_def=comment)
    tree = literal_parser.parse(content)
    return visit_parse_tree(tree, FoilVisitor())
def parse(filename, debug=False):
    """Parse *filename*; return the populated Visitor, or None when empty."""
    with open(filename) as file:
        contents = file.read()
    # NOTE(review): ParserPEG's third positional parameter is
    # comment_rule_name; passing True here looks suspicious — confirm intent.
    parser = ParserPEG(calc_grammar, "start", True)
    tree = parser.parse(contents)
    visitor = Visitor(debug=debug)
    visit_parse_tree(tree, visitor)
    # Report only when something was actually found.
    if visitor.functions or visitor.classes:
        print(visitor.namespace)
        return visitor
    print("Empty")
    return None
def main(debug=False):
    """Parse and evaluate a fixed arithmetic expression with the PEG calc grammar."""
    # Grammar is defined using textual specification based on PEG language.
    # Fix: load the grammar with a context manager; the original leaked the
    # handle via a bare open(...).read().
    grammar_path = os.path.join(os.path.dirname(__file__), 'calc.peg')
    with open(grammar_path, 'r') as grammar_file:
        calc_grammar = grammar_file.read()

    # Parser model is given in PEG notation, therefore we use ParserPEG.
    # Root rule name (parsing expression) is "calc".
    parser = ParserPEG(calc_grammar, "calc", debug=debug)

    # An expression we want to evaluate.
    input_expr = "-(4-1)*5+(2+4.67)+5.89/(.2+7)"

    # A parse tree is created out of the input_expr expression.
    parse_tree = parser.parse(input_expr)

    # visit_parse_tree starts semantic analysis, which here evaluates the
    # expression and returns its value.
    result = visit_parse_tree(parse_tree, CalcVisitor(debug=debug))

    # Fix: compare the absolute difference. The original
    # `(result - expected) < 0.0001` would accept any result smaller than
    # the expected value.
    assert abs(result - -7.51194444444) < 0.0001
    print("{} = {}".format(input_expr, result))
def main():
    """CLI entry point: parse an M source file and print the visited AST as JSON."""
    arg_parser = argparse.ArgumentParser(description=__doc__)
    arg_parser.add_argument('-d', '--debug', action='store_true', default=False,
                            help='Debug Arpeggio parser')
    arg_parser.add_argument('-v', '--verbose', action='store_true', default=False,
                            help='Increase output verbosity')
    arg_parser.add_argument('--no-order', action='store_true', default=False,
                            help='Do not use OrderedDict')
    arg_parser.add_argument('--no-visit', action='store_true', default=False,
                            help='Do not visit the parsed tree')
    arg_parser.add_argument('--rule', default='m_source_file',
                            help='Root rule name')
    arg_parser.add_argument('source_file', help='Source file to parse')
    global args
    args = arg_parser.parse_args()

    verbose = args.verbose or args.debug
    logging.basicConfig(level=logging.DEBUG if verbose else logging.WARNING,
                        stream=sys.stdout)

    with open(args.source_file) as source_file:
        source_code = source_file.read()
    log.debug('Source file "{}" was read with success.'.format(args.source_file))

    with open(m_grammar_file_path) as m_grammar_file:
        m_grammar = m_grammar_file.read()

    # The parser is kept in a module-level global for reuse elsewhere.
    global m_parser
    m_parser = ParserPEG(m_grammar, args.rule, debug=args.debug,
                         reduce_tree=False)
    log.debug('M language clean-PEG grammar was parsed with success.')

    parse_tree = m_parser.parse(source_code)
    log.debug('Source file "{}" was parsed with success.'.format(args.source_file))

    if not args.no_visit:
        result = visit_parse_tree(parse_tree, MLanguageVisitor(debug=args.debug))
        print(json.dumps(result))
    return 0
def get_canonical_compound_unit_dict_from_string(self, unit_string):
    """Parse *unit_string* and return its canonical compound-unit dict."""
    unit_tree = self.parser.parse(unit_string)
    parsed_unit = visit_parse_tree(unit_tree, UnitVisitor(debug=False))
    return CanonicalCompoundUnit(parsed_unit).get_unit_object_list()
def parse_query(self):
    """Parse self.restore_query; return "Success" or an error message."""
    try:
        self.parse_tree = self.parser.parse(self.restore_query)
    except NoMatch as err:
        return "Error"
    self.tree_visitor = self.QueryVisitor()
    # visit_parse_tree is an arpeggio helper that walks the tree built
    # from the query.
    try:
        visit_parse_tree(self.parse_tree, self.tree_visitor)
    except SemanticError as err:
        return str(err)
    # Any leftover entries mean unbalanced brackets.
    if self.tree_visitor.bracket_list:
        return "Error: brackets not valid"
    return "Success"
def validate_ged(fname):
    """Parse, validate, and pretty-print the GED file *fname*."""
    with open(fname, 'r') as ged_file:
        ged = ged_file.read()
    try:
        # Fix: check for an empty file before parsing; the original only
        # raised after ged_parser.parse(), which may itself fail on "".
        if len(ged) == 0:
            raise ValueError("Error: File empty! {}".format(fname))
        res = ged_parser.parse(ged)
        vis = GED_Visitor()
        visit_parse_tree(res, vis)
        print("... {}".format(fname))
        vis.validate()
        try:
            p = vis.pretty()
            fnout = output_filename(fname, prefix=output_file_prefix)
            # Fix: use a context manager so the output handle is closed
            # even when write() fails.
            with open(fnout, 'w+') as out_file:
                out_file.write(p)
            print("OUTPUT: ")
            print(" " + fnout)
        except ValueError as e:
            print(e)
    except Exception as e:
        print(e)
        # Fix: the original passed fname as a second print() argument
        # instead of formatting it into the message.
        print("Sorry, could not parse {}".format(fname))
def test_semantic_action_results():
    """Visiting must populate the module-level SemanticActionResults globals."""
    global first_sar, third_sar
    source = "4 3 3 3 a 3 3 b"
    tree = ParserPython(grammar, reduce_tree=False).parse(source)
    PTDOTExporter().exportFile(tree, "test_semantic_action_results_pt.dot")
    visit_parse_tree(tree, TestVisitor())
    assert isinstance(first_sar, SemanticActionResults)
    assert len(first_sar.third) == 3
    assert third_sar.third_str[0] == "3"
def simulate(self, max_time):
    """Run the parsed program until it finishes or *max_time* seconds elapse.

    Raises SimulationError — annotated with the line being executed — on
    timeout or on any error raised by the instruction visitor.
    """
    # TODO: here you can change the number of register you want to use
    # Prepare the environment for the simulation
    # 3 registers and 256 of memory
    environment = Environment(self.MAX_REG, 256, self.root.labels)
    start_time = time.time()
    current_instruction = 0
    current_line = self.root[current_instruction].line
    try:
        # Execute the simulation
        while True:
            # While the time has not been exceeded
            elapsed_time = time.time() - start_time
            if elapsed_time > max_time:
                raise SimulationError("Maximum time allowed for the simulation exceeded!")
            # Check if the simulation is over
            if current_instruction >= len(self.root) - 1:
                break
            # Do the instruction logic; the visitor returns the index of
            # the next instruction (supports jumps).
            current_instruction = visit_parse_tree(self.root[current_instruction],
                                                   SimulatorVisitor(environment))
            current_line = self.root[current_instruction].line
    # Add the line who crashed so it's easier to debug
    except SimulationError as error:
        message, = error.args
        raise SimulationError(message + " occurred at line " + str(current_line))
    # Print the finale results
    environment.print()
def test_mult_div_2():
    """Combination of multiplication and division with brackets.

    The M language specifies that multiplications come first, and the
    division last.
    """
    source_code = '''
regle 123:
application : iliad, batch;
apple = 3.14*a / (b + 3) - 4;
'''
    parse_tree = m_parser.parse(source_code)
    nodes = visit_parse_tree(parse_tree, m_to_ast.MLanguageVisitor())

    assert_equal(len(nodes), 1)
    regle = nodes[0]
    assert_equal(regle['type'], 'regle')
    assert_equal(regle['name'], '123')
    assert_equal(len(regle['formulas']), 1)

    formula = regle['formulas'][0]
    assert_equal(formula['type'], 'formula')
    assert_equal(formula['name'], 'apple')

    expression = formula['expression']
    assert_equal(expression['type'], 'sum')
    assert_equal(len(expression['operands']), 2)

    product = expression['operands'][0]
    assert_equal(product['type'], 'product')
    assert_equal(len(product['operands']), 3)
    assert_equal(product['operands'][0]['type'], 'float')
    assert_equal(product['operands'][0]['value'], 3.14)
    assert_equal(product['operands'][1]['type'], 'symbol')
    assert_equal(product['operands'][1]['value'], 'a')

    invert = product['operands'][2]
    assert_equal(invert['type'], 'invert')
    inner_sum = invert['operand']
    assert_equal(inner_sum['type'], 'sum')
    assert_equal(len(inner_sum['operands']), 2)
    assert_equal(inner_sum['operands'][0]['type'], 'symbol')
    assert_equal(inner_sum['operands'][0]['value'], 'b')

    negate = expression['operands'][1]
    assert_equal(negate['type'], 'negate')
    assert_equal(negate['operand']['type'], 'integer')
    assert_equal(negate['operand']['value'], 4)
def parse_query_str(query_string: str):
    """Parse a query string into a terms structure; {} for blank input."""
    if not query_string.strip():
        return {}
    # Fix: collapse runs of whitespace other than newlines into one space.
    # The original pattern r'[\s&&[^\n]]+' used Java-style character-class
    # intersection, which Python's re module does not support — it parsed
    # as the class [\s&&[^\n] followed by one-or-more literal ']' chars.
    query_string = re.sub(r'[^\S\n]+', ' ', query_string)
    pt = parser.parse(query_string)
    result = arpeggio.visit_parse_tree(pt, TermsVisitor())
    return result
def parse_asp_program_by_arpeggio(asp_source_code: str, do=None, have_comments: bool = True) -> tuple:
    """Parse ASP source code; visit it with *do* (default: CodeAsTuple)."""
    comment_def = asp_grammar_comments if have_comments else None
    tree = ap.ParserPython(asp_grammar(), comment_def).parse(asp_source_code)
    return ap.visit_parse_tree(tree, visitor=do or CodeAsTuple())
def parse_OMCValue__v_1_13(
    literal: str
):
    """Parse an OMC value literal using the v1.13 value visitor."""
    tree = get_omc_value_parser().parse(literal)
    value_visitor = visitor.OMCValueVisitor__v_1_13()
    return arpeggio.visit_parse_tree(tree, value_visitor)
def test_semantic_action_results():
    """After visiting with default actions, the SAR globals must be populated."""
    global first_sar, third_sar
    source = "4 3 3 3 a 3 3 b"
    parse_result = ParserPython(grammar, reduce_tree=False).parse(source)
    PTDOTExporter().exportFile(parse_result,
                               'test_semantic_action_results_pt.dot')
    visit_parse_tree(parse_result, Visitor(defaults=True))
    assert isinstance(first_sar, SemanticActionResults)
    assert len(first_sar.third) == 3
    assert third_sar.third_str[0] == '3'
def parseResult(self, string):
    """Parse *string*; return the visited tree, or None when it does not match."""
    try:
        tree = self.parser().parse(string)
        return visit_parse_tree(tree, DrealParseTreeVisitor())
    except NoMatch as e:
        return None
def parse_string(self, src, grammar=program, filename=None):
    """Compile MUV source *src* into self.output.

    Returns True on success, False when a compile error was reported.
    Parser and filename are pushed onto context stacks so nested includes
    can compile recursively; the finally block always pops them.
    """
    oldsrcs = self.input_sources
    self.context.optimization_level = self.optimization_level
    self.input_sources = src
    parser = ParserPython(
        grammar,
        comment_def=comment,
        skipws=True,
        reduce_tree=False,
        memoization=True,
        debug=False,
    )
    self.context.parsers.append(parser)
    self.context.filenames.append(filename)
    try:
        parse_tree = parser.parse(self.input_sources)
        visitor = MuvVisitor(debug=False)
        visitor.muvparser = self
        parse_tree = visit_parse_tree(parse_tree, visitor)
        out = parse_tree.generate_code(self.context)
        if self.error_found:
            return False
        # Only the outermost file gets the generated-by banner.
        if len(self.context.filenames) == 1:
            if self.context.filenames[-1]:
                filetext = " from {0}".format(
                    self.context.filenames[-1]
                )
            else:
                filetext = ''
            self.output = (
                "( Generated{0} by the MUV compiler. )\n"
                "( https://github.com/revarbat/pymuv )\n"
                "{1}\n"
            ).format(filetext, self.output)
        self.output += out
        # Only the outermost file gets wrapped into an @program shell.
        if not self.error_found and len(self.context.filenames) == 1:
            if self.wrapper_program:
                self.output = (
                    "@program {0}\n"
                    "1 99999 d\n"
                    "1 i\n"
                    "{1}\n"
                    ".\n"
                    "c\n"
                    "q\n"
                ).format(self.wrapper_program, self.output)
        return True
    except MuvError as e:
        line, col = parser.pos_to_linecol(e.position)
        self.print_error(filename, line, col, str(e))
        return False
    except NoMatch as e:
        line, col = parser.pos_to_linecol(e.position)
        expected = self.simplify_parse_error(e)
        self.print_error(filename, line, col, "Expected %s" % expected)
        return False
    finally:
        # Always restore the previous compilation state.
        self.input_sources = oldsrcs
        self.context.parsers.pop()
        self.context.filenames.pop()
def extractRelation(name):
    """Parse *name* and split it into plain name parts plus an optional tag.

    Returns a dict with "name"/"tag" keys; when a relation tag is present,
    it also carries "<tag>_name" and "value" entries.
    """
    tree = parser.parse(name)
    tokens = visit_parse_tree(tree, Visitor(defaults=False,debug=False))
    tag = None
    names = []
    i = ""
    for token in tokens:
        # NOTE(review): `unicode` exists only on Python 2 — confirm this
        # module targets Python 2, or alias unicode = str for Python 3.
        if isinstance(token,unicode):
            # Plain text accumulates into the current name fragment.
            i += token
        elif tag == None:
            # First non-string token is the relation tag; it also closes
            # the current name fragment.
            tag = token
            names.append(i)
            i = ""
        else:
            raise Exception("Too many relations in field")
    names.append(i)
    if tag != None:
        k = tag[0].lower()+"_name"
        return {"name": names[0], k: names[1], "tag": tag[0], "value": tag[1]}
    else:
        return {"name": names[0], "tag": "simple"}
def parse_components(
    literal: str
) -> typing.List[ComponentTuple]:
    """Parse an OMC record-array literal into a list of component tuples."""
    parse_tree = get_omc_record_array_parser().parse(literal)
    component_visitor = visitor.ComponentArrayVisitor(source=literal)
    return arpeggio.visit_parse_tree(parse_tree, component_visitor)
def parse(file, enc):
    """Read *file* using encoding *enc* and return the visited parse tree."""
    with codecs.open(file, "r", encoding=enc) as opened_file:
        text = opened_file.read()
    segnetics_parser = ParserPython(segnetics_file, reduce_tree=True)
    return visit_parse_tree(segnetics_parser.parse(text), SegneticsVisitor())
def parse_terms(self, string: str) -> frozenset:
    """Return the frozenset computed from given valid ASP-compliant string"""
    tree = ap.ParserPython(self.grammar).parse(string)
    if not tree:
        # Nothing parsed: the empty set of terms.
        return frozenset()
    return ap.visit_parse_tree(tree, self.atom_visitor)
def main(debug=False):
    """Bootstrap check: parse the PEG grammar of PEG with itself."""
    current_dir = os.path.dirname(__file__)
    # Fix: close the grammar file; the original leaked the handle via a
    # bare open(...).read().
    with open(os.path.join(current_dir, 'peg.peg')) as peg_file:
        peg_grammar = peg_file.read()

    # ParserPEG will use ParserPython to parse the peg_grammar definition
    # and create a parser_model for parsing PEG based grammars.
    # In debug mode dot (graphviz) files for the parser model and parse
    # tree are created for visualization — check the current folder.
    parser = ParserPEG(peg_grammar, 'peggrammar', debug=debug)

    # Parse the same peg_grammar used for parser initialization; this
    # works because PEG can be specified using PEG itself.
    print("PARSING")
    parse_tree = parser.parse(peg_grammar)

    # The ASG should equal parser.parser_model because the semantic
    # actions create a PEG parser (a tree of ParsingExpressions).
    parser_model, comment_model = visit_parse_tree(
        parse_tree, PEGVisitor(root_rule_name='peggrammar',
                               comment_rule_name='comment',
                               ignore_case=False,
                               debug=debug))

    if debug:
        # This graph should be the same as peg_peg_parser_model.dot
        # because they define the same parser.
        PMDOTExporter().exportFile(parser_model,
                                   "peg_peg_new_parser_model.dot")

    # Replacing the parser model with the ASG-constructed one must still
    # parse PEG grammars.
    parser.parser_model = parser_model
    parser.parse(peg_grammar)
def main(argv):
    """Parse a formula string, evaluate it via formVisitor, and return it."""
    # Different algebraic relations next to each other, i.e. a*|b,
    # require brackets: (a*)|b.
    formula_parser = ParserPython(formula)  # , debug=True) #, reduce_tree = True)
    tree = formula_parser.parse(argv)
    result = visit_parse_tree(tree, formVisitor())
    result.tostr()
    return result
def load_from_str(self, content: str) -> 'Builder':
    """Parse cypher *content* into a RuleBase and load it; returns self."""
    cypher_parser = ParserPython(cypher, comment_def=comment)
    tree = cypher_parser.parse(content)
    knowledge = visit_parse_tree(tree, KnowledgeVisitor())
    self.load_from_base(RuleBase(knowledge['data']))
    return self
def visit_calc(self, n, c):
    """Evaluate the calc node; honour an optional bounded `repeat` count."""
    def evaluate(ast):
        return visit_parse_tree(ast, ParserCalc.ExpressionVisitor(debug=False))

    if 'repeat' in c.results:
        repeat = int(c.results['repeat'][0])
        # Only repeat for sane counts (2..20 inclusive).
        if 1 < repeat <= 20:
            return [evaluate(n) for _ in range(repeat)]
    return [evaluate(n)]
def language_from_str(language_def, metamodel):
    """
    Constructs parser and initializes metamodel from language description
    given in textX language.

    Args:
        language_def (str): A language description in textX.
        metamodel (TextXMetaModel): A metamodel to initialize.

    Returns:
        Parser for the new language.
    """
    if metamodel.debug:
        metamodel.dprint("*** PARSING LANGUAGE DEFINITION ***")

    # Check the cache for an already constructed textX parser
    # (keyed on the debug flag, since it changes parser behavior).
    if metamodel.debug in textX_parsers:
        parser = textX_parsers[metamodel.debug]
    else:
        # Create parser for TextX grammars using
        # the arpeggio grammar specified in this module
        parser = ParserPython(textx_model, comment_def=comment,
                              ignore_case=False,
                              reduce_tree=False,
                              memoization=metamodel.memoization,
                              debug=metamodel.debug,
                              file=metamodel.file)

        # Cache it for subsequent calls
        textX_parsers[metamodel.debug] = parser

    # Parse language description with textX parser
    try:
        parse_tree = parser.parse(language_def)
    except NoMatch as e:
        line, col = parser.pos_to_linecol(e.position)
        raise TextXSyntaxError(text(e), line, col)

    # Construct new parser and meta-model based on the given language
    # description.
    lang_parser = visit_parse_tree(parse_tree, TextXVisitor(parser, metamodel))

    # Meta-model is constructed. Validate its semantics.
    metamodel.validate()

    # Here we connect meta-model and language parser for convenience.
    lang_parser.metamodel = metamodel
    metamodel._parser_blueprint = lang_parser

    if metamodel.debug:
        # Create dot file for debugging purposes
        PMDOTExporter().exportFile(
            lang_parser.parser_model,
            "{}_parser_model.dot".format(metamodel.rootcls.__name__))

    return lang_parser
def parse(version, strict=False):
    """Parse a version string; raise ParseError when it does not match."""
    chosen_parser = _strict_parser if strict else _permissive_parser
    try:
        tree = chosen_parser.parse(version.strip())
    except NoMatch as exc:
        # Re-raise as ParseError without chaining the arpeggio context.
        six.raise_from(ParseError(str(exc)), None)
    return visit_parse_tree(tree, VersionVisitor())
def test_smoke():
    """The bundled valid_formulas.m must parse into a list of nodes."""
    smoke_path = os.path.join(script_dir_path, 'valid_formulas.m')
    with open(smoke_path) as smoke_m_file:
        source_code = smoke_m_file.read()
    nodes = visit_parse_tree(m_parser.parse(source_code),
                             m_to_ast.MLanguageVisitor())
    assert_equal(isinstance(nodes, list), True)
def visit_ged(ged):
    """Parse GED text and return the populated visitor, or None on failure."""
    try:
        res = ged_parser.parse(ged)
        vis = GED_Visitor()
        visit_parse_tree(res, vis)
        return vis
    except Exception as e:
        # Fix: the original `except e:` was invalid (NameError when an
        # exception occurred) and the message referenced an undefined
        # `fname` as a stray print() argument.
        print(e)
        print("Sorry, could not parse the GED content")
def parse(code: str) -> list:
    '''
    Parse a default Prolog syntax

    :param code: Prolog predicates
    :return: a list of Prolog ASTs
    '''
    raw_tree = prolog_parser.parse(code)
    return visit_parse_tree(raw_tree, PlgVisitor())
def _from_peg(self, language_def):
    """Build a parser model from a textual PEG grammar definition."""
    parser = ParserPython(peggrammar, comment, reduce_tree=False,
                          debug=self.debug)
    parser.root_rule_name = self.root_rule_name
    tree = parser.parse(language_def)
    peg_visitor = PEGVisitor(self.root_rule_name,
                             self.comment_rule_name,
                             self.ignore_case,
                             debug=self.debug)
    return visit_parse_tree(tree, peg_visitor)
def parse(source: str, have_return_value: bool) -> List[Statement]:
    """Parse a function body into a list of AST statements."""
    Context.return_value = have_return_value
    parser: ParserPython = ParserPython(function_body_grammar, comment_grammar,
                                        autokwd=True, memoization=True)
    try:
        parsed: ParseTreeNode = parser.parse(source)
        body: List[Statement] = visit_parse_tree(parsed, ASTBuilder())
    except NoMatch as err:
        # NOTE(review): handle_parse_error is expected not to return
        # normally; otherwise `body` would be unbound below — confirm.
        handle_parse_error(err, parser)
    return body
def parse_bibtex(file_name, debug=False):
    """Parse a BibTeX file; the module-level parser is built lazily once."""
    global parser
    if parser is None:
        parser = ParserPython(bibfile, debug=debug)
    with codecs.open(file_name, "r", encoding="utf-8") as bibtexfile:
        contents = bibtexfile.read()
    return visit_parse_tree(parser.parse(contents), BibtexVisitor(debug=debug))
def parse_whatid(id_string, parser=None, visitor=None):
    """
    Parses whatami id string into a pair (name, configuration).
    Makes a best effort to reconstruct python objects.

    Parameters
    ----------
    id_string : string
        The whatami id string to parse back.

    parser : An arpeggio parser or None
        The parser. Use None to use the default parser.

    visitor : An arpeggio visitor or None.
        Semantic actions over the AST.
        If None, the default visitor (that returns a What object) is used.

    Returns
    -------
    A two-tuple (what, out_name)
    what is a `whatami.What` object, containing name and conf
    out_name is a string or None

    Examples
    --------
    >>> what = parse_whatid('rfc(n_jobs=multiple(here=100))')
    >>> print(what.name)
    rfc
    >>> print(len(what.conf))
    1
    >>> print(what.conf['n_jobs'].conf['here'])
    100
    """
    global DEFAULT_WHATAMI_PARSER
    if parser is None:
        parser = DEFAULT_WHATAMI_PARSER
    if visitor is None:
        visitor = DEFAULT_WHATAMI_VISITOR
    try:
        return visit_parse_tree(parser.parse(id_string), visitor=visitor)
    except TypeError:
        # Remove this once arpeggio is released with this fix:
        # https://github.com/igordejanovic/Arpeggio/pull/21
        # Rebuild the shared default parser so the next call starts fresh,
        # then propagate the failure to the caller.
        DEFAULT_WHATAMI_PARSER = build_whatami_parser()
        raise
def main(debug=False):
    """Evaluate a fixed arithmetic expression with the PEG calc grammar."""
    # The parser model is given in PEG notation, hence ParserPEG; the
    # root rule (parsing expression) is "calc".
    parser = ParserPEG(calc_grammar, "calc", debug=debug)

    # The expression to evaluate.
    input_expr = "-(4-1)*5+(2+4.67)+5.89/(.2+7)"

    tree = parser.parse(input_expr)

    # visit_parse_tree runs semantic analysis, which here evaluates the
    # expression tree and returns its value.
    result = visit_parse_tree(tree, CalcVisitor(debug=debug))
    print("{} = {}".format(input_expr, result))
def main(debug=False):
    """Parse test_data.csv with the clean-PEG CSV grammar and pretty-print it."""
    # Whitespace skipping is limited to tabs and spaces: newlines carry
    # meaning in CSV files — they separate records.
    current_dir = os.path.dirname(__file__)
    # Fix: close both files; the original leaked handles via bare
    # open(...).read() calls.
    with open(os.path.join(current_dir, 'csv.peg'), 'r') as grammar_file:
        csv_grammar = grammar_file.read()
    parser = ParserPEG(csv_grammar, 'csvfile', ws='\t ', debug=debug)

    # Creating parse tree out of textual input.
    with open(os.path.join(current_dir, 'test_data.csv'), 'r') as data_file:
        test_data = data_file.read()
    parse_tree = parser.parse(test_data)

    # Create list of lists using the visitor.
    csv_content = visit_parse_tree(parse_tree, CSVVisitor())
    print("This is a list of lists with the data from CSV file.")
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(csv_content)
def main(debug=False):
    """Run the robot example program and report its final position."""
    # Load program.
    current_dir = os.path.dirname(__file__)
    # Fix: close the program file; the original leaked the handle via a
    # bare open(...).read().
    with open(os.path.join(current_dir, 'program.rbt'), 'r') as program_file:
        input_program = program_file.read()

    # The parser model is given as python constructs, hence ParserPython.
    parser = ParserPython(robot, debug=debug)

    # We create a parse tree out of textual input.
    parse_tree = parser.parse(input_program)

    # visit_parse_tree starts semantic analysis; here it evaluates the
    # program and returns the final position of the robot.
    result = visit_parse_tree(parse_tree, RobotVisitor(debug=debug))

    if debug:
        print("position = ", result)
def main(debug=False):
    """Evaluate a fixed arithmetic expression with the python-model calc parser."""
    # The parser model is given as python constructs, hence ParserPython.
    parser = ParserPython(calc, debug=debug)

    # An expression we want to evaluate.
    input_expr = "-(4-1)*5+(2+4.67)+5.89/(.2+7)"

    # We create a parse tree out of textual input_expr.
    parse_tree = parser.parse(input_expr)

    # visit_parse_tree starts semantic analysis, which evaluates the
    # expression and returns its value.
    result = visit_parse_tree(parse_tree, CalcVisitor(debug=debug))

    # Fix: compare the absolute difference. The original
    # `(result - expected) < 0.0001` would accept any result smaller
    # than the expected value.
    assert abs(result - -7.51194444444) < 0.0001

    print("{} = {}".format(input_expr, result))
"""Simple Command line Interface.""" import arpeggio # from . import common import common from common_interpreter import CommonInterpreter def parse(instruction): """Parse vim instruction.""" def new_rule(): return ( arpeggio.ZeroOrMore([ common.string, common.unsigned_float, common.signed_float, common.unsigned_integer, common.signed_integer,]), arpeggio.EOF) parser = arpeggio.ParserPython(new_rule, ignore_case=True) return parser.parse(instruction) if __name__ == "__main__": while True: # pylint: disable = invalid-name code = input("> ") # print(code) parse_tree = parse(code) interpreted_parse_tree = arpeggio.visit_parse_tree(parse_tree, CommonInterpreter()) print(interpreted_parse_tree)
def parseWord(cls, word):
    """Normalize *word*, parse it, and return the populated word object."""
    word = filter_chars(handle_special_chars(word.lower()))
    word_obj = visit_parse_tree(wordParser.parse(word), cls())
    # Remember the normalized form on the object itself.
    word_obj.word = word
    return word_obj
def __call__(self, s):
    """Parse and evaluate *s*; return None on any failure (best effort)."""
    try:
        tree = self.parser.parse(s)
        return visit_parse_tree(tree, self.CalcVisitor(debug=False))
    except Exception as e:
        # Deliberate best-effort: any parse/eval failure yields None.
        # print(e)
        return None
def tagName(name):
    """Parse *name* and return the visited token sequence."""
    parse_tree = parser.parse(name)
    return visit_parse_tree(parse_tree, Visitor(defaults=False, debug=False))
return not children[1] else: return not all(children) def visit_expression(self, node, children): if len(children) == 1: result = children[0] else: result = any(children) return result def visit_neg_expression(self, node, children): if len(children) == 2: return not children[1] else: return not any(children) if __name__ == '__main__': debug = False query_grammar = open('query.peg').read() parser = ParserPEG(query_grammar, "query", debug=debug) input_expr = '"python" and ( "developer" or "programmer")' parse_tree = parser.parse(input_expr) result = visit_parse_tree(parse_tree, QueryVisitor(debug=debug, title='python developer')) print(result) result = visit_parse_tree(parse_tree, QueryVisitor(debug=debug, title='java developer')) print(result)
print("Term = {}".format(term)) return term def visit_expression(self, node, children): """ Adds or substracts terms. Term nodes will be already evaluated. """ if self.debug: print("Expression {}".format(children)) expr = children[0] for i in range(2, len(children), 2): if i and children[i - 1] == "-": expr -= children[i] else: expr += children[i] if self.debug: print("Expression = {}".format(expr)) return expr parser = ParserPython(calc, debug=True) input_expr = "-(4-1)*5+(2+4.67)+5.89/(.2+7)" parse_tree = parser.parse(input_expr) result = visit_parse_tree(parse_tree, CalcVisitor(debug=True)) print(result - -7.51194444444) assert (result - -7.51194444444) < 0.0001 print("{} = {}".format(input_expr, result))
def analyze_history(ast):
    """Visit *ast* with HistVisitor and return the analysis result."""
    hist_visitor = HistVisitor()
    return visit_parse_tree(ast, hist_visitor)
def language_from_str(language_def, metamodel):
    """
    Constructs parser and initializes metamodel from language description
    given in textX language.

    Args:
        language_def (str): A language description in textX.
        metamodel (TextXMetaModel): A metamodel to initialize.

    Returns:
        Parser for the new language.
    """
    # Reject byte strings early: the grammar machinery assumes unicode.
    if type(language_def) is not text:
        raise TextXError("textX accepts only unicode strings.")

    if metamodel.debug:
        metamodel.dprint("*** PARSING LANGUAGE DEFINITION ***")

    # Check the cache for an already constructed textX parser
    # (keyed on the debug flag, since it changes parser behavior).
    if metamodel.debug in textX_parsers:
        parser = textX_parsers[metamodel.debug]
    else:
        # Create parser for TextX grammars using
        # the arpeggio grammar specified in this module
        parser = ParserPython(textx_model, comment_def=comment,
                              ignore_case=False,
                              reduce_tree=False,
                              memoization=metamodel.memoization,
                              debug=metamodel.debug,
                              file=metamodel.file)

        # Cache it for subsequent calls
        textX_parsers[metamodel.debug] = parser

    # Parse language description with textX parser
    try:
        parse_tree = parser.parse(language_def)
    except NoMatch as e:
        line, col = parser.pos_to_linecol(e.position)
        raise TextXSyntaxError(text(e), line, col)

    # Construct new parser and meta-model based on the given language
    # description.
    lang_parser = visit_parse_tree(parse_tree, TextXVisitor(parser, metamodel))

    # Meta-model is constructed. Validate its semantics.
    metamodel.validate()

    # Here we connect meta-model and language parser for convenience.
    lang_parser.metamodel = metamodel
    metamodel._parser_blueprint = lang_parser

    if metamodel.debug:
        # Create dot file for debugging purposes
        PMDOTExporter().exportFile(
            lang_parser.parser_model,
            "{}_parser_model.dot".format(metamodel.rootcls.__name__))

    return lang_parser