def parse_file(parser, word_word, debug = 0):
    # Run `parser` over the .ucl file behind `word_word`.
    #
    # Returns (True, args) when parser_init.parse produced a result,
    # (False, ()) otherwise.  `debug` is passed straight through to the
    # underlying parse call.
    sym = word_word.symbol
    path = word_word.get_filename()

    # Sanity-check that the file on disk matches the word it claims to be.
    # Is this really necessary?
    base, extension = os.path.splitext(os.path.basename(path))
    assert extension == '.ucl', "unknown file extension on: " + path
    assert base == word_word.name, \
      '%s != %s: internal error' % (base, word_word.name)

    result = parser_init.parse(parser, scanner, path, debug = debug,
                               extra_arg = (sym, parser.token_dict))
    if result is None:
        return False, ()
    return True, result
def genparser(filename, rules, token_dict, output_file = sys.stdout):
    # Generate the source of a parser module ("parser.py") onto output_file.
    #
    # filename    -- metagrammar file fed to parser_init.parse
    # rules       -- extra grammar rules, passed as an extra_files entry
    # token_dict  -- mapping embedded verbatim into the generated module
    # output_file -- destination stream (defaults to sys.stdout)
    #
    # NOTE(review): this file reached us with all line breaks collapsed; the
    # line layout inside the triple-quoted templates below is a reconstruction
    # and should be confirmed against the generated parser.py.
    metaparser.Output_file = output_file

    # Fixed header of the generated module: imports, start symbol, and the
    # operator-precedence table, followed by the opening of token_dict.
    metaparser.output("""
# parser.py

from ucc.parser import scanner_init
from ucc.database import ast

start = 'file'

precedence = (
    ('left', 'OR'),
    ('left', 'AND'),
    ('right', 'NOT'),
    ('left', '<', 'LE', 'EQ', 'NE', '>', 'GE'),
    ('left', 'ARG_LEFT_WORD'),
    ('right', 'ARG_RIGHT_WORD'),
    ('left', '+', '-'),
    ('right', '/'),
    ('left', '%'),
    ('left', '*'),
    ('left', 'BIT_OR'),
    ('left', 'BIT_XOR'),
    ('left', 'BIT_AND'),
    ('right', 'NEGATE', 'BIT_NOT'),
    ('nonassoc', ')', ']'),
    ('left', '.'),
)

token_dict = {
""", output_file = output_file)

    # Emit token_dict entries in sorted order so regeneration is
    # deterministic, then close the dict literal.
    for item in sorted(token_dict.iteritems()):
        print >> output_file, " %r: %r," % item
    print >> output_file, "}\n"

    # Parse the metagrammar (plus the extra rules) to collect the grammar's
    # token names, and merge in the tokens the scanner defines.
    tokens = sorted(parser_init.parse(metaparser, metascanner, filename,
                                      extra_files = (('rules', rules),))
                      .union(scanner.tokens))

    # The generated module's error hook: t is None at end-of-input, a token
    # otherwise; both raise SyntaxError with scanner position info.
    metaparser.output("""
def p_error(t):
    if t is None:
        raise SyntaxError("invalid syntax",
                          scanner_init.syntaxerror_params())
    else:
        raise SyntaxError("invalid syntax",
                          scanner_init.syntaxerror_params(t))
""", output_file = output_file)

    # Emit `tokens = [...]`, word-wrapped at column 79 by breaking after the
    # last comma that fits on each line; continuation lines are re-indented.
    s = "tokens = %r" % tokens
    i = s.rfind(',', 0, 79)
    while len(s) > 79 and i >= 0:
        print >> output_file, s[:i + 1]
        s = ' ' + s[i + 1:].lstrip()
        i = s.rfind(',', 0, 79)
    print >> output_file, s

    # Trailing stub: the generated module's init() hook does nothing.
    metaparser.output("""
def init():
    pass
""", output_file = output_file)