Exemplo n.º 1
0
def main(fobj, start):
    """Emit a standalone Lark parser module for the grammar in *fobj* to stdout.

    *start* is the grammar's start symbol, forwarded to the Lark constructor.
    """
    lark_inst = Lark(fobj, parser="lalr", lexer="contextual", start=start)

    # Module header: record which Lark version generated the output.
    print('# The file was automatically generated by Lark v%s' % lark.__version__)
    print('__version__ = "%s"' % lark.__version__)
    print()

    # Inline the 'standalone' section of each bundled source file.
    for idx, pyfile in enumerate(EXTRACT_STANDALONE_FILES):
        with open(os.path.join(_larkdir, pyfile)) as src:
            section = extract_sections(src)['standalone']
            if idx:  # every file after the first: drop its docstrings
                line_feeder = partial(next, iter(section.splitlines(True)))
                section = strip_docstrings(line_feeder)
            print(section)

    # Append the serialized parser tables and the loader entry point.
    serialized, memo_table = lark_inst.memo_serialize([TerminalDef, Rule])
    print('DATA = (')
    print(serialized)
    print(')')
    print('MEMO = (')
    print(memo_table)
    print(')')

    print('Shift = 0')
    print('Reduce = 1')
    print("def Lark_StandAlone(transformer=None, postlex=None):")
    print("  return Lark._load_from_dict(DATA, MEMO, transformer=transformer, postlex=postlex)")
Exemplo n.º 2
0
def main(fobj, start, out=None):
    """Write a standalone Lark parser module to *out* (default: stdout).

    Builds a LALR parser with position propagation from the grammar in
    *fobj*, prepends the static base template, then appends the serialized
    parser tables and the loader entry point.
    """
    lark_inst = Lark(fobj,
                     parser="lalr",
                     lexer="contextual",
                     start=start,
                     propagate_positions=True)

    # Copy the static base template verbatim.
    with open(BASE) as base:
        print(base.read(), file=out)

    serialized, memo_table = lark_inst.memo_serialize([TerminalDef, Rule])
    # Serialized tables, wrapped in parentheses so they parse as expressions.
    for chunk in ('DATA = (', serialized, ')', 'MEMO = (', memo_table, ')'):
        print(chunk, file=out)

    print('Shift = 0', file=out)
    print('Reduce = 1', file=out)
    print("def Lark_StandAlone(transformer=None, postlex=None, *, tbl):",
          file=out)
    print(
        "  return Lark._load_from_dict(DATA, MEMO, transformer=transformer, postlex=postlex, tbl=tbl)",
        file=out)
Exemplo n.º 3
0
def main(fobj, start):
    """Print a standalone Lark parser module for the grammar in *fobj*."""
    lark_inst = Lark(fobj, parser="lalr", lexer="contextual", start=start)

    print('# The file was automatically generated by Lark v%s' %
          lark.__version__)

    # Concatenate the 'standalone' section of every required source file.
    for pyfile in EXTRACT_STANDALONE_FILES:
        src_path = os.path.join(_larkdir, pyfile)
        with open(src_path) as src:
            print(extract_sections(src)['standalone'])

    serialized, memo_table = lark_inst.memo_serialize([TerminalDef, Rule])
    # Serialized tables, wrapped in parentheses so they parse as expressions.
    for chunk in ('DATA = (', serialized, ')', 'MEMO = (', memo_table, ')'):
        print(chunk)

    print('Shift = 0')
    print('Reduce = 1')
    print("def Lark_StandAlone(transformer=None, postlex=None):")
    print(
        "  return Lark._load_from_dict(DATA, MEMO, transformer=transformer, postlex=postlex)"
    )
Exemplo n.º 4
0
def serialize(infile, outfile, lexer, start):
    """Serialize the parser built from *infile* as JSON into *outfile*.

    The output object has two keys: "data" (the parser definition) and
    "memo" (the shared-object memo table).
    """
    lark_inst = Lark(infile, parser="lalr", lexer=lexer, start=start)    # TODO contextual

    data, memo = lark_inst.memo_serialize([TerminalDef, Rule])
    # Assemble the JSON document in one string; single write, same bytes.
    document = '{\n  "data": %s,\n  "memo": %s\n}\n' % (json.dumps(data),
                                                        json.dumps(memo))
    outfile.write(document)
Exemplo n.º 5
0
def create_parser_file():
    """Generate and cache a standalone parser module for the hcl2 grammar.

    Parsing the Lark grammar takes about 0.5 seconds, so we cache the whole
    generated Python parser file instead, mirroring Lark's standalone parser
    feature (see
    https://github.com/lark-parser/lark/blob/master/lark/tools/standalone.py).

    Lark can also serialize just the parser config, but its deserialize path
    did not work here: the serialized state contains dicts keyed by numbers,
    which JSON cannot represent, and other formats would mean extra
    dependencies or code. Lark's standalone generator works well but expects
    to run as a separate shell command, so this function replicates the
    relevant part of it in-process.
    """
    grammar_path = os.path.join(dirname(__file__), 'hcl2.lark')
    with open(grammar_path, 'r') as grammar, open(PARSER_FILE,
                                                  'w') as parser_out:
        lark_inst = Lark(grammar.read(), parser="lalr", lexer="standard")

        data, memo = lark_inst.memo_serialize([TerminalDef, Rule])

        print(PARSER_FILE_TEMPLATE % (data, memo), file=parser_out)