Example #1
    def test_rule_capitalization(self):
        grammar = '''
            start = ['test' {rulename}] ;
            {rulename} = /[a-zA-Z0-9]+/ ;
        '''
        test_string = 'test 12'
        lowercase_rule_names = ['nocaps', 'camelCase', 'tEST']
        uppercase_rule_names = ['Capitalized', 'CamelCase', 'TEST']
        ref_lowercase_result = tatsu.parse(
            grammar.format(rulename='reflowercase'),
            test_string,
            rule_name='start')
        ref_uppercase_result = tatsu.parse(
            grammar.format(rulename='Refuppercase'),
            test_string,
            rule_name='start')
        for rulename in lowercase_rule_names:
            result = tatsu.parse(grammar.format(rulename=rulename),
                                 test_string,
                                 rule_name='start')
            self.assertEqual(result, ref_lowercase_result)
        for rulename in uppercase_rule_names:
            result = tatsu.parse(grammar.format(rulename=rulename),
                                 test_string,
                                 rule_name='start')
            self.assertEqual(result, ref_uppercase_result)
Example #2
    def convert(result_type, text):
        text = text.replace('(', ' ( ').replace(')', ' ) ')
        if result_type == 'parse':
            text = text[7:-2]
            ast = parse(TreeStringToList.PARSE_GRAMMAR, text)
            tree = TreeStringToList.flatten_parse_ast(ast)
        elif result_type == 'sentiment':
            text = text.replace('|', ' ').replace('=', ' ')
            ast = parse(TreeStringToList.SENTIMENT_GRAMMAR, text)
            tree = TreeStringToList.flatten_sentiment_ast(ast)
        else:
            raise NotImplementedError(
                f'Analysis type "{result_type}" not implemented')

        tree.sort(key=lambda x: x['id'])
        return tree
Example #3
def parse(source):
    counter = itertools.count()
    ir = asjson(tatsu.parse(
        grammar,
        source,
        eol_comments_re="#.*?$",
    ))

    return parse_statements(ir, counter)
Example #4
def main():
    with open('calendarGrammar.ebnf', 'r') as fObj:
        GRAMMAR = fObj.read()

    for x in test_lines:
        print("line: '{}'".format(x))
        ast = asjson(parse(GRAMMAR, x.replace("'s", "")))
        print('calendar owner:  {}'.format(ast['calendar_owner']))
        print('time frame: {}'.format(ast['time_frame']))
        print()
Example #5
def main():
    GRAMMAR = read_file('grammar1.tatsu')
    print('Grammar:', GRAMMAR)
    EXPRESSION = '1 + 2 + 3 + 4'
    ast = parse(GRAMMAR, EXPRESSION)
    print('PPRINT')
    pprint.pprint(ast, indent=2, width=20)
    print('Type ast', type(ast))
    print('Expr', EXPRESSION)
    print('Not pretty', ast)
Example #6
    def test_namechars(self):
        grammar = '''
            @@namechars :: '-'
            start =
                "key" ~ ";"  |
                "key-word" ~ ";" |
                "key-word-extra" ~ ";"
                ;
        '''
        self.assertEqual(['key-word-extra', ';'],
                         parse(grammar, 'key-word-extra;'))
Example #7
def main():
    import pprint
    import json
    from tatsu import parse
    from tatsu.util import asjson
    ast = parse(GRAMMAR, '3 + 5 * ( 10 - 20 )')
    print('PPRINT')
    pprint.pprint(ast, indent=2, width=20)
    print()
    print('JSON')
    print(json.dumps(asjson(ast), indent=2))
    print()
Example #8
def main():
    import pprint
    import json
    from tatsu import parse
    from tatsu.util import asjson
    ast = parse(GRAMMAR, '(-b + ((b * b) - (4 * a * c)) ** (1/2)) / (2 * a)')
    #  ast = parse(GRAMMAR, '-b')
    print('PPRINT')
    pprint.pprint(ast, indent=2, width=20)
    print()
    print('JSON')
    print(json.dumps(asjson(ast), indent=2))
    print()
Example #9
def main(sen):
    import pprint
    import json
    from tatsu import parse
    from tatsu.util import asjson
    # test sen = (-b + ((b * b) - (4 * a * c)) ^ (1/2)) / (2 * a)
    ast = parse(GRAMMAR, sen)
    print('PPRINT')
    pprint.pprint(ast, indent=2, width=30)
    print()
    print('JSON')
    print(json.dumps(asjson(ast), indent=2))
    print()
Example #10
def main(sen):
    import pprint
    import json
    from tatsu import parse
    from tatsu.util import asjson
    ast = parse(GRAMMAR, sen)
    print('PPRINT')
    pprint.pprint(ast, indent=2, width=30)
    print()
    print('JSON VALUE')
    print('odedoyin matthew')
    print(json.dumps(asjson(ast), indent=2))
    print()
Example #11
    def build_query(self, query=None, hashes=None, resolve=None, group_by="hash"):
        # Use None defaults to avoid shared mutable default arguments.
        hashes = hashes if hashes is not None else []
        resolve = resolve if resolve is not None else []
        ast = None if not query else tatsu.parse(self.GRAMMAR, query)

        self.features_exist_or_throw(resolve + [group_by])

        sel = self.build_select(group_by, resolve)
        fro = self.build_from(group_by)
        joi = self.build_join(group_by, self.collect_features(ast, resolve))
        whe = self.build_where(ast, hashes, group_by)
        gro = self.build_group_by(group_by)

        return "SELECT {} FROM {} {} WHERE {} GROUP BY {}".format(
            sel, fro, joi, whe, gro)
Example #12
def main():
    import pprint
    import json
    from tatsu import parse
    from tatsu.util import asjson

    with open(dir_path + '/../bnf/cryo-lang.ebnf') as f:
        ast = parse(f.read(), sys.stdin.read())
    print('PPRINT')
    pprint.pprint(ast, indent=2, width=20)
    print()

    print('JSON')
    print(json.dumps(asjson(ast), indent=2))
    print()
Example #13
    def test_rule_name(self):
        grammar = '''
            @@grammar :: Test

            start = test $;
            test = "test";
        '''
        model = tatsu.compile(grammar=grammar)
        self.assertEqual('Test', model.directives.get('grammar'))
        self.assertEqual('Test', model.name)

        ast = model.parse("test")
        self.assertEqual(ast, "test")

        ast = tatsu.parse(grammar, "test", rule_name='start')
        self.assertEqual(ast, "test")
Example #14
def parse(data):
    ast = tatsu.parse(grammar,
                      data,
                      semantics=Semantics(),
                      eol_comments_re=r'\/\/.*?$')

    types = {}
    for elem in ast:
        if 'type' not in elem:
            continue
        #assert elem['name'] not in types
        types[elem['name']] = parseType(elem['type'])

    ifaces = {}
    services = {}
    for elem in ast:
        if 'functions' not in elem:
            continue
        #assert elem['name'] not in ifaces
        ifaces[elem['name']] = iface = {}
        if elem['serviceNames']:
            services[elem['name']] = list(elem['serviceNames'])

        for func in elem['functions']:
            if func['name'] in iface:
                print('Duplicate function %s in %s' %
                      (func['name'], elem['name']), file=sys.stderr)
                sys.exit(1)

            assert func['name'] not in iface
            iface[func['name']] = fdef = {}
            fdef['cmdId'] = func['cmdId']
            fdef['doc'] = "\n".join(map(lambda x: x.line, func['doc']))
            fdef['inputs'] = [(name, parseType(type))
                              for type, name in func['inputs']]
            if func['outputs'] is None:
                fdef['outputs'] = []
            elif isinstance(func['outputs'], tatsu.ast.AST):
                fdef['outputs'] = [(None, parseType(func['outputs']))]
            else:
                fdef['outputs'] = [(name, parseType(type))
                                   for type, name in func['outputs']]

    return types, ifaces, services
Example #15
def qps2ast(qps: str):
    """
    Parse qps string to its abstract syntax tree (ast) based on EBNF syntax for
    QPS

    Parameters
    ----------
    qps : str
        qps file in string

    Returns
    -------
    ast : dict
        The beautified abstract syntax tree of the input qps string indexed by
        its labels
    """
    raw_ast = tatsu.parse(QPS_GRAMMAR, qps)
    ast = decorate_ast(raw_ast)
    return ast
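
A minimal usage sketch for qps2ast, assuming QPS_GRAMMAR and decorate_ast are defined elsewhere in the source project; the file name below is a placeholder.

if __name__ == '__main__':
    # Hypothetical usage; 'circuit.qps' is a placeholder path.
    with open('circuit.qps') as f:
        ast = qps2ast(f.read())
    # Per the docstring, the decorated AST is a dict indexed by its labels.
    print(sorted(ast.keys()))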
Example #16
def translate(text=None, filename=None, name=None, encoding='utf-8', trace=False):
    if text is None and filename is None:
        raise ValueError('either `text` or `filename` must be provided')

    if text is None:
        name = name or path.splitext(path.basename(filename))[0].capitalize()
        with codecs.open(filename, encoding=encoding) as f:
            text = f.read()

    name = name or 'Unknown'

    semantics = ANTLRSemantics(name)
    model = parse(
        antlr_grammar(),
        text,
        name=name,
        filename=filename,
        semantics=semantics,
        trace=trace
    )
    print(model)
Example #18
def build_query(query=None,
                hashes=None,
                resolve=None,
                collapse="GROUP_CONCAT",
                group_by="hash",
                join_type="LEFT"):
    # Use None defaults to avoid shared mutable default arguments.
    hashes = hashes if hashes is not None else []
    resolve = resolve if resolve is not None else []
    statement = "SELECT {} FROM {} {} WHERE {} GROUP BY {}"

    s_attributes = group_by + ".value"
    s_from = group_by
    s_tables = ""
    s_conditions = "1=1"
    s_group_by = group_by + ".value"
    tables = set(resolve)

    if query is not None and query:
        ast = parse(GRAMMAR, query)
        s_conditions = build_where(ast)
        tables.update(collect_tables(ast))

    if len(hashes):
        s_conditions = s_conditions + " AND hash.hash in ('{}')".format(
            "', '".join(hashes))

    if len(resolve):
        s_attributes = s_attributes + ", " + ", ".join([
            '{}(DISTINCT({}.value))'.format(collapse, table)
            for table in resolve
        ])

    s_tables = " ".join([
        '{} JOIN {} ON {}.hash = {}.hash'.format(join_type, table, group_by,
                                                 table) for table in tables
        if table != group_by
    ])

    return statement.format(s_attributes, s_from, s_tables, s_conditions,
                            s_group_by)
Example #19
def translate_rules(rules, collection_wrap):
    """Convert the YAML version of rules into Fluent C++.

    Args:
        rules (dict): mapping of rule variable names to rule definitions
            from the YAML schema

    Returns:
        str: newline-separated string of the form
            "  auto <rulevariable1> = <rulesyntax>
               auto <rulevariable2> = <rulesyntax>
               ...
               return std::make_tuple(<rulevariable1>, <rulevariable2>, ...);
            "
    """
    # Read the grammar once rather than on every loop iteration.
    grammar = open('./fluent.tatsu').read()
    retval = ''
    for k, v in rules.items():
        sem = BloomSemantics()
        sem.cwrap = collection_wrap
        v = tatsu.parse(grammar, k + ': ' + v, parseinfo=True, semantics=sem)
        retval += ("      auto " + v)
    retval += ("      return std::make_tuple(" + ",".join(rules.keys()) +
               ");\n")
    return retval
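
A hedged usage sketch for translate_rules; the rule text is a placeholder (the real syntax lives in the fluent.tatsu grammar, which is not shown here), and the 'lra::make_collection' wrapper is borrowed from Example #26, which uses the same BloomSemantics class.

rules = {'r1': '<rule syntax from the YAML schema>'}  # placeholder rule body
cpp_body = translate_rules(rules, 'lra::make_collection')
# Per the docstring, cpp_body ends with: return std::make_tuple(r1);
print(cpp_body)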
Example #20
    code_asm = code_asm.replace("EVAL_RETURN",
                                compile_expr(prg_ast["return_expr"]))
    code_asm = code_asm.replace("BODY", compile_com(prg_ast["body"]))
    return code_asm


try:
    ast = parse(GRAMMAR,
                """
    main(X, Y){
        a[4] = {1, 2, 3, b[4]};
        b[5];
        c[2][2] = {{ 1, 2}, { 1, 2}};
        i=0;
        while(5>i){
            b[i]=i;
            i=i+1;
        }
        while(X){
            X = X - 1;
            Y = Y + 1;
        }
        return (Y) ; 
        }
       """,
                semantics=Semantics())
    print(ast)
    print(pprint_prg(ast))
    #print(compile_prg(ast))
except Exception as e:
    print(e)
"""
Example #21
    """

from pprint import pprint
from tatsu import parse

GRAMMAR = """
@@grammar :: test
@@nameguard :: False
@@namechars :: '12345'

start = sequence $ ;
sequence = {digit}+ ;
digit = 'x' | '1' | '2' | '3' | '4' | '5' ;"""

test = "23"
ast = parse(GRAMMAR, test)
pprint(ast)  # Prints ['2', '3']

test = "xx"
ast = parse(GRAMMAR, test, nameguard=False)
pprint(ast)
# fingering = [hand] digit ;
# hand = '<' | '>' ;
from pprint import pprint
from tatsu import parse

# test = "2xx&1x2@1&2"
# ast = parse(GRAMMAR, test)
# pprint(ast)  # Prints ['2', '3']

# exit(0)
Example #22
GRAMMAR = '''
    term
        =
        | term '*' factor
        | term '/' factor
        | factor
        ;


    factor
        =
        | '(' expression ')'
        | number
        ;


    number = /\d+/ ;
'''

if __name__ == '__main__':
    import pprint
    import json
    from tatsu import parse
    from tatsu.util import asjson

    ast = parse(GRAMMAR, open("wkt_dggs.ebnf").read())
    print('# PPRINT')
    pprint.pprint(ast, indent=2, width=20)
    print()

    # print('# JSON')
    # print(json.dumps(asjson(ast), indent=2))
    # print()
Example #23
def parser(code, locals=None, globals=None):
    return tatsu.parse(
        COMPILED_GRAMMAR,
        code.strip(),
        semantics=DatalogSemantics(locals=locals, globals=globals),
    )
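
A hypothetical call to parser; the clause below assumes COMPILED_GRAMMAR (defined elsewhere) accepts conventional Datalog syntax.

# Placeholder program text; adjust to whatever COMPILED_GRAMMAR expects.
program = 'ancestor(X, Y) :- parent(X, Y).'
ast = parser(program, locals=locals(), globals=globals())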
Example #24
    def search(query):
        return parse(GRAMMAR, query, semantics=qs)
Example #25
#!/usr/bin/env python3

import json

from tatsu import parse
from tatsu.util import asjson

# Read a whole file in one expression; the inner tuple also closes the handle.
slurp = lambda filename: [(f.read(), f.close())
                          for f in [open(filename, 'r')]][0][0]

GRAMMAR = slurp("docopt.peg")

_indent = 4

usage = 'Usage: hello -abc --why <file>'
usage = 'Usage: hello <file>'

ast = parse(GRAMMAR, usage)

print(json.dumps(asjson(ast), indent=_indent))

Example #26
    def opname(self, ast):
        return "lra::" + ast

    def rhs_catalog_entry(self, ast):
        return self.cwrap + "(&" + ast + ")"

    def where(self, ast):
        return "filter"

    def cross(self, ast):
        return "make_cross"

    def now(self, ast):
        return "<="

    def next(self, ast):
        return "+="

    # NOTE: 'async' became a reserved keyword in Python 3.7; this method name
    # (matching the grammar rule 'async') only compiles on older interpreters.
    def async(self, ast):
        return "<="

    def delete(self, ast):
        return "-="


grammar = open('./fluent2.tatsu').read()
bloom = open('./test.txt').read()
sem = BloomSemantics()
setattr(sem, 'cwrap', 'lra::make_collection')
result = tatsu.parse(grammar, bloom, semantics=sem)
print(result)
Example #27
def parse(data):
    ast = tatsu.parse(grammar,
                      data,
                      semantics=Semantics(),
                      eol_comments_re=r'\/\/.*?$')

    types = {}
    for elem in ast:
        if 'type' not in elem:
            continue
        #assert elem['name'] not in types
        types[elem['name']] = parseType(elem['type'])

    ifaces = {}
    services = {}
    for elem in ast:
        if 'functions' not in elem:
            continue
        #assert elem['name'] not in ifaces
        ifaces[elem['name']] = iface = {"doc": "", "cmds": []}
        if elem['serviceNames']:
            services[elem['name']] = list(elem['serviceNames'])
        iface['doc'] = "\n".join(map(lambda x: x.line, elem['doc']))
        for func in elem['functions']:
            if func['name'] in iface:
                print('Duplicate function %s in %s' %
                      (func['name'], elem['name']), file=sys.stderr)
                sys.exit(1)

            assert func['name'] not in iface
            fdef = {}
            iface['cmds'].append(fdef)
            fdef['name'] = func['name']
            fdef['cmdId'] = func['cmdId']

            # Handle decorators
            for decorator in func['decorators']:
                if decorator['type'] == 'version':
                    fdef['versionAdded'] = "".join(
                        map(str, decorator['startVersion']))
                    if decorator['postfix'] is None:
                        fdef['lastVersion'] = fdef['versionAdded']
                    elif decorator['postfix'] == '+':
                        fdef['versionRemoved'] = None
                    else:
                        fdef['lastVersion'] = "".join(
                            map(str, decorator['endVersion']))

            # Set default values for "missing" decorators
            if 'versionAdded' not in fdef:
                fdef['versionAdded'] = "1.0.0"
            if 'lastVersion' not in fdef:
                fdef['lastVersion'] = None

            fdef['doc'] = "\n".join(map(lambda x: x.line, func['doc']))
            fdef['inputs'] = [(name, parseType(type))
                              for type, name in func['inputs']]
            if func['outputs'] is None:
                fdef['outputs'] = []
            elif isinstance(func['outputs'], tatsu.ast.AST):
                fdef['outputs'] = [(None, parseType(func['outputs']))]
            else:
                fdef['outputs'] = [(name, parseType(type))
                                   for type, name in func['outputs']]

    return types, ifaces, services
Example #28
    call printf
    pop rbp
    ret"""
    code_asm = code_asm.replace("VAR_LIST", var_list(prg_ast))
    code_asm = code_asm.replace("INIT_VAR", init_var(prg_ast))
    code_asm = code_asm.replace("EVAL_RETURN",
                                compile_expr(prg_ast['return_expr']))
    code_asm = code_asm.replace("BODY", compile_com(prg_ast['body']))
    return code_asm


#try:
ast = parse(GRAMMAR,
            """main(X, Y){
    while(X){
    X = X - 1;
    Y = Y + 1;
    }
    return (Y) ; 
    }
    """,
            semantics=Semantics())
print(ast)
#print(pprint_prg(ast))
print(compile_prg(ast))
#myfile = open("code.asm", 'w')
#myfile.write(compile_prg(ast))
#myfile.close()
#except Exception as e:
#    print(e)
main(x, y, z) {
    z = tripleadd(x, y, z);
    return(z);
}
"""

example4 = """
main(x) {
    add1(x) {
        x = x+1;
        return(x);
    }
    z = add1(x);
    return(z);
}
"""

example = example2
#try:
ast = parse(GRAMMAR, example, semantics=Semantics())
print("example = " + example)
print("ast = " + str(ast))
print()
#print(pprint_prg(ast))
code_asm = compile(ast)
print(code_asm)
myfile = open("code.asm", 'w')
myfile.write(code_asm)
#except Exception as e:
#    print(e)