示例#1
0
def test_nested_class_synth_model():
    """A synthesized model with nested synthesized nodes must survive pickling."""
    grammar = '''
        start::ASeq
            =
            seqs:aseq
            $
            ;

        aseq::Seq
            =
            values:{'a'}+
            ;
    '''

    parser = compile(grammar, 'ASeq')
    original = parser.parse('a a a', semantics=ModelBuilderSemantics())
    assert type(original).__name__ == 'ASeq'

    # Round-trip the model through pickle.
    restored = pickle.loads(pickle.dumps(original))
    assert type(restored).__name__ == 'ASeq'

    # NOTE: Since we are unpickling an object which contains nested objects, we can't do
    # self.assertEqual(model.ast, new_model.ast) as the memory locations will be different.
    # So either (1) we recursively walk the objects and compare fields or (2) we convert it into a
    # str()/repr()/JSON and compare that. The latter as it is easier.
    assert asjson(original.ast) == asjson(restored.ast)
示例#2
0
def test_walk_node_ast():
    """DepthFirstWalker must visit every typed node built from the grammar."""
    GRAMMAR = r'''
        @@grammar::TLA

        #
        # comment
        #
        start = expression $;

        expression = temporal_expression | nontemporal_top_expression ;
        nontemporal_top_expression(SampleExpression) =  expr:nontemporal_expression ;
        temporal_expression =    temporal_atom;
        temporal_atom(TemporalSeq) = ['Seq'] '(' @:temporal_arg_list ')';
        temporal_arg_list = "," .{@+:expression}+;
        nontemporal_expression =  number ;

        # tokens
        number::int = /\d+/;
    '''

    parser = tatsu.compile(GRAMMAR, asmodel=True)
    model = parser.parse('Seq(1,1)')
    assert model.ast is not None

    counts = defaultdict(int)

    class CountingWalker(DepthFirstWalker):
        # Tally every node by its class name as the walk proceeds.
        def walk_Node(self, node, *args, **kwargs):
            name = type(node).__name__
            print(f'node {name}')
            counts[name] += 1

    print(json.dumps(asjson(model), indent=2))
    CountingWalker().walk(model)
    assert counts == {'SampleExpression': 2, 'TemporalSeq': 1}
def main():
    """Parse a sample HANA SQL statement and print its AST as JSON."""
    import json
    from tatsu import compile
    from tatsu.util import asjson

    # `sql` rather than `input` — avoid shadowing the builtin.
    sql = """SELECT * FROM (SELECT ZSID FROM (SELECT B.* FROM (SELECT A.ID FROM DUNCE AS A) B ) C);"""

    # Load the grammar in from a reference file.
    with open(GRAMMAR_DIR + "HANA_SQL_Grammar.bnf") as f:
        grammar = f.read()

    model = compile(grammar, verbose=True)

    # Pad commas with spaces so the tokenizer sees them as separate tokens.
    munged_input = sql.replace(',', ' , ')

    ast = model.parse(munged_input)

    result = str(json.dumps(asjson(ast), sort_keys=True, indent=2))

    print(result)
示例#4
0
    def generate_ast(self, input):
        """Parse *input* with self.model and return the AST as sorted JSON."""
        # Pad commas so they tokenize as standalone symbols.
        padded = input.replace(',', '  ,  ')
        self.ast = self.model.parse(padded)
        return str(json.dumps(asjson(self.ast), sort_keys=True))
示例#5
0
def parse(source):
    """Parse *source* with the module grammar and return its statements."""
    counter = itertools.count()
    raw = tatsu.parse(
        grammar,
        source,
        eol_comments_re="#.*?$",
    )
    return parse_statements(asjson(raw), counter)
def main():
    """Parse each test line with the calendar grammar and report its fields."""
    with open('calendarGrammar.ebnf', 'r') as grammar_file:
        GRAMMAR = grammar_file.read()

    for line in test_lines:
        print("line: '{}'".format(line))
        # Drop possessive "'s" before parsing.
        ast = asjson(parse(GRAMMAR, line.replace("'s", "")))
        print('calendar owner:  {}'.format(ast['calendar_owner']))
        print('time frame: {}'.format(ast['time_frame']))
        print()
示例#7
0
def ccg_to_json(to_parse):
    """Parse an OpenCCG string into a more easily digestible JSON format.

    Args:
        to_parse: The OpenCCG string.
    Returns:
        A JSON representation of the OpenCCG string.
    """
    parsed = OpenCCGParser().parse(
        to_parse,
        semantics=ModelBuilderSemantics(),
        parseinfo=False,
    )
    return asjson(parsed)
示例#8
0
def parse_expression(expression):
    """Validate *expression*; on success store and return its AST as JSON.

    Returns:
        The indented JSON string for the expression's AST, or None when
        the expression fails validation.
    """
    ast = valid_expression(expression)
    if not ast:
        return None
    data = json.dumps(asjson(ast), indent=2)
    store_results(data)
    return data
示例#9
0
def main():
    """Parse a fixed arithmetic expression and dump the AST two ways."""
    import json
    import pprint
    from tatsu import parse
    from tatsu.util import asjson

    tree = parse(GRAMMAR, '3 + 5 * ( 10 - 20 )')

    print('PPRINT')
    pprint.pprint(tree, indent=2, width=20)
    print()

    print('JSON')
    print(json.dumps(asjson(tree), indent=2))
    print()
示例#10
0
def main():
    """Parse the quadratic formula and print its AST as pprint and JSON."""
    import json
    import pprint
    from tatsu import parse
    from tatsu.util import asjson

    ast = parse(GRAMMAR, '(-b + ((b * b) - (4 * a * c)) ** (1/2)) / (2 * a)')

    print('PPRINT')
    pprint.pprint(ast, indent=2, width=20)
    print()

    print('JSON')
    print(json.dumps(asjson(ast), indent=2))
    print()
示例#11
0
def main(sen):
    """Parse the sentence *sen* and dump its AST as pprint and JSON.

    Example input: (-b + ((b * b) - (4 * a * c)) ^ (1/2)) / (2 * a)
    """
    import json
    import pprint
    from tatsu import parse
    from tatsu.util import asjson

    tree = parse(GRAMMAR, sen)

    print('PPRINT')
    pprint.pprint(tree, indent=2, width=30)
    print()

    print('JSON')
    print(json.dumps(asjson(tree), indent=2))
    print()
示例#12
0
def main(sen):
    """Parse the sentence *sen* and dump its AST as pprint and JSON."""
    import json
    import pprint
    from tatsu import parse
    from tatsu.util import asjson

    tree = parse(GRAMMAR, sen)

    print('PPRINT')
    pprint.pprint(tree, indent=2, width=30)
    print()

    print('JSON VALUE')
    print('odedoyin matthew')
    print(json.dumps(asjson(tree), indent=2))
    print()
示例#13
0
def main():
    """Parse stdin with the cryo-lang grammar and dump the resulting AST."""
    import json
    import pprint
    from tatsu import parse
    from tatsu.util import asjson

    with open(dir_path + '/../bnf/cryo-lang.ebnf') as grammar_file:
        ast = parse(grammar_file.read(), sys.stdin.read())

    print('PPRINT')
    pprint.pprint(ast, indent=2, width=20)
    print()

    print('JSON')
    print(json.dumps(asjson(ast), indent=2))
    print()
示例#14
0
    def handle_list_events_intent(self, message):
        """Speak the events on the calendar named in the user's utterance.

        Extracts the calendar owner and time frame from the utterance,
        resolves them to a caldav calendar and a datetime range, then
        speaks the matching events.
        """
        utt = message.data['utterance']
        utt = normalize(utt).replace("'s", "")  # normalize and drop "****'s"
        try:
            # parse utterance for owner and time frame
            ast = self.PEGParser.parse(utt)
            parsed_utt = asjson(ast)

            # use .get() to return None if key not found rather than
            # error-ing out on a failed ['<key>']
            calendar_owner = parsed_utt.get('calendar_owner')
            calendar_timeframe = parsed_utt.get('time_frame')
        except Exception as e:
            self.speak('there was an error parsing your utterance')
            self.log.error(e)
            return

        if calendar_owner is None:  # if owner not found, default to personal calendar
            calendar_owner = 'my'

        # parser will return list if timeframe is two words (e.g. ["this", "weekend"])
        # and convertSpokenTimeRangeToDT takes a string as input, so join with a
        # space if calendar_timeframe is a list
        if isinstance(calendar_timeframe, list):
            calendar_timeframe = ' '.join(calendar_timeframe)

        # generate the start and end times for the event search
        start, end = self.convertSpokenTimeRangeToDT(calendar_timeframe)

        url, user, password = self.getConfigs()  # get config settings
        # construct caldav calendar object for the resolved owner
        calendarObj = self.getCalendar(
            self.nameToCalendar[calendar_owner],
            url,
            user,
            password)
        # get list of events between start and end
        events = self.searchEvents(calendarObj, start, end)
        self.speakEvents(events)  # speak those events
示例#15
0
 def asjson(self):
     # Delegate to the module-level asjson() helper to serialize this object.
     return asjson(self)
示例#16
0
        return ast

    def eof(self, ast):  # noqa
        # Pass-through semantic action: return the AST unchanged.
        return ast


def main(filename, start=None, **kwargs):
    """Parse *filename* (stdin when empty or '-') starting at rule *start*."""
    rule = 'start' if start is None else start
    read_stdin = not filename or filename == '-'
    if read_stdin:
        text = sys.stdin.read()
    else:
        with open(filename) as stream:
            text = stream.read()
    return EBNFBootstrapParser().parse(
        text,
        rule_name=rule,
        filename=filename,
        **kwargs
    )


if __name__ == '__main__':
    import json
    from tatsu.util import asjson

    # Run the generic CLI driver and print the resulting AST as JSON.
    ast = generic_main(main, EBNFBootstrapParser, name='EBNFBootstrap')
    data = asjson(ast)
    print(json.dumps(data, indent=2))
示例#17
0
 def __json__(self):
     # Serialize every entry's value, keeping the original key order.
     return {key: asjson(value) for key, value in self.items()}
示例#18
0
    def monthconstraint(self, ast):  # noqa
        # Pass-through semantic action: return the AST unchanged.
        return ast

    def startingat(self, ast):  # noqa
        # Pass-through semantic action: return the AST unchanged.
        return ast


def main(filename, start=None, **kwargs):
    """Parse *filename* (stdin when empty or '-') with UnknownParser."""
    rule = start if start is not None else 'start'
    if filename and filename != '-':
        with open(filename) as stream:
            text = stream.read()
    else:
        text = sys.stdin.read()
    return UnknownParser().parse(text, rule_name=rule, filename=filename, **kwargs)


if __name__ == '__main__':
    import json
    from tatsu.util import asjson

    # Run the generic CLI driver, then print the AST in raw and JSON forms.
    ast = generic_main(main, UnknownParser, name='Unknown')
    print('AST:')
    print(ast)
    print()
    print('JSON:')
    print(json.dumps(asjson(ast), indent=2))
    print()
示例#19
0
 def __json__(self):
     # Tag the public attributes with the concrete class name.
     payload = {'__class__': type(self).__name__, **self._pubdict()}
     return asjson(payload)
        return ast

    def regex(self, ast):  # noqa
        # Pass-through semantic action: return the AST unchanged.
        return ast

    def boolean(self, ast):  # noqa
        # Pass-through semantic action: return the AST unchanged.
        return ast

    def eof(self, ast):  # noqa
        # Pass-through semantic action: return the AST unchanged.
        return ast


def main(filename, startrule, **kwargs):
    """Parse the contents of *filename* starting at *startrule*."""
    with open(filename) as stream:
        source = stream.read()
    return EBNFBootstrapParser().parse(source, startrule, filename=filename, **kwargs)


if __name__ == '__main__':
    import json
    from tatsu.util import asjson

    # Run the generic CLI driver, then print the AST in raw and JSON forms.
    ast = generic_main(main, EBNFBootstrapParser, name='EBNFBootstrap')
    print('AST:')
    print(ast)
    print()
    print('JSON:')
    print(json.dumps(asjson(ast), indent=2))
    print()
示例#21
0
 def __json__(self):
     # Ordered mapping whose first key records the concrete class name.
     data = collections.OrderedDict()
     data['__class__'] = self.__class__.__name__
     data.update(self._pubdict())
     return asjson(data)
 def __json__(self):
     # Start from the class name, then append the public attributes in order.
     data = collections.OrderedDict(__class__=type(self).__name__)
     data.update(self._pubdict())
     return asjson(data)
示例#23
0
    def handle_add_event_intent(self, message):
        """Create a calendar event from the user's utterance.

        Extracts the owner, start time and duration from the utterance,
        prompting the user for anything missing, then confirms the details
        and creates the event on the owner's caldav calendar.
        """
        utt = message.data['utterance']
        time_delta, remaining_utt = extract_duration(
            utt)  # get time duration from utterance
        start_time, remaining_utt = extract_datetime(
            remaining_utt)  # get time from utterance
        owner = message.data.get('Owner')  # get calendar owner
        utt = normalize(utt).replace("'s",
                                     "")  # normalize and drop 's in utterance
        parsed_utt = asjson(
            self.PEGParser.parse(utt))  # parse utterance for owner
        owner = parsed_utt.get('calendar_owner')
        if owner is None:  # if parser failed to get owner, prompt user
            owner = self.get_response('ask.calendar.owner')

        self.log.info('using owner: {}'.format(owner))

        try:  # get the calendar belonging to owner
            calName = self.nameToCalendar[owner]  # throw error if none found
        except KeyError:
            self.speak_dialog('no.calendar.found.error', {'name': owner})
            return

        if start_time is None:  # if start time not found
            start_time, _ = extract_datetime(
                self.get_response('ask.start.time'))  # ask the user

        if time_delta is None:  # if duration not found
            time_delta, _ = extract_duration(
                self.get_response('ask.duration'))  # ask the user

        if time_delta is None or start_time is None:  # if duration or start time STILL unknown
            self.speak(
                'sorry. i was not able to understand. please start over.'
            )  # fail
            return

        end_time = start_time + time_delta  # calculate end time

        eventName = self.get_response(
            'ask.event.name').title()  # ask for event name

        confirmation = self.ask_yesno(
            'confirm.event',  # confirm details
            {
                'event_name': eventName,
                'confirmation_text': self.confirmEventDetails(
                    start_time, end_time),
                'owner': self.calendarToName[calName]
            })
        if confirmation == 'no':
            self.speak_dialog('confirmation.failed')

        elif confirmation == 'yes':
            url, user, password = self.getConfigs()  # get  configs
            if url is None:  # if getConfigs returned None, it failed and
                pass  # already spoke to user
            else:
                calendar = self.getCalendar(
                    calName, url, user,
                    password)  # get calendar and create the event
                self.makeEvent(calendar,
                               start_time,
                               end_time,
                               eventName,
                               owner=self.calendarToName[calName])
        else:
            self.speak('sorry i did not understand.')
示例#24
0
 def __json__(self):
     # Extend the parent serialization with the qualified names of the bases.
     data = super().__json__()
     data['bases'] = asjson([base.qualname() for base in self.bases])
     return data
示例#25
0
 def __json__(self):
     # Serialize every symbol-table entry, preserving insertion order.
     return odict((name, asjson(symbols)) for name, symbols in self.entries.items())
示例#26
0
 def __json__(self):
     """Serialize this scope: owning node type, entries, and references."""
     # A dict literal is the idiomatic form of dict([(k, v), ...]).
     return {
         'node': type(self.node).__name__,
         'entries': super().__json__(),
         'references': asjson(self._references),
     }
示例#27
0
 def __json__(self):
     # Map each entry name to its serialized symbols.
     serialized = {}
     for name, symbols in self.entries.items():
         serialized[name] = asjson(symbols)
     return serialized
示例#28
0
#!/usr/bin/env python3

import json

from tatsu import parse
from tatsu.util import asjson

def slurp(filename):
    """Read and return the entire contents of *filename*."""
    # A plain function with `with` replaces the lambda/tuple close() hack.
    with open(filename, 'r') as f:
        return f.read()


GRAMMAR = slurp("docopt.peg")

_indent = 4

usage = 'Usage: hello -abc --why <file>'
usage = 'Usage: hello <file>'  # NOTE: overrides the line above; only this usage is parsed

ast = parse(GRAMMAR, usage)

print(json.dumps(asjson(ast), indent=_indent))

#
示例#29
0
    def test_bootstrap(self):
        """Bootstrap TatSu's own grammar through repeated parse/generate phases,
        checking at each step that regenerated grammars and ASTs are stable.
        Intermediate artifacts are written under ./tmp for inspection.
        """
        print()

        # Start from a clean ./tmp working directory.
        if os.path.isfile('./tmp/00.ast'):
            shutil.rmtree('./tmp')
        if not os.path.isdir('./tmp'):
            os.mkdir('./tmp')
        print('-' * 20, 'phase 00 - parse using the bootstrap grammar')
        with open('grammar/tatsu.ebnf') as f:
            text = str(f.read())
        g = EBNFParser('EBNFBootstrap')
        grammar0 = g.parse(text)
        ast0 = json.dumps(asjson(grammar0), indent=2)
        with open('./tmp/00.ast', 'w') as f:
            f.write(ast0)

        print('-' * 20, 'phase 01 - parse with parser generator')
        with open('grammar/tatsu.ebnf') as f:
            text = str(f.read())
        g = GrammarGenerator('EBNFBootstrap')
        g.parse(text)

        generated_grammar1 = str(g.ast['start'])
        with open('./tmp/01.ebnf', 'w') as f:
            f.write(generated_grammar1)

        print('-' * 20, 'phase 02 - parse previous output with the parser generator')
        with open('./tmp/01.ebnf') as f:
            text = str(f.read())
        g = GrammarGenerator('EBNFBootstrap')
        g.parse(text)
        generated_grammar2 = str(g.ast['start'])
        with open('./tmp/02.ebnf', 'w') as f:
            f.write(generated_grammar2)
        # Regenerating the regenerated grammar must be a fixed point.
        self.assertEqual(generated_grammar2, generated_grammar1)

        print('-' * 20, 'phase 03 - repeat')
        with open('./tmp/02.ebnf') as f:
            text = f.read()
        g = EBNFParser('EBNFBootstrap')
        ast3 = g.parse(text)
        with open('./tmp/03.ast', 'w') as f:
            f.write(json.dumps(asjson(ast3), indent=2))

        print('-' * 20, 'phase 04 - repeat')
        with open('./tmp/02.ebnf') as f:
            text = f.read()
        g = GrammarGenerator('EBNFBootstrap')
        g.parse(text)
        parser = g.ast['start']
    #    pprint(parser.first_sets, indent=2, depth=3)
        generated_grammar4 = str(parser)
        with open('./tmp/04.ebnf', 'w') as f:
            f.write(generated_grammar4)
        self.assertEqual(generated_grammar4, generated_grammar2)

        print('-' * 20, 'phase 05 - parse using the grammar model')
        with open('./tmp/04.ebnf') as f:
            text = f.read()
        ast5 = parser.parse(text)
        with open('./tmp/05.ast', 'w') as f:
            f.write(json.dumps(asjson(ast5), indent=2))

        print('-' * 20, 'phase 06 - generate parser code')
        gencode6 = codegen(parser)
        with open('./tmp/g06.py', 'w') as f:
            f.write(gencode6)

        print('-' * 20, 'phase 07 - import generated code')
        # Only byte-compile; actually importing the generated parser is
        # left disabled below.
        py_compile.compile('./tmp/g06.py', doraise=True)
        # g06 = __import__('g06')
        # GenParser = g06.EBNFBootstrapParser

        # print('-' * 20, 'phase 08 - compile using generated code')
        # parser = GenParser(trace=False)
        # result = parser.parse(
        #     text,
        #     'start',
        #     comments_re=COMMENTS_RE,
        #     eol_comments_re=EOL_COMMENTS_RE
        # )
        # self.assertEqual(result, parser.ast['start'])
        # ast8 = parser.ast['start']
        # json8 = json.dumps(asjson(ast8), indent=2)
        # open('./tmp/08.ast', 'w').write(json8)
        # self.assertEqual(ast5, ast8)

        print('-' * 20, 'phase 09 - Generate parser with semantics')
        with open('grammar/tatsu.ebnf') as f:
            text = f.read()
        parser = GrammarGenerator('EBNFBootstrap')
        g9 = parser.parse(text)
        generated_grammar9 = str(g9)
        with open('./tmp/09.ebnf', 'w') as f:
            f.write(generated_grammar9)
        self.assertEqual(generated_grammar9, generated_grammar1)

        print('-' * 20, 'phase 10 - Parse with a model using a semantics')
        g10 = g9.parse(
            text,
            start_rule='start',
            semantics=EBNFGrammarSemantics('EBNFBootstrap')
        )
        generated_grammar10 = str(g10)
        with open('./tmp/10.ebnf', 'w') as f:
            f.write(generated_grammar10)
        gencode10 = codegen(g10)
        with open('./tmp/g10.py', 'w') as f:
            f.write(gencode10)

        print('-' * 20, 'phase 11 - Pickle the model and try again.')
        # The grammar model must survive a pickle round-trip and still parse.
        with open('./tmp/11.tatsu', 'wb') as f:
            pickle.dump(g10, f, protocol=2)
        with open('./tmp/11.tatsu', 'rb') as f:
            g11 = pickle.load(f)
        r11 = g11.parse(
            text,
            start_rule='start',
            semantics=EBNFGrammarSemantics('EBNFBootstrap')
        )
        with open('./tmp/11.ebnf', 'w') as f:
            f.write(str(g11))
        gencode11 = codegen(r11)
        with open('./tmp/g11.py', 'w') as f:
            f.write(gencode11)

        print('-' * 20, 'phase 12 - Walker')

        class PrintNameWalker(DepthFirstWalker):
            def __init__(self):
                self.walked = []

            def walk_default(self, o, children):
                self.walked.append(o.__class__.__name__)

        v = PrintNameWalker()
        v.walk(g11)
        with open('./tmp/12.txt', 'w') as f:
            f.write('\n'.join(v.walked))

        # note: pygraphviz not yet updated
        if sys.version_info >= (3, 7):
            return

        print('-' * 20, 'phase 13 - Graphics')
        try:
            from tatsu.diagrams import draw
        except ImportError:
            print('PyGraphViz not found!')
        else:
            if not util.PY37:
                draw('./tmp/13.png', g11)
示例#30
0
File: ast.py  Project: sdcloudt/TatSu
 def __json__(self):
     # Serialize public entries only; keys starting with '_' are internal.
     public_items = ((k, v) for k, v in self.items() if not k.startswith('_'))
     return {asjson(k): asjson(v) for k, v in public_items}
示例#31
0
 def __json__(self):
     # Skip private (underscore-prefixed) keys when serializing.
     result = {}
     for key, value in self.items():
         if key.startswith('_'):
             continue
         result[asjson(key)] = asjson(value)
     return result
 def asjson(self):
     # Convenience wrapper over the module-level asjson() serializer.
     return asjson(self)