def test_recognizer_for_unexisting_terminal_raises_exception():
    """
    If a recognizer is given for a terminal that can't be found in the
    grammar, an exception is raised.
    """

    with pytest.raises(GrammarError,
                       match=r'.*given for unknown terminal "B".'):
        Grammar.from_file(join(dirname(__file__), 'grammar.pg'))
Example #2
def test_imported_recognizers_error_undefined_recognizer():

    with pytest.raises(GrammarError,
                       match=r'has no recognizer defined and no recognizers '
                       'are given'):
        Grammar.from_file(os.path.join(this_folder, 'model.pg'))

    # If we provide the COMMA recognizer, the grammar constructs without exceptions.
    g = Grammar.from_file(os.path.join(this_folder, 'model.pg'),
                          recognizers={'base.COMMA': comma_recognizer})
    assert g
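
The `comma_recognizer` helper used above is not defined in these excerpts. A minimal sketch, assuming parglare's custom-recognizer convention of a callable taking (input, pos) and returning the recognized part of the input (or None when nothing matches):

def comma_recognizer(input, pos):
    # Hypothetical sketch: recognize a single comma at the current position.
    if input[pos] == ',':
        return input[pos:pos + 1]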
def test_imported_actions_override_by_grammar_actions():
    """
    Test that actions loaded from `*_actions.py` files can override actions
    imported from other grammar files.
    """

    g = Grammar.from_file(
        os.path.join(this_folder, 'in_grammar_by_symbol_name/model.pg'))
    model = Parser(g).parse(model_str)
    assert model.modelID == 43

    g = Grammar.from_file(
        os.path.join(this_folder, 'in_grammar_by_action_name/model.pg'))
    model = Parser(g).parse(model_str)
    assert model.modelID == 43
def test_recognizer_explicit_get_collector_missing_recognizer():
    """
    Test that an exception is raised when `get_collector` recognizers are used
    and a terminal has no recognizer defined.
    """

    recognizer = get_collector()

    @recognizer
    def INT(input, pos):
        return re.compile(r'\d+').match(input[pos:])

    with pytest.raises(GrammarError,
                       match=r'Terminal "STRING" has no recognizer defined.'):
        Grammar.from_file(os.path.join(THIS_FOLDER, 'grammar.pg'),
                          recognizers=recognizer.all)
def test_recognizer_explicit_get_collector_recognizer_for_unexisting_terminal(
):  # noqa
    """
    Test the situation when `get_collector` provides a recognizer for a
    non-existent terminal.
    """

    recognizer = get_collector()

    @recognizer
    def INT(input, pos):
        return re.compile(r'\d+').match(input[pos:])

    @recognizer
    def STRING(input, pos):
        return re.compile(r'\d+').match(input[pos:])

    @recognizer
    def STRING2(input, pos):
        return re.compile(r'\d+').match(input[pos:])

    grammar = Grammar.from_file(os.path.join(THIS_FOLDER, 'grammar.pg'),
                                recognizers=recognizer.all)
    parser = Parser(grammar)
    assert parser
Example #6
def test_grammar_with_unicode():
    this_folder = os.path.dirname(__file__)
    grammar = Grammar.from_file(os.path.join(this_folder, "names.pg"))
    parser = Parser(grammar, consume_input=False)
    inp = 'МИША МЫЛ РАМУ'
    result = parser.parse(inp)
    assert result
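
The names.pg grammar itself is not shown. A hypothetical sketch of a grammar that would accept such space-separated Cyrillic words, written here inline for illustration (assuming parglare's terminals-section syntax and that Grammar.from_string accepts the same grammar language; Python's \w matches Unicode word characters by default):

NAMES_GRAMMAR = r'''
Names: Name+;

terminals
Name: /\w+/;
'''
grammar = Grammar.from_string(NAMES_GRAMMAR)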
Example #7
def refinementChecker(scriptFile):
	try:
		g = Grammar.from_file("grammar")
		parser = Parser(g, actions=actions)
	except Exception as e:
		print(e)
		print("Parser generation: Failed.")
		print("Terminating.")
		sys.exit()
	print("Parser generation: Done.")
	try:
		script = parser.parse_file(scriptFile)
		print("Parse input: Done.")
	except Exception as e:
		print(e)
		print("Parse input: Failed.")
		print("Terminating.")
		sys.exit()
	try:
		execute(script)
	except Exception as e:
		print(e)
		print("Script execution: Failed.")
		print("Terminating.")
		sys.exit()
	print("Script execution: Done.")
	print("Terminating.")
    def __init__(self, input_file_path, rule_file_path):
        super().__init__()

        root_path = get_root_path()

        self.user_lib = importlib.import_module('model_converter.'
                                                'user_libs.functions',
                                                'functions')

        self.input_grammar = \
            Grammar.from_file(os.path.join(root_path,
                                           'converter',
                                           'grammars',
                                           'XMLGrammar.pg'))

        self.input_parser = Parser(self.input_grammar)

        self.input_file_path = input_file_path
        self.rule_file_path = rule_file_path

        self.conversion_dict = {"Subsystem": []}

        self.temp_subsystem_dict = {}

        self.path_dict = {}

        self.file_input = None
Example #9
def check_get_grammar_table(grammar_file, debug, colors):
    try:
        g = Grammar.from_file(grammar_file,
                              _no_check_recognizers=True,
                              debug_colors=colors)
        if debug:
            g.print_debug()
        table = create_table(g)
        if debug:
            table.print_debug()

        h_print("Grammar OK.")
        if table.sr_conflicts:
            a_print("There are {} Shift/Reduce conflicts.".format(
                len(table.sr_conflicts)))
            prints("Either use 'prefer_shifts' parser mode, try to resolve "
                   "manually or use GLR parsing.")
        if table.rr_conflicts:
            a_print("There are {} Reduce/Reduce conflicts.".format(
                len(table.rr_conflicts)))
            prints("Try to resolve manually or use GLR parsing.")

        if (table.sr_conflicts or table.rr_conflicts) and not debug:
            prints("Run in debug mode to print all the states.")

    except (GrammarError, ParseError) as e:
        print("Error in the grammar file.")
        print(e)
        sys.exit(1)

    return g, table
Example #10
def create_parser(grammar_path, actions):
    return Parser(
        Grammar.from_file(grammar_path),
        debug=get_boolean_environ('DEBUG'),
        debug_layout=get_boolean_environ('DEBUG_LEVEL'),
        actions=actions
    )
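
The `get_boolean_environ` helper is not part of this excerpt; a plausible sketch, purely an assumption about its behavior, that reads a boolean flag from an environment variable (os is assumed imported as in the other snippets):

def get_boolean_environ(name):
    # Hypothetical helper: unset or non-truthy values yield False.
    return os.environ.get(name, '').lower() in ('1', 'true', 'yes', 'on')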
Example #11
def cpu_tests():
    results = []
    for test_idx, test in enumerate(TESTS):
        for parsing in ['LR', 'GLR']:
            if ((not test.lr and parsing == 'LR')
                    or (not test.glr and parsing == 'GLR')):
                continue

            parser_class = Parser if parsing == 'LR' else GLRParser
            for input_idx in range(INPUTS):
                result = TestResult(f'{test.name} {parsing}')
                result.input_idx = input_idx + 1
                test_root = join(dirname(__file__), f'test{test_idx+1}')
                file_name = join(test_root, f'input{input_idx+1}')
                result.size = getsize(file_name)

                g = Grammar.from_file(join(test_root, 'g.pg'))
                parser = parser_class(g)
                result.nonterminals = len(g.nonterminals)
                result.productions = len(g.productions)
                result.states = len(parser.table.states)

                with io.open(file_name, 'r', encoding='utf-8') as f:
                    content = f.read()
                gcold = gc.isenabled()
                gc.disable()
                try:
                    t_start = time.time()
                    for i in range(REPEAT):
                        forest = parser.parse(content)
                    t_end = time.time()
                finally:
                    if gcold:
                        gc.enable()

                result.time = t_end - t_start
                result.speed = int(result.size / (t_end - t_start) * REPEAT)
                if parsing == 'GLR':
                    result.ambig = forest.ambiguities

                results.append(result)

    with open(join(dirname(__file__), 'reports', 'cpu-report.txt'), 'w') as f:
        inputs = '|'.join(f'    I{i+1}   ' for i in range(INPUTS))
        f.write(f'|               |{inputs}|\n')
        previous_name = 'None'
        for name, results in groupby(results, lambda r: r.name):
            results = list(results)
            if not name.startswith(previous_name):
                sizes_str = '|'.join(f'{r.size:^9,d}' for r in results)
                title = '{:15s}'.format(name[:-3] + ' sizes')
                f.write(f'|{title}|{sizes_str}|\n')
            results_str = '|'.join(f'{r.speed:^9,d}' for r in results)
            f.write(f'|{name:15s}|{results_str}|\n')
            if name.endswith('GLR'):
                ambig_str = '|'.join(f'{r.ambig:^9,d}' for r in results)
                title = '{:15s}'.format(name[:-4] + ' ambig')
                f.write(f'|{title}|{ambig_str}|\n')
            previous_name = ''.join(name.split()[:-1])
    def __init__(self, filepath, debug=False):
        self.grammar_file = os.path.join(this_folder, 'ConfigFileGrammer.pg')
        self.grammer_obj = Grammar.from_file(self.grammar_file, debug=debug)
        self.parser = Parser(self.grammer_obj, build_tree=True, debug=debug)
        self.config_file = open(filepath, 'r')
        self.parse_result = self.parser.parse(self.config_file.read())
        self.IdValueList = []
        self.collect_list(self.parse_result, "")
Example #13
def test_fqn_constructed_by_first_import_path():

    g = Grammar.from_file(os.path.join(this_folder, 'A.pg'))

    assert g.get_terminal('B.C.CTerm')
    assert not g.get_terminal('C.CTerm')
    assert g.get_nonterminal('B.C.CRule')
    assert not g.get_nonterminal('C.CRule')
def test_diamond_import_resolving_and_model_creation():
    g = Grammar.from_file(os.path.join(this_folder, 'model.pg'))
    assert g
    assert g.get_terminal('packages.components.base.COMMA')
    assert g.get_nonterminal('Model')

    # The first path used to import Component goes
    # packages->components->Component.
    component_nonterminal = g.get_nonterminal('packages.components.Component')
    assert component_nonterminal

    input_str = '''

    package First
    package Second {
        component packageComponent {

        }
    }

    module SomeModule {

        component myComponent {
            in SomeInputSlot
            out SomeOutputSlot
        }


    }

    '''

    model = Parser(g).parse(input_str)
    assert model
    assert model.__class__.__name__ == 'Model'
    assert type(model.packages) is list
    assert len(model.packages) == 2
    assert model.packages[0].name == 'First'
    assert type(model.modules) is list
    assert len(model.modules) == 1

    packageComponent = model.packages[1].body.components[0]
    assert packageComponent.name == 'packageComponent'

    module = model.modules[0]
    assert module.__class__.__name__ == 'm.Module'
    assert module.name == 'SomeModule'
    assert len(module.components) == 1

    component = module.components[0]
    assert type(component) == type(packageComponent)
    assert component.name == 'myComponent'
    assert len(component.slots) == 2

    slot = component.slots[1]
    assert slot.__class__.__name__ == 'packages.components.SlotOut'
    assert slot.name == 'SomeOutputSlot'
Example #15
    def from_file(self, filename, cleaned=True, **kwargs):
        actions = self.actions if cleaned else None
        g = Grammar.from_file(self.grammar, **kwargs)
        parser = Parser(g, actions=actions, build_tree=self.build_tree, **kwargs)

        result = parser.parse_file(file_name=filename)
        self.fetch_tree(result)

        return result
Example #16
    def from_string(self, input_str, cleaned=True, **kwargs):
        actions = self.actions if cleaned else None
        g = Grammar.from_file(self.grammar, **kwargs)
        parser = Parser(g, actions=actions, build_tree=self.build_tree, **kwargs)

        result = parser.parse(input_str)
        self.fetch_tree(result)

        return result
Example #17
def main(debug=False):
    this_folder = os.path.dirname(__file__)
    g = Grammar.from_file(os.path.join(this_folder, 'json.pg'))
    parser = Parser(g, debug=debug, debug_colors=True)

    for i in range(5):
        result = parser.parse_file(
            os.path.join(this_folder, f'example{i+1}.json'))
        print(result)
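
As with the other example scripts here, this is presumably invoked through a standard entry point; a minimal usage sketch:

if __name__ == '__main__':
    main(debug=True)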
Example #18
def main(debug=False):
    this_folder = os.path.dirname(__file__)
    g = Grammar.from_file(os.path.join(this_folder, 'robot.pg'),
                          debug=debug, debug_colors=True)
    parser = Parser(g, actions=action.all, debug=debug,
                    debug_colors=True)

    end_position = parser.parse_file(os.path.join(this_folder, 'program.rbt'))

    print("Robot stops at position: {}".format(end_position))
Example #19
def main(debug=False):

    this_folder = os.path.dirname(__file__)
    grammar_file = os.path.join(this_folder, 'rhapsody.pg')
    g = Grammar.from_file(grammar_file, debug=debug, debug_colors=True)
    parser = Parser(g, build_tree=True, debug=debug, debug_colors=True)

    with open(os.path.join(this_folder, 'LightSwitch.rpy'), 'r') as f:
        result = parser.parse(f.read())
        print(result.to_str())
Example #20
def test_imported_recognizers_override():
    """
    Test that recognizers loaded from `*_recognizers.py` files can be
    overridden by user-provided recognizers.
    """

    called = [False, False]

    def numeric_id(input, pos):
        called[0] = True

    def fqn(input, pos):
        called[1] = True

    recognizers = {
        'base.COMMA': comma_recognizer,
        'base.NUMERIC_ID': numeric_id,
        'base.FQN': fqn
    }

    g = Grammar.from_file(os.path.join(this_folder, 'model.pg'),
                          recognizers=recognizers)
    assert g
    with pytest.raises(ParseError):
        Parser(g).parse(model_str)
    assert any(called)

    called = [False]

    def numeric_id(input, pos):
        called[0] = True
        return number(input, pos)

    recognizers = {
        'base.COMMA': comma_recognizer,
        'base.NUMERIC_ID': numeric_id,
    }

    g = Grammar.from_file(os.path.join(this_folder, 'model.pg'),
                          recognizers=recognizers)
    assert g
    Parser(g).parse(model_str)
    assert called[0]
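
The `number` recognizer called by the overriding `numeric_id` above is not part of this excerpt. A minimal sketch, assuming it is a plain digit recognizer following the same (input, pos) recognizer convention:

NUMBER_RE = re.compile(r'\d+')

def number(input, pos):
    # Return the digits matched at the given position, or None when nothing matches.
    m = NUMBER_RE.match(input, pos)
    return m.group() if m else None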
Example #21
def test_save_load_table():
    """
    Test basic table save/load cycle with table file creation.
    """
    calc_file = os.path.join(this_folder, 'calc.pg')
    variable_file = os.path.join(this_folder, 'variable.pg')
    input_str = 'a = 5   1 + 2 * a - 7'
    input_str_result = 1 + 2 * 5 - 7
    grammar = Grammar.from_file(calc_file)

    table_file = os.path.join(this_folder, 'calc.pgt')
    # Remove the table file if it exists.
    try:
        os.remove(table_file)
    except OSError:
        pass

    parser = Parser(grammar)
    assert parser.parse(input_str) == input_str_result

    # Table file must be produced by parser construction.
    assert os.path.exists(table_file)

    last_mtime = os.path.getmtime(table_file)
    time.sleep(1)

    parser = Parser(grammar)

    # Last generated table should be used during parser construction.
    # Currently, it is hard to check this so we'll only check if
    # table_file is not regenerated.
    assert last_mtime == os.path.getmtime(table_file)
    # Parser constructed from persisted table should produce the same result.
    assert parser.parse(input_str) == input_str_result

    # We now touch the variable.pg file.
    # This should trigger table file regeneration.
    with open(variable_file, 'a'):
        os.utime(variable_file, None)
    parser = Parser(grammar)
    assert parser.parse(input_str) == input_str_result
    # We verify that the table file is newer.
    assert last_mtime < os.path.getmtime(table_file)

    # Now we test that force_load_table will load table even if not
    # newer than the grammar.
    time.sleep(1)
    with open(variable_file, 'a'):
        os.utime(variable_file, None)
    last_mtime = os.path.getmtime(table_file)
    parser = Parser(grammar, force_load_table=True)
    assert last_mtime == os.path.getmtime(table_file)
    parser = Parser(grammar)
    assert last_mtime < os.path.getmtime(table_file)
Example #22
def compile_alarm_python_condition(alarm_str, log_format):
    g = Grammar.from_file('alarm_language.pg')
    # no actions for now
    p = Parser(g, actions=actions)

    res = p.parse(alarm_str)
    # print(res)
    res.remove_not()
    res.semantic_analysis(build_log_parser(log_format))
    res = res.python_condition()
    return res
Example #23
    def from_stream(self, source, cleaned=True, **kwargs):
        actions = self.actions if cleaned else None
        g = Grammar.from_file(self.grammar, **kwargs)
        parser = Parser(g, actions=actions, build_tree=self.build_tree, **kwargs)

        with open(source, "r") as f:
            result = parser.parse(f.read())

        self.fetch_tree(result)

        return result
Example #24
def main(debug=False):
    this_folder = os.path.dirname(__file__)
    g = Grammar.from_file(os.path.join(this_folder, 'c.pg'),
                          re_flags=re.MULTILINE | re.VERBOSE)
    parser = GLRParser(g, debug=debug, debug_colors=True)

    # The input is C code after preprocessing
    forest = parser.parse_file(os.path.join(this_folder, 'example.c'))

    print('Solutions: ', len(forest))
    print('Ambiguities: ', forest.ambiguities)
def run():
    g = Grammar.from_file('rhapsody.pg')

    this_folder = dirname(__file__)
    parser = Parser(g)

    # Small file
    parser.parse_file(join(this_folder, 'test_inputs', 'LightSwitch.rpy'))

    # Large file
    parser.parse_file(join(this_folder, 'test_inputs',
                           'LightSwitchDouble.rpy'))
Example #26
def parse(ctx, grammar_file, input_file, input, glr, recovery, dot, positions):
    if not (input_file or input):
        prints('Expected either input_file or input string.')
        sys.exit(1)
    colors = ctx.obj['colors']
    debug = ctx.obj['debug']
    prefer_shifts = ctx.obj['prefer_shifts']
    prefer_shifts_over_empty = ctx.obj['prefer_shifts_over_empty']
    grammar = Grammar.from_file(grammar_file, debug=debug, debug_colors=colors)
    if glr:
        parser = GLRParser(grammar,
                           debug=False,
                           debug_colors=colors,
                           error_recovery=recovery,
                           prefer_shifts=prefer_shifts,
                           prefer_shifts_over_empty=prefer_shifts_over_empty)
    else:
        parser = Parser(grammar,
                        build_tree=True,
                        debug=False,
                        debug_colors=colors,
                        error_recovery=recovery,
                        prefer_shifts=prefer_shifts,
                        prefer_shifts_over_empty=prefer_shifts_over_empty)

    if input:
        result = parser.parse(input)
    else:
        result = parser.parse_file(input_file)

    if glr:
        print(f'Solutions:{result.solutions}')
        print(f'Ambiguities:{result.ambiguities}')

    if recovery:
        print(f'Errors: {len(parser.errors)}')
        for error in parser.errors:
            print('\t', str(error))

    if glr and result.solutions > 1:
        print('Printing the forest:\n')
        result = result
    else:
        print('Printing the parse tree:\n')

    print(result.to_str())

    if dot:
        f_name = 'forest.dot' if glr and result.solutions > 1 else 'tree.dot'
        with open(f_name, 'w') as f:
            f.write(result.to_dot(positions))
        print('Created dot file ', f_name)
Example #27
def mem_tests():
    results = []
    for test_idx, test in enumerate(TESTS):
        for parsing in ['LR', 'GLR']:
            if ((not test.lr and parsing == 'LR')
                    or (not test.glr and parsing == 'GLR')):
                continue

            parser_class = Parser if parsing == 'LR' else GLRParser
            for input_idx in range(INPUTS):
                result = TestResult(f'{test.name} {parsing}')
                result.input_idx = input_idx + 1
                test_root = join(dirname(__file__), f'test{test_idx+1}')
                file_name = join(test_root, f'input{input_idx+1}')
                result.size = getsize(file_name)

                g = Grammar.from_file(join(test_root, 'g.pg'))
                parser = parser_class(g)

                with io.open(file_name, 'r', encoding='utf-8') as f:
                    content = f.read()

                gc.collect()
                tracemalloc.start()
                forest = parser.parse(content)
                _, peak = tracemalloc.get_traced_memory()
                result.mem = peak // 1000
                tracemalloc.stop()

                if parsing == 'GLR':
                    result.ambig = forest.ambiguities

                results.append(result)

    with open(join(dirname(__file__), 'reports', 'mem-report.txt'), 'w') as f:
        inputs = '|'.join(f'    I{i+1}   ' for i in range(INPUTS))
        f.write(f'|               |{inputs}|\n')
        previous_name = 'None'
        for name, results in groupby(results, lambda r: r.name):
            results = list(results)
            if not name.startswith(previous_name):
                sizes_str = '|'.join(f'{r.size:^9,d}' for r in results)
                title = '{:15s}'.format(name[:-3] + ' sizes')
                f.write(f'|{title}|{sizes_str}|\n')
            results_str = '|'.join(f'{r.mem:^9,d}' for r in results)
            f.write(f'|{name:15s}|{results_str}|\n')
            if name.endswith('GLR'):
                ambig_str = '|'.join(f'{r.ambig:^9,d}' for r in results)
                title = '{:15s}'.format(name[:-4] + ' ambig')
                f.write(f'|{title}|{ambig_str}|\n')
            previous_name = ''.join(name.split()[:-1])
Example #28
def test_imported_recognizers_override_by_importing_grammar_file():
    """
    Test that recognizers loaded from `*_recognizers.py` files can be
    overridden in the importing grammar's `*_recognizers.py` file by providing
    the FQN of the imported terminal relative to the importing grammar file.
    """

    g = Grammar.from_file(os.path.join(this_folder, 'model_override.pg'))
    assert g

    t = g.get_terminal('base.NUMERIC_ID')
    assert t is not None

    assert t.recognizer.__doc__ == 'Check override'
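
The overriding recognizer itself lives in a `*_recognizers.py` file next to the importing grammar. A hedged sketch of what such a file might contain; the file name, the collector variable name and the named-decorator form are assumptions based on parglare's conventions, not shown in this excerpt:

# model_override_recognizers.py (hypothetical sketch)
from parglare import get_collector

recognizer = get_collector()

@recognizer('base.NUMERIC_ID')
def numeric_id_override(input, pos):
    'Check override'
    # Recognize one or more digits at the current position.
    end = pos
    while end < len(input) and input[end].isdigit():
        end += 1
    if end > pos:
        return input[pos:end]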
Example #29
def test_imported_recognizers_connect_from_external_file():
    g = Grammar.from_file(os.path.join(this_folder, 'model.pg'),
                          recognizers={'base.COMMA': comma_recognizer})

    # Check that recognizers are loaded and connected.
    rec_fqn = g.get_terminal('base.FQN')
    assert rec_fqn.recognizer
    assert type(rec_fqn.recognizer) is FunctionType
    assert rec_fqn.recognizer.__name__ == 'FQN'

    rec_fqn = g.get_terminal('base.NUMERIC_ID')
    assert rec_fqn.recognizer
    assert type(rec_fqn.recognizer) is FunctionType
    assert rec_fqn.recognizer.__name__ == 'number'
def test_imported_actions_override():
    """
    Test that actions loaded from `*_actions.py` files can be overridden by
    user-provided actions.
    """

    # We can override either by the FQN of the symbol
    g = Grammar.from_file(os.path.join(this_folder, 'by_symbol_name/model.pg'))
    actions = {'base.NUMERIC_ID': lambda _, value: 43}
    model = Parser(g, actions=actions).parse(model_str)
    assert model.modelID == 43

    # Or by the action name used in the grammar for the given symbol
    g = Grammar.from_file(os.path.join(this_folder, 'by_action_name/model.pg'))
    actions = {'base.number': lambda _, value: 43}
    model = Parser(g, actions=actions).parse(model_str)
    assert model.modelID == 43

    # Override by FQN takes precedence
    g = Grammar.from_file(os.path.join(this_folder, 'by_action_name/model.pg'))
    actions = {'base.NUMERIC_ID': lambda _, value: 43}
    model = Parser(g, actions=actions).parse(model_str)
    assert model.modelID == 43
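
The `*_actions.py` files referenced by these tests follow a similar collector convention. A hedged sketch of such a file; the file name and the `action` collector variable are assumptions based on parglare's conventions, while the (context, value) terminal action signature matches the lambdas above:

# model_actions.py (hypothetical sketch), picked up next to model.pg
from parglare import get_collector

action = get_collector()

@action('number')
def number(context, value):
    # Convert the matched NUMERIC_ID text to an int.
    return int(value)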