Example #1
def print_lex(text: str):
    lexer = DemystifyLexer(InputStream(text))
    stream = CommonTokenStream(lexer)
    stream.fill()
    for token in stream.tokens:
        print('%s: %s' %
              (token.text, DemystifyLexer.symbolicNames[token.type]))
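A hedged usage note: DemystifyLexer is the project's generated lexer, and stream.fill() forces tokenization through EOF so stream.tokens is fully populated; each token's numeric type then indexes the lexer's symbolicNames table. Calling, say, print_lex("destroy target creature") prints one "text: TYPE" line per token.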
Example #2
    def compileTreePattern(self, pattern: str, patternRuleIndex: int):
        tokenList = self.tokenize(pattern)
        tokenSrc = ListTokenSource(tokenList)
        tokens = CommonTokenStream(tokenSrc)
        from antlr4.ParserInterpreter import ParserInterpreter
        parserInterp = ParserInterpreter(self.parser.grammarFileName,
                                         self.parser.tokenNames,
                                         self.parser.ruleNames,
                                         self.parser.getATNWithBypassAlts(),
                                         tokens)
        tree = None
        try:
            parserInterp.setErrorHandler(BailErrorStrategy())
            tree = parserInterp.parse(patternRuleIndex)
        except ParseCancellationException as e:
            raise e.cause
        except RecognitionException as e:
            raise e
        except Exception as e:
            raise CannotInvokeStartRule(e)

        # Make sure tree pattern compilation checks for a complete parse
        if tokens.LA(1) != Token.EOF:
            raise StartRuleDoesNotConsumeFullPattern()

        from antlr4.tree.ParseTreePattern import ParseTreePattern
        return ParseTreePattern(self, pattern, patternRuleIndex, tree)
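The method above mirrors ANTLR 4's ParseTreePatternMatcher. A minimal sketch of reaching the same machinery through the public API (MyParser and RULE_statement are hypothetical names; compileParseTreePattern, match, succeeded and get are the stock runtime calls):

    # Hypothetical grammar names; the pattern-matching calls are standard ANTLR 4.
    pattern = parser.compileParseTreePattern("<ID> = <expr>;", MyParser.RULE_statement)
    match = pattern.match(tree)
    if match.succeeded():
        print(match.get("ID").getText())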
Example #3
    def test_antecedents_terms_have_correct_mf_values_using_singleton_and_piecewise(self):
        fcl_text = """
        FUNCTION_BLOCK my_system
            FUZZIFY antecedent1
                TERM mf1 := 4.0;
                TERM mf2 := (0, 0.2) (2, 0) (3, 1);
                TERM mf3 := 1.0;
            END_FUZZIFY
        END_FUNCTION_BLOCK
        """
        lexer = FclLexer(InputStream(fcl_text))
        stream = CommonTokenStream(lexer)
        parser = FclParser(stream)
        tree = parser.main()

        listener = ScikitFuzzyFclListener()
        walker = ParseTreeWalker()
        walker.walk(listener, tree)
        antecedent = listener.antecedents.get('antecedent1').get('value')
        term = antecedent['mf1']
        expected_mf_value = np.asarray([0, 0, 0, 0, 1])  # fx[0], fx[1], fx[2], fx[3], fx[4]
        np.testing.assert_array_equal(expected_mf_value, term.mf)

        term = antecedent['mf2']
        expected_mf_value = np.asarray([0.2, 0.1, 0, 1, 0])  # fx[0], fx[1], fx[2], fx[3], fx[4]
        np.testing.assert_array_equal(expected_mf_value, term.mf)

        term = antecedent['mf3']
        expected_mf_value = np.asarray([0, 1, 0, 0, 0])  # fx[0], fx[1], fx[2], fx[3], fx[4]
        np.testing.assert_array_equal(expected_mf_value, term.mf)
Example #4
    def test_consequent_define_universe_override_range_defined_in_var_if_defined_in_consequent(self):
        fcl_text = """
        FUNCTION_BLOCK my_system
            VAR_output
                consequent1 : REAL (1 .. 9);
            END_VAR
            DEFUZZIFY consequent1
                RANGE := (0 .. 30);
            END_DEFUZZIFY
        END_FUNCTION_BLOCK
        """
        lexer = FclLexer(InputStream(fcl_text))
        stream = CommonTokenStream(lexer)
        parser = FclParser(stream)
        tree = parser.main()

        listener = ScikitFuzzyFclListener()
        walker = ParseTreeWalker()
        walker.walk(listener, tree)
        consequents = listener.consequents
        expected_universe = np.asarray([0., 30.])
        self.assertIn('consequent1', consequents)
        self.assertEqual('consequent1', consequents.get('consequent1').get('value').label)
        np.testing.assert_array_equal(expected_universe, consequents.get('consequent1').get('value').universe)
Example #5
def main(argv):
    global mode, build_parse_tree, token_stream

    input_file_name = "default_input_file"

    if len(argv) > 1:
        input_file_name = argv[1]

        if len(argv) > 2:
            mode = argv[2].lower()

            if len(argv) > 3:
                build_parse_tree = argv[3].lower() == "true"

    data = ""
    with open("default_input_file", "r") as file:
        data = file.read().replace("\n", "")
    code_stream = InputStream(data)
    lexer = __TemplateGrammarName__Lexer(code_stream)
    token_stream = CommonTokenStream(lexer)

    __TemplateGrammarName_____RuntimeName__()
    benchmark(True, warm_up_count)
    time = benchmark(False, iteration_count)

    with open("__TemplateGrammarName_____RuntimeName__.benchmark",
              "w") as result_file:
        result_file.write(str(time))
Example #6
    def test_antecedents_terms_have_correct_mf_values_with_more_then_one_term(self):
        fcl_text = """
        FUNCTION_BLOCK my_system
            FUZZIFY antecedent1
                TERM mf1 := (0, 1) (0.5, 0);
                TERM mf2 := (1, 0.3) (2, 0) (3, 1);
                TERM mf3 := (2, 0.4) (4, 1) (5, 1);
            END_FUZZIFY
        END_FUNCTION_BLOCK
        """
        lexer = FclLexer(InputStream(fcl_text))
        stream = CommonTokenStream(lexer)
        parser = FclParser(stream)
        tree = parser.main()

        listener = ScikitFuzzyFclListener()
        walker = ParseTreeWalker()
        walker.walk(listener, tree)
        antecedent = listener.antecedents.get('antecedent1').get('value')
        term = antecedent['mf1']
        expected_mf_value = np.asarray([1, 0, 0, 0, 0, 0, 0])
        np.testing.assert_array_equal(expected_mf_value, term.mf)

        term2 = antecedent['mf2']
        expected_mf_value = np.asarray([0, 0, 0.3, 0, 1, 0, 0])
        np.testing.assert_array_equal(expected_mf_value, term2.mf)

        term3 = antecedent['mf3']
        expected_mf_value = np.asarray([0, 0, 0, 0.4, 0.7, 1, 1])
        np.testing.assert_array_equal(expected_mf_value, term3.mf)
Example #7
    def test_rule_if_clause_condition_then_clause_with_x(self):
        fcl_text = """
        FUNCTION_BLOCK f_block
            RULEBLOCK rule1
                RULE first_rule : IF something AND otherthing THEN final IS final2 WITH 123;
            END_RULEBLOCK
        END_FUNCTION_BLOCK
        """

        class FclListenerRules(FclListener):
            def enterThen_clause(_self, ctx):
                conclusion = ctx.conclusion()
                subconclusion = conclusion.sub_conclusion()[0]
                final = subconclusion.ID()[0].getText()
                final2 = subconclusion.ID()[1].getText()
                self.assertEqual(final, 'final')
                self.assertEqual(final2, 'final2')

            def enterWith_x(_self, ctx):
                real = ctx.REAL().getText()

                self.assertEqual(real, '123')

        lexer = FclLexer(InputStream(fcl_text))
        stream = CommonTokenStream(lexer)
        parser = FclParser(stream)
        tree = parser.main()

        listener = FclListenerRules()
        walker = ParseTreeWalker()
        walker.walk(listener, tree)
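Note the listener callbacks take _self as their first parameter on purpose: it leaves the enclosing TestCase's self in scope, so the assertions inside the callbacks report through the outer test.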
Example #8
    def _expandDefine(self, tok):
        srcName = "macro '%s' on %s(%d:%d)" % (
            tok.text, tok.getInputStream().name, tok.line, tok.column)
        # try to get syntax of usage
        debug("---- internal macro processed ---")
        lex = type(self)(input=createInputStream(fromstr=tok.text[1:],
                                                 srcName="<usage %s>" %
                                                 srcName),
                         options=self.opts)
        par = VerexParser(CommonTokenStream(lex))
        usage = par.text_macro_usage()
        name = usage.text_macro_identifier().getText()
        if name not in self.opts.defines:
            raise VerpySyntaxError("Unknown macro '%s'" % name)
        args = []
        for a in usage.expression():
            args.append(a.getText())

        # create a LEX to handle expanded code
        expanded = self.opts.expandDefines(name, *args)
        debug("---- macro expaned '%s' -> %s ---" % (name, expanded))
        return type(self)(input=createInputStream(fromstr=expanded,
                                                  srcName="<expand %s>" %
                                                  srcName),
                          options=self.opts)
Example #9
    def test_var_input_and_output(self):
        fcl_text = """
        FUNCTION_BLOCK f_block
            VAR_INPUT
                input_id1 : REAL;
            END_VAR
            VAR_OUTPUT
                output_id1 : REAL;
            END_VAR
            VAR_INPUT
                input_id2 : REAL;
            END_VAR
        END_FUNCTION_BLOCK
        """
        lexer = FclLexer(InputStream(fcl_text))
        stream = CommonTokenStream(lexer)
        parser = FclParser(stream)
        tree = parser.main()
        listener = FclListenerTester()
        walker = ParseTreeWalker()
        walker.walk(listener, tree)

        self.assertEqual(['output_id1', 'REAL'], listener.outputs[0])
        self.assertEqual(['input_id1', 'REAL'], listener.inputs[0])
        self.assertEqual(['input_id2', 'REAL'], listener.inputs[1])
Example #10
    def test_rule_if_clause_condition_if_clause_with_and(self):
        fcl_text = """
        FUNCTION_BLOCK f_block
            RULEBLOCK rule1
                RULE first_rule : IF something AND otherthing THEN conclusion IS final;
            END_RULEBLOCK
        END_FUNCTION_BLOCK
        """

        class FclListenerRules(FclListener):
            def enterIf_clause(_self, ctx):
                condition = ctx.condition()
                something = condition.getChild(0).getText()
                operator = condition.getChild(1).getText()
                otherthing = condition.getChild(2).getText()
                self.assertEqual(something, 'something')
                self.assertEqual(operator, 'AND')
                self.assertEqual(otherthing, 'otherthing')

        lexer = FclLexer(InputStream(fcl_text))
        stream = CommonTokenStream(lexer)
        parser = FclParser(stream)
        tree = parser.main()

        listener = FclListenerRules()
        walker = ParseTreeWalker()
        walker.walk(listener, tree)
Example #11
    def parse(actions, spdOutput, two_stageParsing):
        tokens = CommonTokenStream(SPDGrammarLexer(InputStream(spdOutput)))
        parser = SPDGrammarParser(tokens)
        visitor = SPDGrammarVisitorImplementation(actions)

        if not two_stageParsing:
            visitor.visit(parser.json())

            return visitor._errors

        # First pass: fast SLL prediction, configured to bail on the first error.
        parser._interp.predictionMode = PredictionMode.SLL
        parser.removeErrorListeners()
        parser._errHandler = BailErrorStrategy()

        try:
            visitor.visit(parser.json())
        except ParseCancellationException:
            # In the Python runtime BailErrorStrategy raises
            # ParseCancellationException (not RuntimeError), so catch that,
            # rewind, and reparse with the full LL algorithm.
            tokens.seek(0)
            parser.reset()
            parser.addErrorListener(ConsoleErrorListener.INSTANCE)
            parser._errHandler = DefaultErrorStrategy()
            parser._interp.predictionMode = PredictionMode.LL
            visitor.visit(parser.json())

        return visitor._errors
Example #12
def main(argv):
    data = generate_data()
    code_stream = InputStream(data)
    lexer = LeftRecursionGrammarLexer(code_stream)
    global mode, tokens_stream
    tokens_stream = CommonTokenStream(lexer)

    mode = "SLL"
    sll_left_recursion = benchmark_average("left_recursion_test", "SLL")
    sll_not_left_recursion = benchmark_average("not_left_recursion_test",
                                               "SLL")

    mode = "LL"
    ll_left_recursion = benchmark_average("left_recursion_test", "LL")
    ll_not_left_recursion = benchmark_average("not_left_recursion_test", "LL")

    print("")

    print("SLL:")
    print(f"left recursion: {sll_left_recursion} us")
    print(f"not left recursion: {sll_not_left_recursion} us")
    print(f"ratio: {sll_not_left_recursion / sll_left_recursion:0.2f}")

    print("")

    print("LL:")
    print(f"left recursion: {ll_left_recursion} us")
    print(f"not left recursion: {ll_not_left_recursion} us")
    print(f"ratio: {ll_not_left_recursion / ll_left_recursion:0.2f}")
Example #13
def parse(s, method):
    input = InputStream(s)
    lex = ZeamplLexer(input)
    tok_stream = CommonTokenStream(lex)
    p = ZeamplParser(tok_stream)
    ctx = method(p)
    builder = ZeamplASTBuilder(tok_stream)
    return ctx.accept(builder)
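Here ctx.accept(builder) performs visitor dispatch: it calls back into the visit method of ZeamplASTBuilder that matches the parsed rule, so parse returns whatever AST node the builder constructs. A plausible call, assuming the grammar has a program rule, is parse(source, ZeamplParser.program).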
Example #14
File: fim_rus.py  Project: fox0/fim_rus
def main(filename):
    lexer = FimRusLexer(FileStream(filename, encoding='utf8'))
    stream = CommonTokenStream(lexer)
    parser = FimRusParser(stream)
    tree = parser.program()
    listener = Listener()
    walker = ParseTreeWalker()
    walker.walk(listener, tree)
Example #15
    def testInsertAfterLastIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)
        rewriter.insertAfter(10, 'x')

        self.assertEquals(rewriter.getDefaultText(), 'abcx')
Example #16
    def testInsertBeforeIndexZero(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)
        rewriter.insertBeforeIndex(0, '0')

        self.assertEquals(rewriter.getDefaultText(), '0abc')
Example #17
def compile(inputFile):
    input_stream = FileStream(inputFile)
    lexer = MusicGeneratorLexer(input_stream)
    stream = CommonTokenStream(lexer)
    parser = MusicGeneratorParser(stream)
    tree = parser.statements()
    printer = Listener()
    walker = ParseTreeWalker()
    walker.walk(printer, tree)
Example #18
    def testReplaceSubsetThenFetch(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'xyz')

        self.assertEquals('abxyzba', rewriter.getDefaultText())
Example #19
    def testReplaceMiddleIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceIndex(1, 'x')

        self.assertEquals(rewriter.getDefaultText(), 'axc')
Example #20
    def testReplaceAll(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(0, 6, 'x')

        self.assertEquals('x', rewriter.getDefaultText())
Example #21
    def testInsertBeforeTokenThenDeleteThatToken(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'foo')
        rewriter.replaceRange(1, 2, 'foo')

        self.assertEquals('afoofoo', rewriter.getDefaultText())
Example #22
    def testLeaveAloneDisjointInsert2(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 3, 'foo')
        rewriter.insertBeforeIndex(1, 'x')

        self.assertEquals('axbfoo', rewriter.getDefaultText())
Example #23
    def testDropIdenticalReplace(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(1, 2, 'foo')
        rewriter.replaceRange(1, 2, 'foo')

        self.assertEquals('afooc', rewriter.getDefaultText())
Example #24
    def testOverlappingReplace4(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(1, 2, 'foo')
        rewriter.replaceRange(1, 3, 'bar')

        self.assertEquals('abar', rewriter.getDefaultText())
Example #25
    def testCombineInsertOnLeftWithDelete(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.delete('default', 0, 2)
        rewriter.insertBeforeIndex(0, 'z')

        self.assertEquals('z', rewriter.getDefaultText())
Example #26
    def testCombineInsertOnLeftWithReplace(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(0, 2, 'foo')
        rewriter.insertBeforeIndex(0, 'z')

        self.assertEquals('zfoo', rewriter.getDefaultText())
Example #27
    def test2InsertBeforeAfterMiddleIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'x')
        rewriter.insertAfter(1, 'x')

        self.assertEquals(rewriter.getDefaultText(), 'axbxc')
Example #28
    def testCombineInserts(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(0, 'x')
        rewriter.insertBeforeIndex(0, 'y')

        self.assertEquals('yxabc', rewriter.getDefaultText())
Example #29
    def testReplaceSingleMiddleThenOverlappingSuperset(self):
        input = InputStream('abcba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceIndex(2, 'xyz')
        rewriter.replaceRange(0, 3, 'foo')

        self.assertEquals('fooa', rewriter.getDefaultText())
Example #30
    def testReplaceRangeThenInsertAfterRightEdge(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'x')
        rewriter.insertAfter(4, 'y')

        self.assertEquals('abxyba', rewriter.getDefaultText())
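Every rewriter test above follows the same shape; a condensed, hedged sketch of that flow (TestLexer stands in for whatever single-character lexer the suite generates):

    stream = CommonTokenStream(TestLexer(InputStream('abc')))
    stream.fill()                       # tokenize to EOF so token indexes are valid
    rewriter = TokenStreamRewriter(stream)
    rewriter.insertBeforeIndex(0, '<')  # edits are queued, not applied in place
    rewriter.insertAfter(2, '>')
    print(rewriter.getDefaultText())    # '<abc>': edits materialize only on render

The underlying token stream is never mutated; getDefaultText() replays the queued edits over the original tokens, which is why conflicting operations (overlapping replaces, identical edits) can be normalized or rejected before any text is produced.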