Example #1
    def build_parse_pipeline(self, file: TextIOBase, makefile: Makefile) -> Iterator[Target]:
        file_lines = FileLineIterator.make(file)
        sub_conditions = [DatabaseSectionFilter.make(), FileSectionFilter.make(), InformationalCommentFilter.make()]
        filtered_lines = ConditionFilter.make(file_lines, CompositeCondition.make(sub_conditions))
        file_section_chars = LineToCharIterator.make(filtered_lines)

        # Drain the filtered character iterator into a buffer and wrap it in an ANTLR InputStream.
        with StringIO() as strbuff:
            if file_section_chars.is_at_start:
                file_section_chars.move_to_next()
            while file_section_chars.has_current_item:
                strbuff.write(file_section_chars.current_item)
                file_section_chars.move_to_next()
            char_stream_1 = InputStream(strbuff.getvalue())

        paragraph_lexer = TargetParagraphLexer(char_stream_1)
        paragraph_tokens = TokenSourceToIteratorAdapter.make(paragraph_lexer)
        paragraph_chars = TokenToCharIterator.make(paragraph_tokens)

        # Repeat the buffering step for the paragraph-level character iterator.
        with StringIO() as strbuff:
            if paragraph_chars.is_at_start:
                paragraph_chars.move_to_next()
            while paragraph_chars.has_current_item:
                strbuff.write(paragraph_chars.current_item)
                paragraph_chars.move_to_next()
            char_stream_2 = InputStream(strbuff.getvalue())

        makefile_rule_lexer = MakefileRuleLexer(char_stream_2)
        token_stream_1 = CommonTokenStream(makefile_rule_lexer)
        makefile_rule_parser = MakefileRuleParser(token_stream_1)
        makefile_target_iterator = MakefileRuleParserToIteratorAdapter.make(makefile_rule_parser)
        makefile_target_iterator.makefile = makefile
        return makefile_target_iterator
Example #2
    def __init__(self, path=None, stream=None, text=None):
        chars = None
        self.path = path
        if stream is not None:
            # Read the whole stream and decode it (UTF-8 by default) before wrapping it.
            raw = stream.read()
            data = codecs.decode(raw)
            chars = InputStream(data)
            stream.close()
        elif text is not None:
            chars = InputStream(text)
        if chars is not None:
            lexer = MIndentingLexer(chars)
            tokens = CommonTokenStream(lexer)
            super().__init__(tokens)
Example #3
    def __init__(self, path=None, stream=None, text=None):
        chars = None
        self.path = path
        if stream is not None:
            # Python 2 compatible: decode only if the stream did not already yield unicode.
            raw = stream.read()
            data = raw if isinstance(raw, unicode) else codecs.decode(raw, "utf-8")
            chars = InputStream(data)
            stream.close()
        elif text is not None:
            chars = InputStream(text)
        if chars is not None:
            lexer = MIndentingLexer(chars)
            tokens = CommonTokenStream(lexer)
            super(MCleverParser, self).__init__(tokens)
Example #4
def main(argv):
    global mode, build_parse_tree, token_stream

    input_file_name = "default_input_file"

    if len(argv) > 1:
        input_file_name = argv[1]

        if len(argv) > 2:
            mode = argv[2].lower()

            if len(argv) > 3:
                build_parse_tree = argv[3].lower() == "true"

    data = ""
    with open("default_input_file", "r") as file:
        data = file.read().replace("\n", "")
    code_stream = InputStream(data)
    lexer = __TemplateGrammarName__Lexer(code_stream)
    token_stream = CommonTokenStream(lexer)

    __TemplateGrammarName_____RuntimeName__()
    benchmark(True, warm_up_count)
    time = benchmark(False, iteration_count)

    with open("__TemplateGrammarName_____RuntimeName__.benchmark",
              "w") as result_file:
        result_file.write(str(time))
Example #5
    def parse(self):
        if self.spec is None:
            raise STLParseException('STL specification is empty')

        # Parse the STL spec - ANTLR4 magic

        entire_spec = self.modular_spec + self.spec
        input_stream = InputStream(entire_spec)
        lexer = StlLexer(input_stream)
        stream = CommonTokenStream(lexer)
        parser = StlParser(stream)
        parser._listeners = [STLParserErrorListener()]
        ctx = parser.specification_file()

        # Create the visitor for the actual spec nodes
        visitor = STLSpecificationParser(self)
        self.top = visitor.visitSpecification_file(ctx)

        # print('Hello')
        # print(self.unit)
        # print('sampling period unit: ' + str(self.sampling_period_unit))
        # print(self.U[self.unit])
        # print(self.U[self.sampling_period_unit])

        self.normalize = float(self.U[self.unit]) / float(
            self.U[self.sampling_period_unit])
Example #6
    def tokenize(self, pattern: str):
        # split pattern into chunks: sea (raw input) and islands (<ID>, <expr>)
        chunks = self.split(pattern)

        # create token stream from text and tags
        tokens = list()
        for chunk in chunks:
            if isinstance(chunk, TagChunk):
                # add special rule token or conjure up new token from name
                if chunk.tag[0].isupper():
                    ttype = self.parser.getTokenType(chunk.tag)
                    if ttype == Token.INVALID_TYPE:
                        raise Exception("Unknown token " + str(chunk.tag) +
                                        " in pattern: " + pattern)
                    tokens.append(TokenTagToken(chunk.tag, ttype, chunk.label))
                elif chunk.tag[0].islower():
                    ruleIndex = self.parser.getRuleIndex(chunk.tag)
                    if ruleIndex == -1:
                        raise Exception("Unknown rule " + str(chunk.tag) +
                                        " in pattern: " + pattern)
                    ruleImaginaryTokenType = self.parser.getATNWithBypassAlts(
                    ).ruleToTokenType[ruleIndex]
                    tokens.append(
                        RuleTagToken(chunk.tag, ruleImaginaryTokenType,
                                     chunk.label))
                else:
                    raise Exception("invalid tag: " + str(chunk.tag) +
                                    " in pattern: " + pattern)
            else:
                self.lexer.setInputStream(InputStream(chunk.text))
                t = self.lexer.nextToken()
                while t.type != Token.EOF:
                    tokens.append(t)
                    t = self.lexer.nextToken()
        return tokens
Example #7
    def parse(actions, spdOutput, two_stageParsing):
        tokens = CommonTokenStream(SPDGrammarLexer(InputStream(spdOutput)))
        parser = SPDGrammarParser(tokens)
        visitor = SPDGrammarVisitorImplementation(actions)

        if not two_stageParsing:
            visitor.visit(parser.json())

            return visitor._errors

        parser._interp.predictionMode = PredictionMode.SLL
        parser.removeErrorListeners()
        parser._errHandler = BailErrorStrategy()

        try:
            visitor.visit(parser.json())
        except RuntimeError as exception:
            if isinstance(exception, RecognitionException):
                tokens.seek(0)
                parser.addErrorListener(ConsoleErrorListener.INSTANCE)
                parser._errHandler = DefaultErrorStrategy()
                parser._interp.predictionMode = PredictionMode.LL
                visitor.visit(parser.json())

        return visitor._errors
Example #8
    def morph(cls, src, slicing=False):
        inputstream = InputStream(src)
        lexer = SparqlLexer(inputstream)
        stream = antlr4.CommonTokenStream(lexer)
        parser = SparqlParser(stream)
        tree = parser.query()

        # Walk the parse tree with a listener that feeds the morpher.
        morpher = MorpherContext3(slicing=slicing)
        listener = MySparqlParserListener(morpher)
        walker = antlr4.ParseTreeWalker()
        walker.walk(listener, tree)

        return morpher.get_result()
Example #9
def measure(db_path, graph, grammar_path, algorithm):
    grammar = ""
    start = None
    with open(grammar_path, "r") as f:
        for line in f:
            lhs, rhs = line.strip().split(" ", 1)
            if start is None:
                start = lhs
            grammar += f"{lhs} = {rhs};\n"

    script = f"""
connect "{db_path}";
{grammar}
select a, b from "{graph}" where path(a, b, {start}) using "{algorithm}";
"""

    print(script)

    t1 = time.time()
    parsed = parse_query(InputStream(script))
    executor = Executor(FileDatabase())
    executor.execute_many(parsed)
    t2 = time.time()

    return t2 - t1
Example #10
def main(argv):
    data = generate_data()
    code_stream = InputStream(data)
    lexer = LeftRecursionGrammarLexer(code_stream)
    global mode, tokens_stream
    tokens_stream = CommonTokenStream(lexer)

    mode = "SLL"
    sll_left_recursion = benchmark_average("left_recursion_test", "SLL")
    sll_not_left_recursion = benchmark_average("not_left_recursion_test",
                                               "SLL")

    mode = "LL"
    ll_left_recursion = benchmark_average("left_recursion_test", "LL")
    ll_not_left_recursion = benchmark_average("not_left_recursion_test", "LL")

    print("")

    print("SLL:")
    print(f"left recursion: {sll_left_recursion} us")
    print(f"not left recursion: {sll_not_left_recursion} us")
    print(f"ratio: {sll_not_left_recursion / sll_left_recursion:0.2f}")

    print("")

    print("LL:")
    print(f"left recursion: {ll_left_recursion} us")
    print(f"not left recursion: {ll_not_left_recursion} us")
    print(f"ratio: {ll_not_left_recursion / ll_left_recursion:0.2f}")
Example #11
def evaluateDTS(varTs, line):
    istream = InputStream(line)
    lexer = ExprLexer(istream)
    token_stream = CommonTokenStream(lexer)
    parser = ExprParser(token_stream)
    parser.varTs = varTs
    parser.expression()
Example #12
def load_ast_from_str(data):
    f = InputStream(data)
    lexer = openflowLexer(f)
    lexer.addErrorListener(MyErrorListener())
    stream = CommonTokenStream(lexer)
    parser = openflowParser(stream)
    parser.addErrorListener(MyErrorListener())
    tree = parser.openflow_dump_text()
    return tree
Example #13
    def newTokenStreamFromResource(self, resourceName):
        # Port note: the Java original loaded the resource via
        # ClassLoader.getSystemClassLoader().getResourceAsStream(resourceName).
        input_ = None
        self.assertIsNotNone(input_)
        try:
            stream = InputStream(input_)
            return ONamingLexer(stream)
        except Exception as e:
            self.fail(str(e))
            return None
Example #14
def eval_ra_expr(database, ra_str):
    input_stream = InputStream(ra_str)
    lexer = RelationalAlgebraLexer(input_stream)
    token_stream = CommonTokenStream(lexer)
    parser = RelationalAlgebraParser(token_stream)

    parse_tree = parser.relStmt()
    visitor = RelationalAlgebraEvaluator(database)
    return visitor.visit(parse_tree)
Example #15
    def testInsertAfterLastIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)
        rewriter.insertAfter(10, 'x')

        self.assertEqual(rewriter.getDefaultText(), 'abcx')
Example #16
    def testInsertBeforeIndexZero(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)
        rewriter.insertBeforeIndex(0, '0')

        self.assertEqual(rewriter.getDefaultText(), '0abc')
Example #17
    def testReplaceMiddleIndex(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceIndex(1, 'x')

        self.assertEqual(rewriter.getDefaultText(), 'axc')
Example #18
    def testReplaceSubsetThenFetch(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 4, 'xyz')

        self.assertEqual('abxyzba', rewriter.getDefaultText())
Example #19
    def testReplaceAll(self):
        input = InputStream('abcccba')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(0, 6, 'x')

        self.assertEqual('x', rewriter.getDefaultText())
Example #20
def _parse(input):
    input_stream = InputStream(input)
    lexer = PromiseLexer(input_stream)
    token_stream = CommonTokenStream(lexer)
    parser = PromiseParser(token_stream)
    parser._listeners = [SyntaxErrorListener()]
    tree = parser.prog()
    visitor = PromiseSyntaxVisitor()
    visitor.visit(tree)
    return json.dumps(visitor.get_result())
Example #21
def main():
    nom = sys.argv[1]
    print("Llegint el fitxer " + nom)
    inputStream = InputStream(open('cl/input/' + nom).read().strip())
    tokenStream = LexerPlusTokenizer(inputStream)
    tree = Parser(tokenStream)
    visitor = EnquestesVisitor()
    nodes, arestes = visitor.visit(tree)
    nom, graf = crearGraf(nodes, arestes)
    pickleDump(graf, nom)
Example #22
def getLexerFromString(inputText):
    input_stream = InputStream(inputText)
    lexer = QLGrammarLexer(input_stream)
    token_stream = CommonTokenStream(lexer)
    parser = QLGrammarParser(token_stream)
    parser._listeners = [MyErrorListener()]

    tree = parser.form()
    tree_str = tree.toStringTree(recog=parser)
    return str(tree_str)
Example #23
def main(argv):
    fileName = '../../Text'

    if len(argv) > 1:
        fileName = argv[1]

    code = open(fileName, 'r').read()
    codeStream = InputStream(code)
    lexer = __TemplateGrammarName__Lexer(codeStream)
    tokens = lexer.getAllTokens()
Example #24
    def parse(cls, statement):
        char_stream = InputStream(statement)
        lexer = MySQLLexer(char_stream)
        token_stream = CommonTokenStream(lexer)
        parser = MySQLParser(token_stream)
        tree = parser.stat()
        visitor = Visitor(cls.dynamodb)
        parsed_stat = None
        if isinstance(visitor, ParseTreeVisitor):
            parsed_stat = visitor.visit(tree)
        return parsed_stat
Example #25
    def split(self, path: str):
        input = InputStream(path)
        lexer = XPathLexer(input)

        def recover(self, e):
            raise e

        lexer.recover = recover
        lexer.removeErrorListeners()
        lexer.addErrorListener(
            ErrorListener())  # XPathErrorListener does no more
        tokenStream = CommonTokenStream(lexer)
        try:
            tokenStream.fill()
        except LexerNoViableAltException as e:
            pos = lexer.column
            msg = "Invalid tokens or characters at index %d in path '%s'" % (
                pos, path)
            raise Exception(msg, e)

        tokens = iter(tokenStream.tokens)
        elements = list()
        for el in tokens:
            invert = False
            anywhere = False
            # Check for path separators, if none assume root
            if el.type in [XPathLexer.ROOT, XPathLexer.ANYWHERE]:
                anywhere = el.type == XPathLexer.ANYWHERE
                next_el = next(tokens, None)
                if not next_el:
                    raise Exception('Missing element after %s' % el.getText())
                else:
                    el = next_el
            # Check for bangs
            if el.type == XPathLexer.BANG:
                invert = True
                next_el = next(tokens, None)
                if not next_el:
                    raise Exception('Missing element after %s' % el.getText())
                else:
                    el = next_el
            # Add searched element
            if el.type in [
                    XPathLexer.TOKEN_REF, XPathLexer.RULE_REF,
                    XPathLexer.WILDCARD, XPathLexer.STRING
            ]:
                element = self.getXPathElement(el, anywhere)
                element.invert = invert
                elements.append(element)
            elif el.type == Token.EOF:
                break
            else:
                raise Exception("Unknown path element %s" %
                                lexer.symbolicNames[el.type])
        return elements
Example #26
    def testInsertBeforeTokenThenDeleteThatToken(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.insertBeforeIndex(1, 'foo')
        rewriter.replaceRange(1, 2, 'foo')

        self.assertEqual('afoofoo', rewriter.getDefaultText())
Example #27
    def testLeaveAloneDisjointInsert2(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(2, 3, 'foo')
        rewriter.insertBeforeIndex(1, 'x')

        self.assertEqual('axbfoo', rewriter.getDefaultText())
Example #28
    def testDropIdenticalReplace(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(1, 2, 'foo')
        rewriter.replaceRange(1, 2, 'foo')

        self.assertEqual('afooc', rewriter.getDefaultText())
Example #29
    def testOverlappingReplace4(self):
        input = InputStream('abcc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.replaceRange(1, 2, 'foo')
        rewriter.replaceRange(1, 3, 'bar')

        self.assertEqual('abar', rewriter.getDefaultText())
Example #30
    def testCombineInsertOnLeftWithDelete(self):
        input = InputStream('abc')
        lexer = TestLexer(input)
        stream = CommonTokenStream(lexer=lexer)
        stream.fill()
        rewriter = TokenStreamRewriter(tokens=stream)

        rewriter.delete('default', 0, 2)
        rewriter.insertBeforeIndex(0, 'z')

        self.assertEqual('z', rewriter.getDefaultText())