Example #1
def solution():
    # expressions = TEST_EXPRESSIONS
    with open(os.path.join(os.path.dirname(__file__), './input.txt'),
              'r') as fd:
        expressions = fd.read()

    # PART ONE
    s = 0
    for expression in expressions.splitlines():
        lexer = calc1Lexer(antlr4.InputStream(expression))
        stream = antlr4.CommonTokenStream(lexer)
        parser = calc1Parser(stream)
        tree = parser.expression()
        result = handle_expression(tree)
        s += result
    print("1:", s)

    # PART TWO
    s = 0
    for expression in expressions.splitlines():
        lexer = calc2Lexer(antlr4.InputStream(expression))
        stream = antlr4.CommonTokenStream(lexer)
        parser = calc2Parser(stream)
        tree = parser.expression()
        result = handle_expression(tree)
        s += result
    print("2:", s)
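Parts one and two above differ only in which generated lexer/parser pair they use, so the shared pipeline can be factored out. A minimal sketch of such a helper, assuming only the standard antlr4 runtime API (parse_rule itself is hypothetical, not part of the original source):

import antlr4

def parse_rule(lexer_cls, parser_cls, rule_name, text):
    # InputStream -> Lexer -> CommonTokenStream -> Parser, then invoke the start rule
    lexer = lexer_cls(antlr4.InputStream(text))
    stream = antlr4.CommonTokenStream(lexer)
    parser = parser_cls(stream)
    return getattr(parser, rule_name)()

# usage sketch: tree = parse_rule(calc1Lexer, calc1Parser, 'expression', expression)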
Example #2
def getProductions(code):
    stream = antlr4.InputStream(code)
    lexer = JavaLexer(stream)
    toks = antlr4.CommonTokenStream(lexer)
    parser = JavaParserModified(toks)

    # The inputs are always methods, so start from memberDeclaration
    tree = parser.memberDeclaration()

    # Walk the parse tree iteratively to collect the production-rule sequence
    st = [tree]

    rule_seq = []

    while st:
        top = st.pop()
        (name, typ) = nname(top)
        if name == "ErrorN":
            return None  # There is a parsing error
        if typ == "T":  # Terminal
            pass
        else:  # Non-terminal
            rule = getRuleAtNode(top)
            rule_seq.append(rule)  # record the production used at this node
            # push children in reverse so the leftmost is popped first
            for i in range(top.getChildCount() - 1, -1, -1):
                st.append(top.getChild(i))

    # Ignore the first 6 production rules
    return rule_seq[6:]
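The explicit stack visits nodes in pre-order, so the recorded rule sequence matches a left-to-right derivation of the method. For illustration, a recursive equivalent reusing the snippet's nname and getRuleAtNode helpers (this function is a hypothetical rewrite, not from the source):

def production_sequence(node):
    # recursive pre-order version of the stack-based walk above
    name, typ = nname(node)
    if name == "ErrorN":
        raise ValueError("parse error")
    if typ == "T":  # terminals contribute no productions
        return []
    rules = [getRuleAtNode(node)]
    for i in range(node.getChildCount()):
        rules.extend(production_sequence(node.getChild(i)))
    return rules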
Example #3
def parseCSharp(code):
    code = code.replace('\\n', '\n')
    parsedVersion = []
    stream = antlr4.InputStream(code)
    lexer = CSharp4Lexer(stream)
    toks = antlr4.CommonTokenStream(lexer)
    toks.fetch(500)  # eagerly pull up to the first 500 tokens into the stream

    identifiers = {}
    identCount = 0
    for token in toks.tokens:
        if token.type == 109:
            parsedVersion += ["CODE_INTEGER"]
        elif token.type == 111:
            parsedVersion += ["CODE_REAL"]
        elif token.type == 112:
            parsedVersion += ["CODE_CHAR"]
        elif token.type == 113:
            parsedVersion += ["CODE_STRING"]
        elif token.type in (6, 7, 9):  # whitespace, comments, and newlines
            pass
        else:
            parsedVersion += [str(token.text)]

    return parsedVersion
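The effect is that literal values are masked with placeholder tokens while all other tokens pass through verbatim. A hypothetical call (the exact token type numbers depend on the generated CSharp4Lexer):

print(parseCSharp('int x = 42;'))
# expected shape: ['int', 'x', '=', 'CODE_INTEGER', ';']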
Example #4
    def parseFromString(self,
                        buf,
                        encoding='latin-1',
                        trace=False,
                        dbname=":memory:"):
        self.fnbase = dbname
        return self.parse(antlr4.InputStream(buf), trace)
Example #5
    def __call__(self, parser, namespace, values, option_string=None):
        from azure.cli.command_modules.monitor.grammar import (
            MetricAlertConditionLexer, MetricAlertConditionParser,
            MetricAlertConditionValidator)

        usage = 'usage error: --condition {avg,min,max,total,count} [NAMESPACE.]METRIC {=,!=,>,>=,<,<=} THRESHOLD\n' \
                '                         [where DIMENSION {includes,excludes} VALUE [or VALUE ...]\n' \
                '                         [and   DIMENSION {includes,excludes} VALUE [or VALUE ...] ...]]'

        string_val = ' '.join(values)

        lexer = MetricAlertConditionLexer(antlr4.InputStream(string_val))
        stream = antlr4.CommonTokenStream(lexer)
        parser = MetricAlertConditionParser(stream)
        tree = parser.expression()

        try:
            validator = MetricAlertConditionValidator()
            walker = antlr4.ParseTreeWalker()
            walker.walk(validator, tree)
            metric_condition = validator.result()
            for item in [
                    'time_aggregation', 'metric_name', 'threshold', 'operator'
            ]:
                if not getattr(metric_condition, item, None):
                    raise CLIError(usage)
        except (AttributeError, TypeError, KeyError):
            raise CLIError(usage)
        super(MetricAlertConditionAction,
              self).__call__(parser, namespace, metric_condition,
                             option_string)
Example #6
def main() -> None:
    file_name = "/home/raptor/projects/thrifty/thriftpy/echo.thrift"
    with open(file_name, 'r', encoding='utf-8') as f:
        lexer = ThriftLexer(antlr4.InputStream(f.read()))

    token_stream = antlr4.CommonTokenStream(lexer)

    parser = ThriftParser(token_stream)

    tree_walker = antlr4.ParseTreeWalker()

    file_loader = FileLoader(name=file_name)
    tree_walker.walk(file_loader, parser.document())

    model = file_loader.thrifty_file

    # ====================================================
    # generate the files
    # ====================================================
    template_name = "/home/raptor/projects/thrifty/thriftgen/thriftgen/templates/py3/service.pyi.hbs"
    with open(template_name, 'r', encoding='utf-8') as template_file:
        template = template_file.read()

    hbs = pybars.Compiler().compile(source=template)
    print(hbs(model, helpers=helpers))
Example #7
def tokenize(seq: Sequence[str]):
    counter = Counter()
    token_sequences = []

    for i, s in enumerate(seq):
        if (i % 2000) == 0:
            print(f'tokenize:: query {i}/{len(seq)}')

        input_stream = antlr4.InputStream(s)
        lex = SqlBaseLexer(input_stream)
        tokens = lex.getAllTokens()

        token_sequence: deque[Tuple[str, int]] = deque()
        token_sequence.append((startToken, reverseTokenMap[startToken]))

        for t in tokens:
            # mask string literal contents; compare token types with ==, not is
            text: str = "''" if t.type == reverseTokenMap['STRING'] else t.text
            token_type: int = t.type
            if token_type in tokenTypesWithUniqueValues:
                counter[text] += 1

            token_sequence.append((text, token_type))

        token_sequence.append((endToken, reverseTokenMap[endToken]))
        token_sequences.append(token_sequence)
    return token_sequences, counter
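Note that this path never builds a CommonTokenStream: lexer.getAllTokens() is enough when only the raw token sequence matters. A hypothetical invocation, assuming the surrounding token maps (startToken, endToken, reverseTokenMap, tokenTypesWithUniqueValues) are defined as in the rest of that module:

sequences, counter = tokenize(["SELECT a FROM t", "SELECT 'x' FROM u"])
print(len(sequences), counter.most_common(5))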
Example #8
def get_code():
    errors = []
    code = ""
    code = request.form["codigo"]
    session.code = code
    print("errores", errors)
    if code.strip():
        text = antlr4.InputStream(code)
        lexer = DecafLexer(text)
        stream = CommonTokenStream(lexer)
        parser = DecafParser(stream)
        tree = parser.program()
        printer = DecafListener()
        walker = ParseTreeWalker()
        walker.walk(printer, tree)
        nani = Visitor.MyDecafVisitor()
        nani.visit(tree)
        errors = nani.ERRORS
        intermedio = inter.Inter(nani.total_scopes)
        intermedio.visit(tree)
        intercode = intermedio.line.split("\n")
    else:
        intercode = []  # keep intercode defined so the render call below works
    return render_template("home.html",
                           errors=errors,
                           code=code,
                           intercode=intercode)
Example #9
    def __call__(self, parser, namespace, values, option_string=None):
        from azure.cli.command_modules.monitor.grammar.autoscale import (
            AutoscaleConditionLexer, AutoscaleConditionParser,
            AutoscaleConditionValidator)

        # pylint: disable=line-too-long
        usage = 'usage error: --condition ["NAMESPACE"] METRIC {==,!=,>,>=,<,<=} THRESHOLD {avg,min,max,total,count} PERIOD\n' \
                '                         [where DIMENSION {==,!=} VALUE [or VALUE ...]\n' \
                '                         [and   DIMENSION {==,!=} VALUE [or VALUE ...] ...]]'

        string_val = ' '.join(values)

        lexer = AutoscaleConditionLexer(antlr4.InputStream(string_val))
        stream = antlr4.CommonTokenStream(lexer)
        parser = AutoscaleConditionParser(stream)
        tree = parser.expression()

        try:
            validator = AutoscaleConditionValidator()
            walker = antlr4.ParseTreeWalker()
            walker.walk(validator, tree)
            autoscale_condition = validator.result()
            for item in [
                    'time_aggregation', 'metric_name', 'threshold', 'operator',
                    'time_window'
            ]:
                if not getattr(autoscale_condition, item, None):
                    raise CLIError(usage)
        except (AttributeError, TypeError, KeyError):
            raise CLIError(usage)

        namespace.condition = autoscale_condition
Example #10
def init_visitor():
    # open the test file for tokens
    with open('test.txt', 'r') as myfile:
        data = myfile.read()

    actual_data = antlr4.InputStream(data)
    # hand the InputStream to the lexer
    lexer = decafLexer(actual_data)
    stream = antlr4.CommonTokenStream(lexer)
    parser = decafParser(stream)

    tree = parser.program()

    c_visitor = EvalVisitor()

    # generate the basic types
    c_visitor.t_tipos.generate_default_values()
    # generate the default scope
    c_visitor.t_ambitos.generate_default(c_visitor.t_tipos.search_type('void'))

    # Traverse the tree
    c_visitor.visit(tree)

    # THE INTERMEDIATE CODE VISITOR

    i_visitor = NewVisitor()
    # insert the values extracted from the prev visitor
    i_visitor.insert_tables(c_visitor.t_simbolos, c_visitor.t_tipos,
                            c_visitor.t_ambitos)
    # visit the tree
    i_visitor.visitAll(tree)

    return c_visitor, i_visitor
Example #11
    def sniff(self, file_path):
        with open(file_path, encoding='windows-1250') as file:
            content = file.read()
            lines = content.split('\n')

        input_stream = antlr4.InputStream(content)
        lexer = DaedalusLexer(input_stream)
        token_stream = antlr4.CommonTokenStream(lexer)
        parser = DaedalusParser(token_stream)

        listener = SyntaxErrorListener()
        parser.addErrorListener(listener)
        parse_tree = parser.daedalusFile()

        if listener.errors_count:
            msg = f"{listener.errors_count} syntax error(s) generated"
            print(msg, file=sys.stderr)
            return

        init_visitor = InitializationVisitor()
        init_visitor.visit(parse_tree)
        self.update_data(init_visitor.npcs, init_visitor.infos)

        sniffing_visitor = DataSniffingVisitor(lines, self.id_2_npc,
                                               self.id_2_info)
        sniffing_visitor.visit(parse_tree)
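SyntaxErrorListener is not shown in this snippet; a minimal error-counting listener, sketched here as an assumption about its shape, only needs to subclass the runtime's ErrorListener and override syntaxError:

from antlr4.error.ErrorListener import ErrorListener

class SyntaxErrorListener(ErrorListener):
    # minimal sketch: count syntax errors instead of printing them
    def __init__(self):
        super().__init__()
        self.errors_count = 0

    def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
        self.errors_count += 1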
Example #12
def run_nlpql_parser(nlpql_txt: str):
    lexer = nlpql_lexer(antlr4.InputStream(nlpql_txt))
    stream = antlr4.CommonTokenStream(lexer)
    parser = nlpql_parserParser(stream)
    tree = parser.validExpression()
    res = handle_expression(tree)
    return res
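A hypothetical invocation, reading the NLPQL text from a file (example.nlpql is a placeholder name):

with open('example.nlpql', 'r') as f:
    result = run_nlpql_parser(f.read())
print(result)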
Example #13
def casos_de_teste_semantico():
    print('----------------------------------------------------------')
    print('CASOS DE TESTE DO ANALISADOR SEMÂNTICO')
    print('----------------------------------------------------------')
    for i in range(1, 2):
        with open(DIRETORIO_PROJETO + CAMINHO_ARQUIVOS_ENTRADA + SEMANTICO +
                  'ct_semantico_' + str(i) + '.txt',
                  encoding='utf-8') as caso_de_teste:
            programa = caso_de_teste.read()
            programa_input = antlr4.InputStream(programa)

            lexer = tileLexer(input=programa_input)
            lexer.removeErrorListeners()
            tokens = antlr4.CommonTokenStream(lexer=lexer)

            parser = tileParser(tokens)

            parser.removeErrorListeners()
            erros_sintaticos = ErrosSintaticosErrorListener()
            parser.addErrorListener(erros_sintaticos)
            try:
                arvore = parser.mapa()
                analisador_semantico = AnalisadorSemantico()
                analisador_semantico.visitMapa(arvore)

                warnings = analisador_semantico.get_warnings()
                print('[CT' + str(i) + '_SEMANTICO] Compilação finalizada' +
                      (' com warnings. ' if warnings != '' else '.'))
                if warnings != '':
                    print('\t' + warnings.replace('\n', '\n\t'),
                          file=sys.stderr)
            except Exception as e:
                print('[CT' + str(i) + '_SEMANTICO] ' + str(e),
                      file=sys.stderr)
Example #14
def casos_de_teste_sintatico():
    print('----------------------------------------------')
    print('CASOS DE TESTE DO ANALISADOR LÉXICO/SINTÁTICO ')
    print('----------------------------------------------')
    for i in range(0, 3):
        with open(DIRETORIO_PROJETO + CAMINHO_ARQUIVOS_ENTRADA + SINTATICO +
                  'ct_sintatico_' + str(i) + '.txt',
                  encoding='utf-8') as caso_de_teste:
            programa = caso_de_teste.read()

            programa_input = antlr4.InputStream(programa)

            lexer = tileLexer(input=programa_input)
            lexer.removeErrorListeners()
            tokens = antlr4.CommonTokenStream(lexer=lexer)

            parser = tileParser(tokens)

            parser.removeErrorListeners()
            erros_sintaticos = ErrosSintaticosErrorListener()
            parser.addErrorListener(erros_sintaticos)
            try:
                parser.mapa()
                print('[CT' + str(i) + '_SINTATICO] compilação finalizada.')
            except Exception as e:
                print('[CT' + str(i) + '_SINTATICO] ' + str(e),
                      file=sys.stderr)
Example #15
    def compile(self, string, name, ret_type, args_types, verbose=True, mode32=False):

        input_stream = at.InputStream(string)
        lexer = EasyLexer(input_stream)
        stream = at.CommonTokenStream(lexer)
        parser = EasyParser(stream)
        tree = parser.compileUnit()

        generator = LLVMCodeGenerator()
        tree.accept(generator)
        if verbose:
            print(generator.module)

        mod = llvm.parse_assembly(str(generator.module))
        mod.verify()
        if mode32:
            mod.triple = "i386-pc-linux-gnu"
        else:
            mod.triple = self.target.create_target_machine().triple

        self.engine.add_module(mod)
        self.engine.finalize_object()
        self.engine.run_static_constructors()

        func_ptr = self.engine.get_function_address(name)

        func = CFUNCTYPE(ret_type, *args_types)(func_ptr)
        return func
Example #16
def parse(
    text: str,
    start: str,
    strict=False,
    transform: Union[str, Callable] = None,
    error_listener: ErrorListener = None,
) -> ParseTree:

    lexer = LexerGo(antlr4.InputStream(text))
    lexer.removeErrorListeners()
    lexer.addErrorListener(LexerErrorListener())

    stream = CommonTokenStream(lexer)
    parser = ParserGo(stream)

    parser.buildParseTrees = True  # default

    # error listeners must be attached before parsing to have any effect
    if strict:
        error_listener = StrictErrorListener()
    if error_listener is not None:
        parser.removeErrorListeners()
        parser.addErrorListener(error_listener)

    tree = parser.sourceFile()
    printer = ParserGoListener()
    walker = ParseTreeWalker()
    walker.walk(printer, tree)

    print(Trees.toStringTree(tree, None, parser))

    return tree
Example #17
    def traslate(self, string, verbose=False, optimize=False, mode32=False):
        input_stream = at.InputStream(string)
        lexer = EasyLexer(input_stream)
        stream = at.CommonTokenStream(lexer)
        parser = EasyParser(stream)
        parser.addErrorListener(MyErrorListener())
        tree = parser.compileUnit()

        generator = LLVMCodeGenerator(mode32=mode32)
        tree.accept(generator)

        target_machine = self.target.create_target_machine()

        mod = llvm.parse_assembly(str(generator.module))
        mod.verify()
        if mode32:
            mod.triple = "i386-pc-linux-gnu"
        else:
            mod.triple = target_machine.triple

        if optimize:
            pmb = llvm.create_pass_manager_builder()
            pmb.opt_level = 3
            pm = llvm.create_module_pass_manager()
            pmb.populate(pm)
            pm.run(mod)

        if verbose:
            print(mod)

        return str(mod)
Example #18
    def test_parse_string(self):
        """Test that device name is extracted"""
        bb = parse(antlr4.InputStream(test_file))

        assert bb._var == {"alpha": 0.3423}

        expected = {
            "name": "fock",
            "options": {
                "num_subsystems": 1,
                "cutoff_dim": 7,
                "shots": 10
            }
        }
        assert bb.target == expected

        expected = [
            {
                "op": "Coherent",
                "args": [0.3423, np.sqrt(np.pi)],
                "kwargs": {},
                "modes": [0]
            },
            {
                "op": "MeasureFock",
                "args": [],
                "kwargs": {},
                "modes": [0]
            },
        ]

        assert bb.operations == expected
Example #19
    def __call__(self, parser, namespace, values, option_string=None):
        # antlr4 is not available everywhere, restrict the import scope so that commands
        # that do not need it don't fail when it is absent
        import antlr4

        from azext_scheduled_query.grammar.scheduled_query import (
            ScheduleQueryConditionLexer, ScheduleQueryConditionParser,
            ScheduleQueryConditionValidator)

        usage = 'usage error: --condition {avg,min,max,total,count} ["METRIC COLUMN" from]\n' \
                '                         "QUERY_PLACEHOLDER" {=,!=,>,>=,<,<=} THRESHOLD\n' \
                '                         [resource id RESOURCEID]\n' \
                '                         [where DIMENSION {includes,excludes} VALUE [or VALUE ...]\n' \
                '                         [and   DIMENSION {includes,excludes} VALUE [or VALUE ...] ...]]\n' \
                '                         [at least MinTimeToFail violations out of EvaluationPeriod aggregated points]'
        string_val = ' '.join(values)

        lexer = ScheduleQueryConditionLexer(antlr4.InputStream(string_val))
        stream = antlr4.CommonTokenStream(lexer)
        parser = ScheduleQueryConditionParser(stream)
        tree = parser.expression()

        try:
            validator = ScheduleQueryConditionValidator()
            walker = antlr4.ParseTreeWalker()
            walker.walk(validator, tree)
            scheduled_query_condition = validator.result()
            for item in ['time_aggregation', 'threshold', 'operator']:
                if not getattr(scheduled_query_condition, item, None):
                    raise InvalidArgumentValueError(usage)
        except (AttributeError, TypeError, KeyError):
            raise InvalidArgumentValueError(usage)
        super(ScheduleQueryConditionAction,
              self).__call__(parser, namespace, scheduled_query_condition,
                             option_string)
Example #20
def _stringToAST(s):

    input_stream = antlr4.InputStream(s)

    error_listener = _SimpleErrorListener()

    #
    # Lexer
    #
    lexer = LangLexer.LangLexer(input_stream, output=None)
    lexer.removeErrorListeners()
    lexer.addErrorListener(error_listener)

    tokens = CommonTokenStream(lexer)

    #
    # Parser
    #
    parser = LangParser.LangParser(tokens, output=None)
    parser.removeErrorListeners()
    parser.addErrorListener(error_listener)

    parsed_tree = parser.program()

    return parser, parsed_tree
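Because the parser is returned alongside the tree, callers can check for syntax errors after the fact via the standard Parser API; the input string below is hypothetical since the Lang grammar is not shown:

parser, tree = _stringToAST("x = 1")
if parser.getNumberOfSyntaxErrors() == 0:
    print(tree.toStringTree(recog=parser))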
Example #21
def text_to_seq(s: str,
                reverse_common_words_dictionary: Dict[str, int],
                i=None,
                omit_unknown=True):
    if i and i % 1000 == 0:
        print(f'iteration {i}')
    unknownTokenCode = reverseTokenMap[unknownToken]
    startTokenCode = reverseTokenMap[startToken]

    input_stream = antlr4.InputStream(s)
    lex = SqlBaseLexer(input_stream)
    tokens = lex.getAllTokens()
    int_seq = [startTokenCode]
    for token in tokens:
        token_type = token.type
        text = token.text

        if (token_type == reverseTokenMap['WS']) or (token_type < 9):
            continue

        if token_type not in tokenTypesWithUniqueValues:
            int_seq.append(token_type)
            continue

        code = reverse_common_words_dictionary.get(text)
        if code is None:
            code = unknownTokenCode
            if omit_unknown:
                continue
        int_seq.append(code)
    return int_seq
Example #22
def build_parse_tree(expr_str):
    # round-trip through bytes so non-ASCII input fails fast
    data = codecs.decode(bytes(expr_str, 'utf-8'), 'ascii', 'strict')
    input_stream = antlr4.InputStream(data)
    lexer = exprLexer(input_stream)
    stream = antlr4.CommonTokenStream(lexer)
    parser = exprParser(stream)
    return ParseTree(parser.expression())
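A hypothetical call; because of the ASCII round-trip above, non-ASCII input raises UnicodeDecodeError before the lexer ever runs:

tree = build_parse_tree("1 + 2 * 3")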
Example #23
def loads(jsonv_str, bindings=None):
    bindings = bindings or {}
    f = antlr4.InputStream(jsonv_str)
    lexer = JSONvLexer(f)
    stream = antlr4.CommonTokenStream(lexer)
    parser = JSONvParser(stream)
    return JSONvPythonVisitor(bindings).visit(parser.jsonv())
Example #24
def parseJava(code):
    code = code.replace('\\<nl>', '\n')
    parsedVersion = []
    stream = antlr4.InputStream(code)
    lexer = JavaLexer(stream)
    toks = antlr4.CommonTokenStream(lexer)
    toks.fetch(500)  # eagerly pull up to the first 500 tokens into the stream
    
    identifiers = {}
    identCount = 0
    for token in toks.tokens:
        if token.type in (51, 52, 53, 54, 57, 60):
            parsedVersion += ["CODE_INTEGER"]
        elif token.type in (55, 56):
            parsedVersion += ["CODE_REAL"]
        elif token.type == 58:
            parsedVersion += ["CODE_CHAR"]
        elif token.type == 59:
            parsedVersion += ["CODE_STRING"]
        elif token.type in (108, 109, 110):  # whitespace and comments
            pass
        else:
            parsedVersion += [str(token.text)]

    return parsedVersion
Example #25
def casos_gerador():
    with open('oi.txt', 'r') as caso_de_teste:
        mapa = caso_de_teste.read()
        mapa_input = antlr4.InputStream(mapa)

        lexer = TileMapLexer(input=mapa_input)
        lexer.removeErrorListeners()
        tokens = antlr4.CommonTokenStream(lexer=lexer)

        parser = TileMapParser(tokens)

        parser.removeErrorListeners()
        erros_sintaticos = ErrosSintaticosErrorListener()
        parser.addErrorListener(erros_sintaticos)
        try:
            mapa = parser.mapa()
            gerador = Gerador()
            gerador.visitMap(mapa)
            print('Compilação finalizada')

            codigo_gerado = gerador.getCodigo()

            arquivo = "Teste.html"
            with open(arquivo, 'w', encoding='utf-8') as arquivo_saida:
                arquivo_saida.write(codigo_gerado)
        except Exception as e:
            print(str(e), file=sys.stderr)
Example #26
    def __process_sympy(latex):
        matherror = MathErrorListener(latex)

        stream = antlr4.InputStream(latex)
        lex = PSLexer(stream)
        lex.removeErrorListeners()
        lex.addErrorListener(matherror)

        tokens = antlr4.CommonTokenStream(lex)
        parser = PSParser(tokens)

        # remove default console error listener
        parser.removeErrorListeners()
        parser.addErrorListener(matherror)

        relation = parser.math().relation()
        expr = Math.__convert_relation(relation)

        if type(expr) in [int, float, One, Zero] or issubclass(
                Integer, type(expr)):
            return lambda: expr
        else:

            def fn(**kwargs):
                if type(expr) in [sympy.Integral, sympy.Limit]:
                    return expr.doit(**kwargs)
                else:
                    return expr.evalf(subs=kwargs)

            return fn
Example #27
def getTree(s):
    """Returns antlr4's parse tree for input."""
    stream = antlr4.InputStream(s)
    lexer = myjsonLexer(stream)
    tokens = antlr4.CommonTokenStream(lexer)
    parser = myjsonParser(tokens)
    return parser.elem()
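A hypothetical call, assuming the myjson grammar accepts ordinary JSON; getText() on the returned context concatenates the matched input back out:

tree = getTree('{"a": 1}')
print(tree.getText())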
Example #28
def antlr_parse(source, translation_unit=False):
    """
    Parse the `source` string into an AST node.

    We first call ANTLR4 to convert the `source` string into a parse tree. Then
    we convert the parse tree into an AST.

    translation_unit ... if True, only accept full programs or modules (the
    'root' rule). If False, accept any valid Fortran code (the 'unit' rule).
    """
    if source.strip() == "":
        return
    stream = antlr4.InputStream(source)
    lexer = fortranLexer(stream)
    tokens = antlr4.CommonTokenStream(lexer)
    parser = fortranParser(tokens)
    parser.removeErrorListeners()
    err = VerboseErrorListener()
    err.source = source
    parser.addErrorListener(err)
    if translation_unit:
        parse_tree = parser.root()
    else:
        parse_tree = parser.units()
    v = ASTBuilderVisitor()
    ast_tree = v.visit(parse_tree)
    if isinstance(ast_tree, list):
        assert len(ast_tree) > 1
        for x in ast_tree:
            assert isinstance(x, ast.AST)
    elif ast_tree is None:
        pass
    else:
        assert isinstance(ast_tree, ast.AST)
    return ast_tree
Example #29
    def run(self, scalingFactor: float, inputFilename: str, prefix: str,
            directory: str):

        if not prefix:
            # derive the prefix from the input file name when none is given
            prefix = os.path.basename(os.path.splitext(inputFilename)[0])

        if not os.path.exists(directory):
            os.makedirs(directory)

        for value in getCodeInsideTIKZAfterUnrolling(inputFilename):
            self.printContents(value)
            input_stream = antlr4.InputStream(value)
            lexer = TikzLexer(input_stream)
            stream = antlr4.CommonTokenStream(lexer)
            parser = TikzParser(stream)
            parser.addErrorListener(TikzErrorListener())
            tree = parser.begin()

            # we save file as filename_t_{n}_graph.graphml
            # Getting next available output file path
            j = 0
            while os.path.exists(
                    os.path.join(directory, f"{prefix}_{j}_graph.graphml")):
                j += 1
            outputFilename = os.path.join(directory,
                                          f"{prefix}_{j}_graph.graphml")
            htmlChat = CustomTikzListener(inputFilename, outputFilename,
                                          scalingFactor)
            walker = antlr4.ParseTreeWalker()
            walker.walk(htmlChat, tree)
Example #30
def test_handle_simple_if_statement():
    source = """
        class Main {
           function void main() {
              if (true) {
                do Output.printInt(1);
              }

              return;
           }
        }
    """
    result = compiler.generate(antlr4.InputStream(source))

    assert result == (
        "function Main.main 0\n"
        "push constant 0\n"
        "not\n"
        "not\n"
        "if-goto IF_END.2\n"
        "push constant 1\n"
        "call Output.printInt 1\n"
        "pop temp 0\n"
        "label IF_END.2\n"
        "push constant 0\n"
        "return\n"
    )