def main(argv, debug, formato_export):
    print("Compilando " + argv + "...")
    path = os.getcwd() + '/Programas/'
    programa = path + argv

    input = FileStream(programa, encoding='utf8')
    lexer = GramaticaLexer(input)
    stream = CommonTokenStream(lexer)
    parser = GramaticaParser(stream)
    tree = parser.gramatica()
    listen = GramaticaListener()
    walker = ParseTreeWalker()
    walker.walk(listen, tree)

    print("Ningun error de sintaxis encontrado")

    instrucciones_indexadas = []

    # Parse into tuples and build the instruction list
    print("Parseando archivo...")
    with open(programa) as file:
        contador = 0
        for line in file:
            line = line.strip()
            if(line):
                instrucciones_indexadas.append(
                    extraerOperandos(line, contador))
                contador += 1

    # Substitute the labels
    print("Sustituyendo etiquetas...")
    posicion = 0
    for instruccion in instrucciones_indexadas:

        # If the instruction is not a label
        if(len(instruccion) > 2):
            nombre_instruccion = instruccion[1].lower()
            valor_instruccion_diccionario = instrucciones_diccionario[nombre_instruccion]

            # If the instruction ends with a label
            if(valor_instruccion_diccionario[3] == True):
                etiqueta = instruccion[-1]
                indice = encontrarEtiqueta(instrucciones_indexadas, etiqueta)
                instrucciones_indexadas[posicion][-1] = indice

        posicion += 1

    # Remove the labels
    print("Removiendo etiquetas...")
    removerEtiquetas(instrucciones_indexadas)

    # Create a new index for the instructions
    print("Indexando instrucciones...")
    instrucciones_indexadas = indexarInstrucciones(instrucciones_indexadas)

    # Readjust each jump instruction using the first index
    print("Reajustando indices...")
    posicion = 0
    for instruccion in instrucciones_indexadas:
        nombre_instruccion = instruccion[2].lower()
        valor_instruccion_diccionario = instrucciones_diccionario[nombre_instruccion]
        if(valor_instruccion_diccionario[3] == True):
            if(nombre_instruccion == "j"):

                direccion = instruccion[-1]
                indice = encontrarEtiquetaReIndexada(
                    instrucciones_indexadas, direccion)
                instrucciones_indexadas[posicion][-1] = indice - 1

            else:
                direccion = instruccion[-1]
                indice = encontrarEtiquetaReIndexadaRelativa(
                    instrucciones_indexadas, direccion, instruccion[0])
                instrucciones_indexadas[posicion][-1] = indice

        posicion += 1

    # Remove the temporary indices
    print("Removiendo indices temporales...")
    instrucciones_indexadas = removerIndicePreTags(instrucciones_indexadas)

    # for instruccion in instrucciones_indexadas:
    #     print(instruccion)

    # Final compilation
    print("Realizando compilacion final...")
    instrucciones_compiladas = []
    for instruccion in instrucciones_indexadas:
        nombre_instruccion = instruccion[1].lower()
        valor_instruccion_diccionario = instrucciones_diccionario[nombre_instruccion]
        tipo_instruccion = valor_instruccion_diccionario[1]

        # The funct field needs to be extracted
        if(tipo_instruccion == "R"):

            op_code = valor_instruccion_diccionario[0]
            funct = valor_instruccion_diccionario[2]

            if(valor_instruccion_diccionario[4] == False):
                # The following values are binary strings
                operando_rd = registros[instruccion[2]]
                operando_rs = registros[instruccion[3]]
                operando_rt = registros[instruccion[4]]
                shamt = "00000"

                if(debug):
                    instruccion_compilada = op_code + " " + operando_rs + " " + \
                        operando_rt + " " + operando_rd + " " + shamt + " " + funct
                else:
                    instruccion_compilada = op_code + operando_rs + \
                        operando_rt + operando_rd + shamt + funct

            else:
                operando_rd = registros[instruccion[2]]
                operando_rs = registros[instruccion[3]]
                operando_rt = "00000"
                shamt = "{0:b}".format(int(instruccion[4]))

                if(len(shamt) < 5):
                    if(int(instruccion[4]) >= 0):
                        shamt = extender5Bits(shamt)
                    else:
                        shamt = twosComplement(int(instruccion[4]), 5)[2:]

                if(debug):
                    instruccion_compilada = op_code + " " + operando_rs + " " + operando_rt + " " + \
                        operando_rd + " " + shamt + " " + funct
                else:
                    instruccion_compilada = op_code + operando_rs + operando_rt + \
                        operando_rd + shamt + funct

            instrucciones_compiladas.append(instruccion_compilada)

        elif(tipo_instruccion == "I"):
            if(valor_instruccion_diccionario[5] == False):

                # The following values are binary strings
                op_code = valor_instruccion_diccionario[0]
                operando_rt = registros[instruccion[2]]
                operando_rs = registros[instruccion[3]]

                inmediate = "{0:b}".format(int(instruccion[4]))
                if(len(inmediate) < 16):
                    if(int(instruccion[4]) >= 0):
                        inmediate = extender16Bits(inmediate)
                    else:
                        # Negative immediate: two's complement, dropping the "0b" prefix
                        inmediate = twosComplement(int(instruccion[4]), 16)[2:]

                if(debug):
                    instruccion_compilada = op_code + " " + \
                        operando_rs + " " + operando_rt + " " + inmediate
                else:
                    instruccion_compilada = op_code + operando_rs + operando_rt + inmediate

                instrucciones_compiladas.append(instruccion_compilada)
            else:
                # The following values are binary strings
                op_code = valor_instruccion_diccionario[0]
                operando_rs = registros[instruccion[2]]
                operando_compuesto = instruccion[3]

                inmediate = operando_compuesto[0:operando_compuesto.find("(")]
                inmediate_decimal = inmediate
                inmediate = "{0:b}".format(int(inmediate))

                operando_rt = operando_compuesto[operando_compuesto.find(
                    "(") + 1:operando_compuesto.find(")")]

                operando_rt = registros[operando_rt]
                # print("THERE " + inmediate_decimal)
                if(len(inmediate) < 16):
                    if(int(inmediate_decimal) >= 0):
                        inmediate = extender16Bits(inmediate)
                    else:
                        # Negative immediate: two's complement, dropping the "0b" prefix
                        inmediate = twosComplement(
                            int(inmediate_decimal), 16)[2:]

                if(debug):
                    instruccion_compilada = op_code + " " + operando_rt + " " + \
                        operando_rs + " " + inmediate
                else:
                    instruccion_compilada = op_code + operando_rt + \
                        operando_rs + inmediate

                instrucciones_compiladas.append(instruccion_compilada)

        elif(tipo_instruccion == "J"):
            # The following values are binary strings
            op_code = valor_instruccion_diccionario[0]
            address = str(binary(instruccion[2]))

            if(len(address) < 26):
                address = extender26Bits(address)

            if(debug):
                instruccion_compilada = op_code + " " + address
            else:
                instruccion_compilada = op_code + address

            instrucciones_compiladas.append(instruccion_compilada)
        else:
            print("Error encontrando tipo de instruccion")
            sys.exit()

    # for instruccion in instrucciones_compiladas:
    #     print(instruccion)

    print("Exportando archivo... " + formato_export)
    if(formato_export == "MIF"):
        archivo = ExportMIF(instrucciones_compiladas, argv)
    else:
        archivo = ExportMem(instrucciones_compiladas, argv)

    print("Archivo " + formato_export + " " + archivo + " generado!")
Example #2
    def from_mint_file(filepath: str, skip_constraints: bool = False) -> MINTDevice:
        """Compiles the MINT file at the given path

        Args:
            filepath (str): absolute filepath of the mint file
            skip_constraints (bool, optional): flag to accept / skip constraint parsing.
            Defaults to False.

        Returns:
            MINTDevice: The parsed device from the MINT file
        """
        import io
        import sys

        from antlr4 import CommonTokenStream, FileStream, ParseTreeWalker

        from pymint.antlrgen.mintLexer import mintLexer
        from pymint.antlrgen.mintParser import mintParser
        from pymint.constraints.constraintlistener import ConstraintListener
        from pymint.mintcompiler import MINTCompiler
        from pymint.mintErrorListener import MINTErrorListener

        finput = FileStream(filepath)

        lexer = mintLexer(finput)

        stream = CommonTokenStream(lexer)

        parser = mintParser(stream)

        # Connect the Error Listener
        parse_output = io.StringIO()
        parse_output.write("MINT SYNTAX ERRORS:\n")

        error_listener = MINTErrorListener(parse_output)
        parser.addErrorListener(error_listener)

        tree = parser.netlist()

        if error_listener.pass_through is False:
            print("STOPPED: Syntax Error(s) Found")
            sys.exit(0)

        walker = ParseTreeWalker()

        listener = MINTCompiler()

        walker.walk(listener, tree)

        if listener.current_device is None:
            raise AssertionError
        current_device = listener.current_device

        if skip_constraints is not True:
            print("Computing Constraints")
            constraint_listener = ConstraintListener(listener.current_device)

            walker.walk(constraint_listener, tree)

            current_device = listener.current_device

        return current_device
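
A hypothetical call site, assuming the method is exposed as a static method on MINTDevice (the file path here is illustrative):

device = MINTDevice.from_mint_file("designs/mixer.mint", skip_constraints=True)
print(device)  # the MINTDevice parsed from the netlist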
Example #3
def main(inputname):
    lexer = TutuLexer(FileStream(inputname))
    stream = CommonTokenStream(lexer)
    parser = TutuParser(stream)
    parser.prog()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Simple Computer Turbo Assembler')
    parser.add_argument('file', type=str)

    args = parser.parse_args()
    print('Simple Computer Turbo Assembler 9000')
    print('        Brought to you by Gangweed Ganggang :D)))')

    filename = args.file
    input = FileStream(filename)
    lexer = scasmLexer(input)
    stream = CommonTokenStream(lexer)
    parser = scasmParser(stream)

    tree = parser.scasmProg()

    listener = scasmListener()
    walker = ParseTreeWalker()
    walker.walk(listener, tree)

    print('====== First Pass Symbol Table ======')
    print(symboltable)

    pass
Example #5
    def parse_model(cls, file_path=None):
        """
        Parses the given model file and returns its meta-model representation.
        :param file_path: the path to the file which shall be parsed.
        :type file_path: str
        :return: a new ASTNestMLCompilationUnit object.
        :rtype: ASTNestMLCompilationUnit
        """
        try:
            input_file = FileStream(file_path)
        except IOError:
            code, message = Messages.get_input_path_not_found(path=file_path)
            Logger.log_message(node=None, code=None, message=message,
                               error_position=None, log_level=LoggingLevel.ERROR)
            return
        code, message = Messages.get_start_processing_file(file_path)
        Logger.log_message(node=None, code=code, message=message, error_position=None, log_level=LoggingLevel.INFO)

        # create a lexer and hand over the input
        lexer = PyNestMLLexer()
        lexer.removeErrorListeners()
        lexer.addErrorListener(ConsoleErrorListener())
        lexerErrorListener = NestMLErrorListener()
        lexer.addErrorListener(lexerErrorListener)
        # lexer._errHandler = BailErrorStrategy()  # N.B. uncomment this line and the next to halt immediately on lexer errors
        # lexer._errHandler.reset(lexer)
        lexer.inputStream = input_file
        # create a token stream
        stream = CommonTokenStream(lexer)
        stream.fill()
        if lexerErrorListener._error_occurred:
            code, message = Messages.get_lexer_error()
            Logger.log_message(node=None, code=None, message=message,
                               error_position=None, log_level=LoggingLevel.ERROR)
            return
        # parse the file
        parser = PyNestMLParser(None)
        parser.removeErrorListeners()
        parser.addErrorListener(ConsoleErrorListener())
        parserErrorListener = NestMLErrorListener()
        parser.addErrorListener(parserErrorListener)
        # parser._errHandler = BailErrorStrategy()	# N.B. uncomment this line and the next to halt immediately on parse errors
        # parser._errHandler.reset(parser)
        parser.setTokenStream(stream)
        compilation_unit = parser.nestMLCompilationUnit()
        if parserErrorListener._error_occurred:
            code, message = Messages.get_parser_error()
            Logger.log_message(node=None, code=None, message=message,
                               error_position=None, log_level=LoggingLevel.ERROR)
            return

        # create a new visitor and return the new AST
        ast_builder_visitor = ASTBuilderVisitor(stream.tokens)
        ast = ast_builder_visitor.visit(compilation_unit)

        # create and update the corresponding symbol tables
        SymbolTable.initialize_symbol_table(ast.get_source_position())
        for neuron in ast.get_neuron_list():
            neuron.accept(ASTSymbolTableVisitor())
            SymbolTable.add_neuron_scope(neuron.get_name(), neuron.get_scope())

        # store source paths
        for neuron in ast.get_neuron_list():
            neuron.file_path = file_path
        ast.file_path = file_path

        return ast
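
A hedged usage sketch: parse_model is a classmethod (note the cls parameter), so it would be called on its host class; the class and file names below are assumptions:

ast = ModelParser.parse_model(file_path="models/iaf_psc_exp.nestml")
if ast is not None:  # parse_model returns None on lexer or parser errors
    for neuron in ast.get_neuron_list():
        print(neuron.get_name())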
Example #6
def generate_code(in_file, out_file=None):
    antlr = Antlr(G4VisitorLexer, G4VisitorParser)
    p = antlr.parser(FileStream(in_file))
    model = p.visitor().accept(G4VisitorVisitor())

    dirname = os.path.dirname(in_file)
    basename, ext = os.path.splitext(os.path.basename(in_file))
    if basename != model.name or ext != '.g4v':
        raise ValueError(
            f"Expected visitor in file '{model.name}.g4v', not in'{basename}{ext}'."
        )

    if out_file is None:
        out_file = os.path.join(dirname, basename + '.py')

    @print_to(out_file)
    def code():
        def cap(str):
            return str[0:1].upper() + str[1:]

        def visitor_code(visitor):
            visitorName, grammarName, rules = visitor

            yield from lines(f"""\
from gsl import pseudo_tuple

from gsl.antlr import ParseTreeVisitor
if __name__ is not None and "." in __name__:
    from .{grammarName}Parser import {grammarName}Parser
else:
    from {grammarName}Parser import {grammarName}Parser


""")
            for ruleName, body in rules:
                if isinstance(body, ObjectBody):
                    objectName, params = body
                    paramsStr = ' '.join(f"{param.name!r},"
                                         for param in params)
                    yield from lines(f"""\
{objectName} = pseudo_tuple({objectName!r}, ({paramsStr}))""")

            yield from lines(f"""\


class {visitorName}(ParseTreeVisitor):""")
            for ruleName, body in rules:
                yield from lines(f"""\
    def visit{cap(ruleName)}(self, ctx: {grammarName}Parser.{cap(ruleName)}Context):"""
                                 )
                yield from body_code(body)
                yield from lines(f"""\

""")

        def body_code(body):
            if isinstance(body, ObjectBody):
                yield from object_body_code(body)
            else:
                yield from expr_body_code(body)

        def object_body_code(objectBody):
            yield from lines(f"""\
        return {objectBody.name}(""")
            for paramName, expr, optional in objectBody.params:
                opt = f" if {expr_core_str(expr, True)} else None" if optional else ""
                yield from lines(f"""\
            {expr_str(expr)}{opt},""")
            yield from lines(f"""\
        )""")

        def expr_body_code(exprBody):
            yield from lines(f"""\
        return {expr_str(exprBody)}""")

        def expr_core_str(expr, check=False):
            if isinstance(expr, RuleExpr):
                args = "ctx" + ''.join(
                    f", {model.grammar}Parser.{cap(t)}Context"
                    for t in expr.rules)
                operation = "self.has_children" if check else "self.get_children" if expr.multi else "self.get_child"
                return f"{operation}({args})"
            elif isinstance(expr, TokenExpr):
                return f"ctx.{expr.token}()"
            elif isinstance(expr, RefExpr):
                return f"ctx.{expr.ref}"

        def expr_str(expr):
            core = expr_core_str(expr)
            operation = "bool" if expr.presence else "self.visitNode" if isinstance(
                expr, RefExpr) or not expr.multi else "self.visitNodes"
            return f"{operation}({core})"

        yield from visitor_code(model)
Example #7
def main(argv):
    lexer = SQLLexer(FileStream(argv[1]))
    stream = CommonTokenStream(lexer)
    parser = SQLParser(stream)
    tree = parser.parse()
    print(tree.toStringTree())
Example #8
parser = argparse.ArgumentParser()

parser.add_argument('file', type=str)

parser.add_argument('--print',
                    action='store_true',
                    required=False,
                    default=False,
                    dest='print',
                    help='Print true and unknown facts to stdout')

parser.add_argument(
    '--output',
    type=str,
    required=False,
    default=None,
    dest='output_dir',
    help='If specified, true and unknown facts will be output in the directory',
)

args = parser.parse_args()

if __name__ == '__main__':
    engine = Engine(FileStream(args.file))

    engine.run()

    if args.print:
        engine.print_all()

    if args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)

        engine.output_all(args.output_dir)
Example #9
def parseFromFile(events, filename):
    return parseFromStream(events, FileStream(filename), filename)
Example #10
# %%
from concurrent import futures
with futures.ProcessPoolExecutor(max_workers=4) as executor:
    future_map = {executor.submit(parse_problem, p): p for p in problems}
    for future in futures.as_completed(future_map, timeout=30):
        p = future_map[future]
        try:
            formula_selection, includes = future.result()
            print(p, includes, formula_selection)
        except Exception as e:
            print(p, e)

# %%
p = tptp.problems['KRS180+1']
print(p.name)
lexer = tptp_v7_0_0_0Lexer(FileStream(p.file))
listener = parse(lexer)
print(listener.includes)
print(type(listener.includes[0][0]))
print(dir(listener.includes[0][0]))

# %%
c = listener.includes[0][0]
c.getChild(1)
print(c.symbol)

# %%
from pathlib import Path
a = tptp.find_by_name('Axioms/KRS001+1.ax')
print(a)
# %%
Example #11
    def _parse(self, grammar, encoding, lib_dir):
        work_list = {grammar}
        root = None

        while work_list:
            grammar = work_list.pop()

            antlr_parser = self.antlr_parser_cls(CommonTokenStream(self.antlr_lexer_cls(FileStream(grammar, encoding=encoding))))
            current_root = antlr_parser.grammarSpec()
            # assert antlr_parser._syntaxErrors == 0, 'Parse error in ANTLR grammar.'

            # Save the 'outermost' grammar.
            if not root:
                root = current_root
            else:
                # Unite the rules of the imported grammar with the host grammar's rules.
                for rule in current_root.rules().ruleSpec():
                    root.rules().addChild(rule)

            work_list |= self._collect_imports(current_root, dirname(grammar), lib_dir)

        return root
Example #12
def parse_file(path: str):
    content = FileStream(path)
    return parse_content(content)
Example #13
    def exitStatsAggTermList(self, ctx: SPLParser.StatsAggTermListContext):
        ctx.ret = [
            child.ret for child in ctx.getChildren(
                lambda x: isinstance(x, SPLParser.StatsAggTermContext))
        ]

    def exitStats(self, ctx: SPLParser.StatsContext):
        stats = Stats()
        stats.stats_agg_terms = ctx.statsAggTermList().ret
        if (field_list := ctx.fieldList()):
            stats.by_fields = field_list.ret
        ctx.ret = stats


if __name__ == '__main__':
    if len(sys.argv) > 1:
        input_stream = FileStream(sys.argv[1], encoding='utf8')
    else:
        input_stream = InputStream(sys.stdin.read())

    lexer = SPLLexer(input_stream)
    token_stream = CommonTokenStream(lexer)
    parser = SPLParser(token_stream)
    tree = parser.pipeline()

    walker = ParseTreeWalker()
    pipeliner = Pipeliner()
    walker.walk(pipeliner, tree)
    for c in pipeliner.cmds:
        print(c)
Example #14
def compile_file(path: str):
    print("Parsing file", path)
    print_lexer_debug(path, FileStream(path, encoding='utf-8'))
    return compile_stream(path, FileStream(path, encoding='utf-8'))
Example #15
    def test_parse(self):
        engine = Dynabuffers.parse(FileStream(self.root_dir + "/schema04.dbs"))
        map = engine.deserialize(engine.serialize({"m1": {"s":"test"}, "m2" : {"t":"hello world!"}}))

        self.assertEqual(map, {"m1": {"s":"test", ":type":1}, "m2" : {"t":"hello world!", ":type":0}})
Example #16
def configurationFromFile(filename):
    return configurationFromStream(FileStream(filename), filename)
Example #17
def main(inputname,
         reg_alloc,
         typecheck=True,
         typecheck_only=False,
         stdout=False,
         output_name=None,
         debug=False,
         debug_graphs=False):
    (basename, rest) = os.path.splitext(inputname)
    if not typecheck_only:
        if stdout:
            output_name = None
            print("Code will be generated on standard output")
        elif output_name is None:
            output_name = basename + ".s"
            print("Code will be generated in file " + output_name)

    input_s = FileStream(inputname, encoding='utf-8')
    lexer = MiniCLexer(input_s)
    counter = CountErrorListener()
    lexer._listeners.append(counter)
    stream = CommonTokenStream(lexer)
    parser = MiniCParser(stream)
    parser._listeners.append(counter)
    tree = parser.prog()
    if counter.count > 0:
        # Syntax or lexical errors occurred; don't try to go further.
        exit(3)
    if typecheck:
        typing_visitor = MiniCTypingVisitor()
        try:
            typing_visitor.visit(tree)
        except MiniCTypeError as e:
            print(e.args[0])
            exit(2)

    if typecheck_only:
        if debug:
            print("Not running code generation because of --typecheck-only.")
        return

    # Codegen: 3-address code visitor over the CFG; first argument is debug mode
    visitor3 = MiniCCodeGen3AVisitor(debug, parser)

    # Dump generated code to stdout or a file.
    with open(output_name, 'w') if output_name else sys.stdout as output:
        visitor3.visit(tree)
        for function in visitor3.get_functions():
            # Allocation part
            allocator = None
            if reg_alloc == "naive":
                allocator = NaiveAllocator(function)
                comment = "naive allocation"
            elif reg_alloc == "all_in_mem":
                allocator = AllInMemAllocator(function)
                comment = "all-in-memory allocation"
            elif reg_alloc == "smart":
                allocator = SmartAllocator(function, basename, debug,
                                           debug_graphs)
                comment = "smart allocation with graph coloring"
            elif reg_alloc == "none":
                comment = "non executable 3-Address instructions"
            else:
                raise ValueError("Invalid allocation strategy:" + reg_alloc)
            if allocator:
                allocator.run()
            function.printCode(output, comment=comment)
            if debug:
                visitor3.printRegisterMap()  # print allocation
Example #18

if __name__ == '__main__':
    source_class = "JSONArray"
    source_package = "org.json"
    target_class = "JSONObject"
    target_package = "org.json"
    field_name = "myArrayList"
    path = ""
    files = get_filenames_in_dir(
        '/home/loop/Desktop/Ass/Compiler/CodART/benchmark_projects/JSON/src/main/java/org/json/'
    )
    field = None
    methods_tobe_update = []
    for file in files:
        stream = FileStream(file, encoding='utf8')
        lexer = JavaLexer(stream)
        token_stream = CommonTokenStream(lexer)
        parser = JavaParser(token_stream)
        tree = parser.compilationUnit()
        utilsListener = PreConditionListener(file)
        walker = ParseTreeWalker()
        walker.walk(utilsListener, tree)

        if not utilsListener.can_convert:
            continue

        if len(utilsListener.package.classes) > 1:
            exit(1)

        # find fields with the type Source first and store it
Example #19
import sys
from antlr4 import CommonTokenStream, FileStream
from antlr4.InputStream import InputStream
from cLexer import cLexer
from cParser import cParser
from org2html import Org2Html

if __name__ == '__main__':
    if len(sys.argv) > 1:
        input_stream = FileStream(sys.argv[1])
    else:
        input_stream = InputStream(sys.stdin.readline())

    lexer = cLexer(input_stream)
    token_stream = CommonTokenStream(lexer)
    parser = cParser(token_stream)
    tree = parser.org()

    visitor = Org2Html()
    visitor.visit(tree)

    html = visitor.html + '</html>'

    print(html)
    f = '{}.html'.format(''.join(sys.argv[1].split('.')[:-1]))
    with open(f, 'w') as f:
        f.write(html)
Example #20
    def refactor(self, ctx):
        """
        Main method for refactoring.
        :param ctx:
        :return:
        """
        self.create_new_method(ctx.stop.tokenIndex - 1)
        self.replace_duplicate_code()


if __name__ == "__main__":
    input_directory = r"/data/Dev/JavaSample"
    for root, dirs, files in os.walk(input_directory):
        for input_file in files:
            if input_file.endswith(".java"):
                stream = FileStream(os.path.join(root, input_file),
                                    encoding='utf8')
                lexer = JavaLexer(stream)
                token_stream = CommonTokenStream(lexer)
                parser = JavaParserLabeled(token_stream)
                parser.getTokenStream()
                parse_tree = parser.compilationUnit()
                my_listener = ExtractMethodRefactoring(
                    common_token_stream=token_stream,
                    class_name="DuplicateCode",
                    new_method_name="printCode")
                walker = ParseTreeWalker()
                walker.walk(t=parse_tree, listener=my_listener)
                with open(os.path.join(root, input_file), mode='w',
                          newline='') as f:
                    f.write(
                        my_listener.token_stream_re_writer.getDefaultText())
Example #21
    def parse_file(self, filename, start_rule='pddlDoc'):
        """ Parse a given filename starting from a given grammar rule """
        return self._parse_stream(FileStream(filename), start_rule)
Example #22
def main():
    # Set the input to the file with the specified file name
    input_stream = FileStream('test.txt')

    # Split the input stream into its component tokens using the lexer
    lexer = RegularLexer(input_stream)
    stream = CommonTokenStream(lexer)

    # Make sense of the structure of the component tokens using a parse tree generated by the parser
    parser = RegularParser(stream)

    # Set the start rule of the language to the parse tree
    tree = parser.language()

    # Begin traversing the parse tree
    visitor = RegularEvaluator()

    # Get the machine
    machine = visitor.visit(tree)

    # Get the DFA version of the machine
    dfa = machine.dfa()

    print('Initial state:\n\t' + str(dfa.init_state))
    print('Final states:')

    for state in dfa.final_states:
        print('\t' + str(state))

    print('Transitions:')

    for state, transitions in dfa.state_table.items():
        print('\t' + ('>' if state == dfa.init_state else '') + str(state) +
              ('*' if state in dfa.final_states else ''))

        for stimulus, destination in transitions.items():
            print('\t\t' + stimulus + ' -> ' + str(destination))

    # Test the machine with input
    string = str(input('\nTest the machine (\'-\' to finish testing): '))

    while string != '-':
        current_state = dfa.init_state
        latest_stimulus = ''

        try:
            for symbol in string:
                latest_stimulus = symbol
                current_state = dfa.state_table[current_state][symbol]

            if current_state in dfa.final_states:
                print('\tAccepted.')
            else:
                print('\tNot accepted - terminated unaccepted @ state ' +
                      str(current_state) + ' on stimulus \'' +
                      str(latest_stimulus) + '\'.')
        except KeyError:
            print('\tNot accepted - rejected @ state ' + str(current_state) +
                  ' on stimulus \'' + str(latest_stimulus) + '\'.')

        string = str(input('Test the machine (\'-\' to finish testing): '))
Example #23
import argparse

from antlr4 import FileStream

from wrappers import ParseTreeWrapper

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', help='Path to query language script for parsing')
    parser.add_argument('-o', '--output', help='Path to output Abstract Syntax Tree (.dot graph representation)')
    parser.add_argument('-v', '--view', action='store_true', help='Open graph in the PDF view immediately')
    args = parser.parse_args()
    wrapper = ParseTreeWrapper(FileStream(args.input))
    if wrapper.ast is not None:
        wrapper.graph.render(args.output, view=args.view)
Example #24
    def file_stream(self, file, encoding='ascii', errors='strict'):
        return FileStream(file, encoding, errors)

    def execute(self, input_source):
        parser = JavaParser(CommonTokenStream(JavaLexer(FileStream(input_source, encoding="utf-8"))))
        walker = ParseTreeWalker()
        walker.walk(self.listener, parser.compilationUnit())
        return self.listener.methods
Example #26
    def parse_file(self, filename, start_rule='pddlDoc'):
        """ Parse a given filename starting from a given grammar rule """
        stream = FileStream(filename, encoding='utf-8')
        if self.case_insensitive:
            stream = LowerCasingStreamWrapper(stream)
        return self._parse_stream(stream, start_rule)
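
LowerCasingStreamWrapper is defined elsewhere in that project; a minimal sketch of the standard ANTLR case-changing stream idiom it presumably follows (an assumption, not the project's actual code). Lookahead is lower-cased so the grammar can match keywords case-insensitively, while token text keeps its original case:

class LowerCasingStreamWrapper:
    def __init__(self, stream):
        self._stream = stream

    def __getattr__(self, name):
        # Delegate everything else to the wrapped FileStream.
        return getattr(self._stream, name)

    def LA(self, offset):
        c = self._stream.LA(offset)
        if c <= 0:
            return c  # EOF markers pass through unchanged
        return ord(chr(c).lower())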
Example #27
    def setUp(self):
        self.engine = Dynabuffers.parse(
            FileStream(self.root_dir + "/schema17.dbs"))
Example #28
def main(argv):
    global representacao_arvore

    #os.system('clear')

    input_stream = FileStream(argv[1], encoding='utf-8')
    output = open(argv[1] + ".txt", "w")

    output.write("ARQUIVO ------------------------------------\n")
    output.write(str(input_stream) + "\n")
    output.write("--------------------------------------------\n\n")

    # Lexical analyzer
    lexer = visualgLexer(input_stream)

    output.write("TOKENS -------------------------------------\n")
    while True:
        token = lexer.nextToken()
        tipo = ''
        linha = ''
        if token.type != Token.EOF:
            t = token.type
            linha = token.line
            if t == lexer.ABRE_COLCHETES:
                tipo = "<ABRE_COLCHETES>"
            elif t == lexer.ABRE_PARENTESES:
                tipo = "<ABRE_PARENTESES>"
            elif t == lexer.ALEATORIO:
                tipo = "<ALEATORIO>"
            elif t == lexer.ALGORITMO:
                tipo = "<ALGORITMO>"
            elif t == lexer.ARQUIVO:
                tipo = "<ARQUIVO>"
            elif t == lexer.ATE:
                tipo = "<ATE>"
            elif t == lexer.ATRIBUIR:
                tipo = "<ATRIBUIR>"
            elif t == lexer.BOOL:
                tipo = "<BOOL>"
            elif t == lexer.CASO:
                tipo = "<CASO>"
            elif t == lexer.CRONOMETRO:
                tipo = "<CRONOMETRO>"
            elif t == lexer.DE:
                tipo = "<DE>"
            elif t == lexer.DOIS_PONTOS:
                tipo = "<DOIS_PONTOS>"
            elif t == lexer.ECO:
                tipo = "<ECO>"
            elif t == lexer.ENQUANTO:
                tipo = "<ENQUANTO>"
            elif t == lexer.ENTAO:
                tipo = "<ENTAO>"
            elif t == lexer.ESCOLHA:
                tipo = "<ESCOLHA>"
            elif t == lexer.ESCREVA:
                tipo = "<ESCREVA>"
            elif t == lexer.FACA:
                tipo = "<FACA>"
            elif t == lexer.FECHA_COLCHETES:
                tipo = "<FECHA_COLCHETES>"
            elif t == lexer.FECHA_PARENTESES:
                tipo = "<FECHA_PARENTESES>"
            elif t == lexer.FIM_ALGORITMO:
                tipo = "<FIM_ALGORITMO>"
            elif t == lexer.FIM_ENQUANTO:
                tipo = "<FIM_ENQUANTO>"
            elif t == lexer.FIM_ESCOLHA:
                tipo = "<FIM_ESCOLHA>"
            elif t == lexer.FIM_FUNCAO:
                tipo = "<FIM_FUNCAO>"
            elif t == lexer.FIM_PARA:
                tipo = "<FIM_PARA>"
            elif t == lexer.FIM_SE:
                tipo = "<FIM_SE>"
            # elif t == lexer.FUNCAO:
            #     tipo = "<FUNCAO>"
            elif t == lexer.INICIO:
                tipo = "<INICIO>"
            elif t == lexer.INTEIRO:
                tipo = "<INTEIRO>"
            elif t == lexer.INTERROMPA:
                tipo = "<INTERROMPA>"
            elif t == lexer.LEIA:
                tipo = "<LEIA>"
            elif t == lexer.LIMPATELA:
                tipo = "<LIMPATELA>"
            elif t == lexer.NOME_ARQUIVO:
                tipo = "<NOME_ARQUIVO>"
            elif t == lexer.VARIAVEL:
                tipo = "<VARIAVEL>"
            elif t == lexer.DECLARACAO_FUNCAO:
                tipo = "<DECLARACAO_FUNCAO>"
            elif t == lexer.OFF:
                tipo = "<OFF>"
            elif t == lexer.ON:
                tipo = "<ON>"
            elif t == lexer.OUTRO_CASO:
                tipo = "<OUTRO_CASO>"
            elif t == lexer.OPERADOR_UNARIO:
                tipo = "<OPERADOR_UNARIO>"
            elif t == lexer.PARA:
                tipo = "<PARA>"
            elif t == lexer.PASSO:
                tipo = "<PASSO>"
            elif t == lexer.PAUSA:
                tipo = "<PAUSA>"
            elif t == lexer.PONTO_PONTO:
                tipo = "<PONTO_PONTO>"
            elif t == lexer.PONTO_VIRGULA:
                tipo = "<PONTO_VIRGULA>"
            elif t == lexer.REAL:
                tipo = "<REAL>"
            elif t == lexer.REPITA:
                tipo = "<REPITA>"
            elif t == lexer.RETORNO:
                tipo = "<RETORNO>"
            elif t == lexer.SE:
                tipo = "<SE>"
            elif t == lexer.SENAO:
                tipo = "<SENAO>"
            elif t == lexer.STRING:
                tipo = "<STRING>"
            elif t == lexer.TIMER:
                tipo = "<TIMER>"
            elif t == lexer.VAR:
                tipo = "<VAR>"
            elif t == lexer.VETOR:
                tipo = "<VETOR>"
            elif t == lexer.VIRGULA:
                tipo = "<VIRGULA>"
            elif t == lexer.BARRA_BARRA:
                tipo = "<BARRA_BARRA>"
            elif t == lexer.VOID:
                tipo = "<VOID>"
            elif t == lexer.TIPO_DE_DADO:
                tipo = "<TIPO_DE_DADO>"
            elif t == lexer.OP_SOM:
                tipo = "<SOMA>"
            elif t == lexer.OP_SUB:
                tipo = "<SUBTRACAO>"
            elif t == lexer.OP_MUL:
                tipo = "<MULTIPLICACAO>"
            elif t == lexer.OP_DIV:
                tipo = "<DIVISAO>"
            elif t == lexer.OP_RES:
                tipo = "<RESTO>"
            elif t == lexer.OP_POT:
                tipo = "<POTENCIA>"
            elif t == lexer.OP_DIV_INT:
                tipo = "<DIVISAO INTEIRA>"
            elif t == lexer.OP_MAIOR:
                tipo = "<MAIOR>"
            elif t == lexer.OP_MAIOR_IGUAL:
                tipo = "<MAIOR_IGUAL>"
            elif t == lexer.OP_MENOR:
                tipo = "<MENOR>"
            elif t == lexer.OP_MENOR_IGUAL:
                tipo = "<MENOR IGUAL>"
            elif t == lexer.OP_IGUAL:
                tipo = "<IGUAL>"
            elif t == lexer.OP_DIFERENTE:
                tipo = "<DIFERENTE>"
            elif t == lexer.OP_NAO:
                tipo = "<NAO>"
            elif t == lexer.OP_OU:
                tipo = "<OU>"
            elif t == lexer.OP_E:
                tipo = "<E>"
            elif t == lexer.OP_XOU:
                tipo = "<XOU>"
            elif t == lexer.MATRIZ:
                tipo = "<MATRIZ>"
            if tipo != '':
                output.write(str((linha, tipo, token.text)) + "\n")
            else:
                output.write(str((linha, token.text)) + "\n")
        else:
            break

    output.write("--------------------------------------------\n\n")
    # Syntax analyzer
    input_stream = FileStream(argv[1], encoding='utf-8')
    lexer = visualgLexer(input_stream)
    tokens = CommonTokenStream(lexer)
    parser = visualgParser(tokens)
    tree = parser.prog()
    # gerar_arvore(tree, parser.ruleNames)
    print("ARVORE SINTÁTICA -----------\n")
    output.write("ARVORE SINTÁTICA -----------\n")

    output.write(str(Trees.toStringTree(tree, None, parser)) + "\n")
    print(str(Trees.toStringTree(tree, None, parser)) + "\n")

    output.write("--------------------------------------------\n\n")
    print("--------------------------------------------\n\n")

    output.close()

    print(
        "Cheque agora o arquivo saida.txt na mesma pasta deste projeto para ver a saída mais detalhada"
    )
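
As an aside, ANTLR-generated Python lexers expose a symbolicNames table indexed by token type, which could stand in for the long elif chain above (the names then match the grammar's token names, e.g. OP_SOM rather than SOMA); a sketch using the same lexer, output and Token objects:

token = lexer.nextToken()
while token.type != Token.EOF:
    nome = visualgLexer.symbolicNames[token.type]
    output.write(str((token.line, "<" + nome + ">", token.text)) + "\n")
    token = lexer.nextToken()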
Example #29
    def aster(path):
        lexer = BasisLexer(FileStream(path))
        stream = CommonTokenStream(lexer)
        parser = BasisParser(stream)
        visitor = PythonTarget()
        return visitor.visitProgram(parser.program())
Example #30
from src.antlr.antlr_helper import TreeHelper
import argparse
from antlr4 import FileStream

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', help='Path to query script file')
    parser.add_argument('-o',
                        '--output',
                        help='Path to output visualization (.dot file)')
    parser.add_argument('-v',
                        '--view',
                        action='store_true',
                        help='Open visualization')
    args = parser.parse_args()
    input_stream = FileStream(args.input)
    tree_helper = TreeHelper(input_stream)
    tree_helper.get_visualization(args.output, show_view=args.view)