예제 #1
0
def main():
    """Decrease the visibility of a field in one class of a Java project.

    Resolves the file that declares `class_name` through an Understand
    (und) database, parses it with the ANTLR-generated Java lexer/parser,
    rewrites the visibility of `field_name` via
    DecreaseFieldVisibilityRefactoringListener, and writes the file back
    in place.
    """
    print("Decrease Field Visibility")
    udb_path = "/home/ali/Documents/compiler/Research/xerces2-j/xerces2-j.udb"
    class_name = "AttributesImpl"
    field_name = "length"
    mainfile = ""
    db = und.open(udb_path)
    # Locate the source file that declares the target class.
    for cls in db.ents("class"):
        if cls.simplename() == class_name and cls.kindname() != "Unknown Class":
            mainfile = cls.parent().longname()

    # Step 1: Load the source file into an ANTLR character stream
    stream = FileStream(mainfile, encoding='utf8')
    # Step 2: Create an instance of the Java lexer
    lexer = JavaLexer(stream)
    # Step 3: Convert the input source into a list of tokens
    token_stream = CommonTokenStream(lexer)
    # Step 4: Create an instance of the Java parser
    # (bug fix: removed dead `parser.getTokenStream()` call whose result
    # was discarded)
    parser = JavaParser(token_stream)
    parse_tree = parser.compilationUnit()
    my_listener = DecreaseFieldVisibilityRefactoringListener(
        common_token_stream=token_stream,
        source_class=class_name,
        field_name=field_name)
    walker = ParseTreeWalker()
    walker.walk(t=parse_tree, listener=my_listener)

    # Bug fix: write back with the same encoding used for reading; the
    # original omitted encoding='utf8' and used the platform default,
    # which corrupts non-ASCII sources on round-trip.
    with open(mainfile, mode='w', encoding='utf8', newline='') as f:
        f.write(my_listener.token_stream_rewriter.getDefaultText())
예제 #2
0
 def generate_tree(file_path):
     """Parse the Java source file at `file_path` and return its parse tree.

     Runs the standard ANTLR pipeline (character stream -> lexer ->
     token stream -> parser) and returns the `compilationUnit` tree.
     """
     # Load the source into an ANTLR character stream, skipping bad bytes.
     char_stream = FileStream(file_path, encoding='utf8', errors='ignore')
     # Tokenize with the generated Java lexer.
     java_lexer = JavaLexer(char_stream)
     tokens = CommonTokenStream(java_lexer)
     # Build the parser and produce the top-level compilation unit.
     java_parser = JavaParser(tokens)
     return java_parser.compilationUnit()
예제 #3
0
    # Resolve the file that declares the target class via the Understand DB.
    # NOTE(review): this loop assigns `main_file`, but everything below reads
    # `main_file_` (presumably a parameter of the enclosing function) — the
    # loop's result looks unused; confirm which name is intended.
    for cls in db.ents("class"):
        if cls.simplename() == class_name_:
            if cls.kindname() != "Unknown Class":
                main_file = cls.parent().longname()

    # Step 1: Load the source file into an ANTLR character stream
    stream = FileStream(main_file_, encoding='utf8', errors='ignore')

    # Step 2: Create an instance of the Java lexer
    lexer = JavaLexer(stream)

    # Step 3: Convert the input source into a list of tokens
    token_stream = CommonTokenStream(lexer)

    # Step 4: Create an instance of the Java parser
    parser = JavaParser(token_stream)
    parser.getTokenStream()  # NOTE(review): result unused — looks like a dead call
    parse_tree = parser.compilationUnit()

    # Rewrite `field_name_` of `class_name_` as final via the listener.
    my_listener = MakeFieldFinalRefactoringListener(
        common_token_stream=token_stream,
        source_class=class_name_,
        field_name=field_name_
    )

    walker = ParseTreeWalker()
    walker.walk(t=parse_tree, listener=my_listener)

    # Write the rewritten token stream back over the original file.
    with open(main_file_, mode='w', encoding='utf8', errors='ignore', newline='') as f:
        f.write(my_listener.token_stream_rewriter.getDefaultText())
    db.close()
예제 #4
0
    # Configuration for a field-move refactoring: move `field_name` from
    # source_class (org.json.JSONArray) to target_class (org.json.JSONObject).
    source_class = "JSONArray"
    source_package = "org.json"
    target_class = "JSONObject"
    target_package = "org.json"
    field_name = "myArrayList"
    path = ""
    files = get_filenames_in_dir(
        '/home/loop/Desktop/Ass/Compiler/CodART/benchmark_projects/JSON/src/main/java/org/json/'
    )
    field = None
    methods_tobe_update = []
    for file in files:
        # Parse each Java file with the ANTLR-generated lexer/parser and
        # walk it with the pre-condition listener.
        stream = FileStream(file, encoding='utf8')
        lexer = JavaLexer(stream)
        token_stream = CommonTokenStream(lexer)
        parser = JavaParser(token_stream)
        tree = parser.compilationUnit()
        utilsListener = PreConditionListener(file)
        walker = ParseTreeWalker()
        walker.walk(utilsListener, tree)

        # Skip files the listener rejected as not convertible.
        if not utilsListener.can_convert:
            continue

        # Abort the whole run when a file declares more than one class.
        if len(utilsListener.package.classes) > 1:
            exit(1)

        # find fields with the type Source first and store it
        field_candidate = set()
        for klass in utilsListener.package.classes.values():
            for f in klass.fields.values():
예제 #5
0
def main():
    """Increase the visibility of `class_name.field_name` and propagate it.

    Resolves the declaring file through an Understand (und) database,
    rewrites it with IncreaseFieldVisibilityRefactoringListener, then
    rewrites every other file that sets or uses the field. A plain-text
    database of "name:<file>" lines tracks files already edited so a file
    is only registered once.
    """
    print("Increase Field Visibility")
    udb_path = "/home/ali/Documents/compiler/Research/xerces2-j/xerces2-j3.udb"
    class_name = "ListNode"
    field_name = "uri"

    file_list_to_be_propagate = set()
    propagate_classes = set()
    file_list_include_file_name_that_edited = ""
    mainfile = ""
    db = und.open(udb_path)
    for field in db.ents("public variable"):
        if str(field) == str(class_name + "." + field_name):
            # Locate the file that declares this field.
            print(field)
            if field.parent().parent().relname() is not None:
                mainfile = field.parent().parent().longname()
                print(mainfile)
                print(field.parent().parent().longname())
            else:
                for ref in field.refs("Definein"):
                    mainfile = ref.file().longname()
                    print(mainfile)
                    print(ref.file().relname())
            # Collect every class (and its file) that sets or uses the
            # field from outside the declaring class itself.
            for ref in field.refs("Setby , Useby"):
                if not (str(ref.ent()) == str(field.parent())
                        or str(ref.ent().parent()) == str(field.parent())):
                    propagate_classes.add(str(ref.ent().parent()))
                    file_list_to_be_propagate.add(ref.file().longname())

    file_list_to_be_propagate = list(file_list_to_be_propagate)
    propagate_classes = list(propagate_classes)

    # Register mainfile in the edited-files database unless already there.
    # Bug fix: the original leaked the read handle via `open(...).read()`.
    with open(r"../filename_status_database.txt", encoding="utf-8") as f:
        corpus = f.read()
    if corpus.find("name:" + mainfile) == -1:
        with open("../filename_status_database.txt", mode='w', encoding="utf-8", newline='') as f:
            f.write(corpus + "\nname:" + mainfile)
            f.flush()
            os.fsync(f.fileno())
        file_list_include_file_name_that_edited += mainfile + "\n"
    else:
        print("file already edited")
    print(mainfile)

    # Parse the declaring file and rewrite the field's visibility.
    # (bug fix: removed dead `parser.getTokenStream()` calls and the unused
    # `flag_file_is_refatored` / `flag_file_edited` locals)
    stream = FileStream(mainfile, encoding='utf8')
    lexer = JavaLexer(stream)
    token_stream = CommonTokenStream(lexer)
    parser = JavaParser(token_stream)
    parse_tree = parser.compilationUnit()
    my_listener = IncreaseFieldVisibilityRefactoringListener(
        common_token_stream=token_stream,
        source_class=class_name,
        field_name=field_name)
    walker = ParseTreeWalker()
    walker.walk(t=parse_tree, listener=my_listener)

    # Bug fix: write back as utf8; the original used the platform default
    # encoding for a file that was read as utf8.
    with open(mainfile, mode='w', encoding='utf8') as f:
        f.write(my_listener.token_stream_rewriter.getDefaultText())

    print(file_list_to_be_propagate)
    for file in file_list_to_be_propagate:
        # NOTE(review): this path lacks the "../" prefix used above for the
        # same database — confirm whether both should point at one file.
        with open(r"filename_status_database.txt", encoding="utf-8") as f:
            corpus = f.read()
        if corpus.find("name:" + file) == -1:
            with open("filename_status_database.txt", mode='w', encoding="utf-8", newline='') as f:
                f.write(corpus + "\nname:" + file)
                f.flush()
                os.fsync(f.fileno())
            file_list_include_file_name_that_edited += file + "\n"
        print(file)

        # Parse each dependent file and propagate the visibility change.
        stream = FileStream(file, encoding='utf8')
        lexer = JavaLexer(stream)
        token_stream = CommonTokenStream(lexer)
        parser = JavaParser(token_stream)
        parse_tree = parser.compilationUnit()

        # First pass: collect the object names of the source class used here.
        my_listener_get_object = PropagationIncreaseFieldVisibility_GetObjects_RefactoringListener(
            token_stream,
            source_class=class_name,
            propagated_class_name=propagate_classes)
        walker = ParseTreeWalker()
        walker.walk(t=parse_tree, listener=my_listener_get_object)

        # Second pass: rewrite the field accesses in the propagated classes.
        my_listener = PropagationIncreaseFieldVisibilityRefactoringListener(
            common_token_stream=token_stream,
            using_field_name=field_name,
            object_name=my_listener_get_object.objects,
            propagated_class_name=propagate_classes)
        walker = ParseTreeWalker()
        walker.walk(t=parse_tree, listener=my_listener)

        # Bug fix: explicit utf8 on write-back (see above).
        with open(file, mode='w', encoding='utf8') as f:
            f.write(my_listener.token_stream_rewriter.getDefaultText())