def main():
    """Strip leading '#include' lines from the C file named in sys.argv[1],
    write the remainder to program.c, then parse it with the ANTLR C grammar
    and walk the tree with myCListener.

    Exits with status 1 when no argument was given (IndexError) or the
    file does not exist (FileNotFoundError).
    """
    try:
        # Context managers guarantee both files are closed even if
        # parsing below raises.
        with open(sys.argv[1], "rt") as src:
            # Skip the leading '#include' directives.
            line = src.readline()
            while line.startswith("#include"):
                line = src.readline()
            with open("program.c", "wt") as dst:
                # BUG FIX: the first non-#include line was previously read
                # into a variable and then discarded; write it out before
                # copying the rest of the file.
                dst.write(line)
                dst.write(src.read())

        file = FileStream("program.c")
        lexer = CLexer(file)
        stream = CommonTokenStream(lexer)
        parser = CParser(stream)
        tree = parser.compilationUnit()

        listener = myCListener()
        walker = ParseTreeWalker()
        walker.walk(listener, tree)

        listener.show()
    except IndexError:
        print("Error - You forgot to enter a elf file")
        sys.exit(1)
    except FileNotFoundError:
        print("Error - Please enter a valid elf file")
        sys.exit(1)
示例#2
0
def main():
    """Parse the OpenCL kernel named by -kernelfile and walk it with
    CPrintListener to generate the performance-model data.

    Command line:
        -kernelfile FILE   input file (required)
        -v                 verbose mode (optional)
    """
    parser = argparse.ArgumentParser(
        description='Generate performance model part a')
    parser.add_argument('-kernelfile',
                        type=str,
                        nargs=1,
                        help='Input file (OpenCL)',
                        required=True)
    parser.add_argument('-v',
                        action='store_true',
                        help='Verbose mode to print more',
                        required=False)

    args = parser.parse_args()

    verbosity_needed = False
    if args.v:
        verbosity_needed = True
        # BUG FIX: the old Python-2 'print' statements are syntax errors
        # under Python 3; use the print() function.
        print('Verbose mode')

    filename = args.kernelfile[0]
    print(filename)
    ipt = antlr4.FileStream(args.kernelfile[0])

    lexer = CLexer(ipt)
    stream = antlr4.CommonTokenStream(lexer)
    parser = CParser(stream)

    tree = parser.compilationUnit()

    printer = CPrintListener(filename, verbosity_needed)

    walker = antlr4.ParseTreeWalker()
    walker.walk(printer, tree)
示例#3
0
    def parse(self, file):
        """Parse *file* (a path relative to this module's directory) and
        return the parser's syntax-error count."""
        path = os.path.dirname(os.path.abspath(__file__)) + "/" + file
        token_stream = CommonTokenStream(CLexer(FileStream(path)))
        parser = CParser(token_stream)
        parser.prog()

        return parser.getNumberOfSyntaxErrors()
示例#4
0
文件: main.py 项目: zhangguof/x86emu
def main(argv):
    """Translate a C source file through MyVisitor and emit generated C.

    argv[1] -- input C file (default "2.c")
    argv[2] -- output file (default "2_out.c")
    argv[3] -- starting index for code generation (default 100)
    """
    name = argv[1] if len(argv) > 1 else "2.c"
    out_file = argv[2] if len(argv) > 2 else "2_out.c"
    start_idx = int(argv[3]) if len(argv) > 3 else 100

    input_stream = MyFileStream(name)
    token_stream = CommonTokenStream(CLexer(input_stream))
    parser = CParser(token_stream)
    tree = parser.compilationUnit()

    visitor = MyVisitor()
    visitor.init()
    visitor.visit(tree)
    funcs = visitor.get_funcs()

    gen_code.gen_c_file(funcs, out_file, start_idx)
示例#5
0
def main(argv):
    """Transpile ../test/<argv[1]>.c into ../test/<argv[1]>.py and append
    a trailing call to main() so the generated script is runnable."""
    input = FileStream("../test/" + argv[1] + ".c")
    lexer = CLexer(input)
    stream = CommonTokenStream(lexer)
    parser = CParser(stream)
    tree = parser.compilationUnit()
    v = MyVisitor()
    # Context manager closes the file even if visit() raises, unlike the
    # previous open()/close() pair.
    with open("../test/" + argv[1] + ".py", "w") as f:
        f.write(v.visit(tree) + "\nmain()")
def main():
    """Parse gzunBm.c, visit the tree with my_visitor, and show results."""
    source = FileStream("gzunBm.c")
    token_stream = CommonTokenStream(CLexer(source))
    parser = CParser(token_stream)
    parse_tree = parser.compilationUnit()

    visitor = my_visitor()
    visitor.visit(parse_tree)
    visitor.show()
示例#7
0
 def ParseFileWithClearedPPDirective(self):
     """Preprocess the file with PP directives cleared, flatten the line
     buffer back to strings, and run the ANTLR3 C parser over the whole
     translation unit."""
     self.PreprocessFileWithClear()
     # restore from ListOfList to ListOfString; avoid shadowing the
     # builtin 'list' as the old loop variable did
     self.Profile.FileLinesList = ["".join(lineList) for lineList in self.Profile.FileLinesList]
     # join in one C-level pass instead of quadratic += concatenation
     FileStringContents = ''.join(self.Profile.FileLinesList)
     cStream = antlr3.StringStream(FileStringContents)
     lexer = CLexer(cStream)
     tStream = antlr3.CommonTokenStream(lexer)
     parser = CParser(tStream)
     parser.translation_unit()
示例#8
0
def main(argv):
    """Compile the C file named in argv[1]: parse, semantically validate,
    optimise, visualise both trees, and emit code (to argv[2] if given)."""
    token_stream = CommonTokenStream(CLexer(FileStream(argv[1])))
    parser = CParser(token_stream)
    tree = parser.prog()

    # Stop early on syntax errors.
    if parser.getNumberOfSyntaxErrors():
        return

    # Visualise parse tree
    ParseTreeDotGenerator().generateDOT(parser,
                                        tree,
                                        "output/parse_tree.gv",
                                        render=False)

    # Build AST
    AST = ASTBuilder().visit(tree)

    # Semantic Validation
    semanticValidator = SemanticValidator()
    AST.accept(semanticValidator)

    # Report and abort on semantic errors, if any
    if semanticValidator.errors:
        for error in semanticValidator.errors:
            print("ERROR: " + error)
        return

    # Code optimiser
    optimiser = Optimiser(semanticValidator.symbolTable)
    AST.accept(optimiser)

    # Report warnings, if any
    for warning in optimiser.warnings:
        print("WARNING: " + warning)

    # Visualise AST
    AST.visit(DotGraphBuilder).render("output/ast.gv", view=False)

    # Code generator: forward the output path only when supplied
    # (len(argv) > 2 is equivalent to the old 2 <= len(argv) - 1).
    if len(argv) > 2:
        codeGenerator = CodeGenerator(optimiser.symbolTable, argv[2])
    else:
        codeGenerator = CodeGenerator(optimiser.symbolTable)
    AST.accept(codeGenerator)
示例#9
0
def main():
    """Parse gzunBm.c, walk the tree with myCListener, and show results."""
    char_stream = FileStream("gzunBm.c")
    tokens = CommonTokenStream(CLexer(char_stream))
    parser = CParser(tokens)
    parse_tree = parser.compilationUnit()

    listener = myCListener()
    ParseTreeWalker().walk(listener, parse_tree)
    listener.show()
示例#10
0
    def semanticAnalyse(self, file):
        """Build the AST for *file* (relative to this module's directory)
        and return the list of semantic errors it contains."""
        path = os.path.dirname(os.path.abspath(__file__)) + "/" + file
        parser = CParser(CommonTokenStream(CLexer(FileStream(path))))
        tree = parser.prog()

        AST = ASTBuilder().visit(tree)

        validator = SemanticValidator()
        AST.accept(validator)

        return validator.errors
示例#11
0
文件: main.py 项目: cekefun/Compilers
def main(argv):
    """Parse argv[1], build the custom tree via CustomVisitor, and dump
    it to test.dot before and test2.dot after minimisation."""
    token_stream = CommonTokenStream(CLexer(FileStream(argv[1])))
    parser = CParser(token_stream)
    antlrtree = parser.program()

    custom_tree = CustomVisitor().visit(tree=antlrtree)

    custom_tree.toDot("test.dot")
    custom_tree.minimize()
    custom_tree.toDot("test2.dot")
示例#12
0
def main():
    """Translate every file in ./testFiles to Python, writing each result
    to ./output/<basename>.py."""
    files = [
        join('./testFiles', f) for f in listdir('./testFiles')
        if isfile(join('./testFiles', f))
    ]
    for file in files:
        lexer = CLexer(FileStream(file))
        stream = CommonTokenStream(lexer)
        parser = CParser(stream)
        tree = parser.compilationUnit()
        ans = PYVisitor().visit(tree)

        # The with-statement already closes the file; the explicit
        # close() that used to follow the write was redundant.
        with open('./output/' + file.split('/')[-1].split('.')[0] + '.py',
                  'w') as output:
            output.write(ans)
示例#13
0
    def build(self):
        """Build and return the AST for the file at self.filepath."""
        # Avoid shadowing the builtin 'input' with the char stream name.
        char_stream = FileStream(self.filepath)
        token_stream = CommonTokenStream(CLexer(char_stream))
        parser = CParser(token_stream)
        tree = parser.program()

        # Root program node carries the symbol table
        self.AST = Program(self.sym)

        # One statement per direct child of the parse tree
        for child_index in range(tree.getChildCount()):
            child = tree.getChild(child_index)
            self.AST.addStatement(self.buildStatement(child))

        return self.AST
示例#14
0
    def semanticAnalyse(self, file):
        """Parse *file* (relative to this module's directory), validate
        and optimise its AST, and return a tuple of
        (optimiser warnings, AST before optimisation, AST after)."""
        path = os.path.dirname(os.path.abspath(__file__)) + "/" + file
        parser = CParser(CommonTokenStream(CLexer(FileStream(path))))
        tree = parser.prog()

        AST = ASTBuilder().visit(tree)
        # Keep a pre-optimisation snapshot for the caller to compare.
        oldAST = deepcopy(AST)

        validator = SemanticValidator()
        AST.accept(validator)

        optimiser = Optimiser(validator.symbolTable)
        AST.accept(optimiser)

        return optimiser.warnings, oldAST, AST
示例#15
0
def main(argv):
    """Scan all C files under the root directory argv[1], report any
    function longer than MAX_SAFE_LINES, and annotate such functions
    in the source via a token-stream rewriter."""
    input_stream_root = argv[1]
    files = find_c_files(input_stream_root)
    for file in files:
        print()
        print("File name: " + file)
        try:
            token_stream = CommonTokenStream(CLexer(FileStream(file)))
            # fill() materialises all tokens so the rewriter can edit them
            token_stream.fill()
            rewriter = TokenStreamRewriter(tokens=token_stream)
            parser = CParser(token_stream)
            tree = parser.compilationUnit()
            collector = Functions()
            ParseTreeWalker().walk(collector, tree)
            if len(collector.functions) > 0:
                print("Funciones:")
                # Renamed loop variable so it no longer shadows the
                # Functions instance it iterates over.
                for fn in collector.functions:
                    if fn[FUNCTION_LINES] > MAX_SAFE_LINES:
                        print(
                            "Atencion: Esta funcion tiene más de",
                            MAX_SAFE_LINES, "lineas, puede ser posible "
                            "dividirla en subfunciones")
                        rewriter.insertBefore(
                            "default", fn[FUNCTION_START],
                            "/* Es posible que esta funcion "
                            "pueda ser refactorizada */ \n")
                        ugly_code = rewriter.getText("default", 0, 9000)

                        update_file(file, ugly_code)

                    print("\t", fn)
            else:
                print("No se encontraron funciones")
        except UnicodeDecodeError:
            print(
                'Salteando archivo por un error de tipo "UnicodeDecodeError"')

    print("\nCantidad de archivos", len(files))
示例#16
0
def read_source_file(input_file=None):
    """Lex and parse *input_file* with the generated ANTLR C grammar,
    printing the resulting parser (or the RuntimeError if setup fails).

    Raises AssertionError when input_file is None.
    """
    from CLexer import CLexer
    from CParser import CParser

    assert input_file is not None
    try:
        char_stream = antlr4.FileStream(input_file)
        lexer = CLexer(char_stream)
        tokens = antlr4.CommonTokenStream(lexer)
        parser = CParser(tokens)
        # BUG FIX: the old Python-2 'print parser' / 'print e' statements
        # are syntax errors under Python 3; use the print() function.
        print(parser)
    except RuntimeError as e:
        print(e)
示例#17
0
def main(argv):
    """Translate the C file argv[1] into output.java, wrapping the
    visitor's output in a 'public class Test { ... }' shell."""
    input = FileStream(argv[1])
    lexer = CLexer(input)

    stream = CommonTokenStream(lexer)
    parser = CParser(stream)
    tree = parser.compilationUnit()
    ast = tree.toStringTree(recog=parser)

    # Write the class header; 'with' guarantees the handle is flushed and
    # closed before the visitor appends to the same file. Write-only mode
    # 'w' replaces the unnecessary read-write 'w+'.
    with open("output.java", "w") as f:
        f.write("public class Test {\n")

    # Visiting the tree appends the class body to output.java.
    v2 = MyCVisitor2()
    v2.visit(tree)

    # Append the closing brace ('a' instead of the unnecessary 'a+').
    with open("output.java", "a") as f:
        f.write("}")
def main():

    #####################  INITIALIZATION #####################################################################
    inclusion_factor_overall = 0.0
    redundancy_factor_overall=0.0
    optimal_factors_overall = (inclusion_factor_overall,redundancy_factor_overall)
    files = []
    w = []
    for i in range(0, 31):
        w.append(random.uniform(-1, 1))
    print("w - ",w)
    wmax = w
    count_progs=0
    for filename in os.listdir("c_prog"):
        files.append(filename)
        count_progs+=1
        if count_progs == 2:
            break
        # if count_progs==2 or count_progs==21:
        #     continue
        print(count_progs)
        print(filename)
        c_fileName = "c_prog/"+filename
        input_file = FileStream(c_fileName)
        lexer = CLexer(input_file)
        stream = CommonTokenStream(lexer)
        parser = CParser(stream)
        tree = parser.compilationUnit()
        #print(tree.toStringTree(recog=parser))


        #print("Start Walking...")
        v = MyCVisitor()
        v.visit(tree)
        #print(v.getVarList())

        v1 = MyCVisitor2()
        v1.visit(tree)
        #print("\n\n\n", v1.getCrudeCfg())
        #vari = input("Enter a variable name: ")
        #linenum = input("Enter a line number: ")







        ###############   TOTAL LINES ##################################
        cfg_string = v1.getCrudeCfg()
        cfg_textdict = v1.getdict()
        #print(cfg_textdict)
        #print(cfg_string)
        cfg_list = cfg_string.split(' ')
        #print(cfg_list)
        cfg_list = [x for x in cfg_list if x != '']
        #print(cfg_list)
        total_lines = 0
        for i in range(len(cfg_list) - 1, -1, -1):
            if cfg_list[i].isdigit():
                total_lines = int(cfg_list[i])
                break
        #print(total_lines)

        #############################################################3
        line_features = [[0] * 8 for i in range(0,total_lines)]
        # print(line_features)


























     ######################################################################################



    ##################  ALL PREREQUISITE CALCULATIONS #####################################
        variables_list = v.getVarList()
        c_outfile = "c_prog_slice_half/" + filename[:-2]+".txt"
        actual_slice_file = open(c_outfile, "r")
        # print(actual_slice_file)

        #
        actual_slice_text = actual_slice_file.read().splitlines()
        # print(actual_slice_text)
        actual_slice_dictionary = {}
        count1=0
        for i in actual_slice_text:
            count1+=1
            #print(count1)
            key_value = i.split(":")
            #print("ashish ranjasn",key_value)
            #print(i)
            #print(key_value[0])
            #print(key_value[1])

            # key_value[1].replace(",\n",'')
            #print(type(actual_slice_dictionary))
            actual_slice_dictionary[key_value[0]] = key_value[1]
        #print("akranjanam",actual_slice_dictionary)
        #new_slice = [i.replace('"', '') for i in actual_slice_dictionary]
        #print(actual_slice_dictionary)
        for key, value in actual_slice_dictionary.items():
            #print(len(actual_slice_dictionary))
            temp_str1 = ''
            temp_str2=''
            for i in range(2,len(key)):
                if key[i]=="'":
                    break
                else:
                    temp_str1=temp_str1+(key[i])
            for j in range(i+3,len(key)):
                if key[j]=="'":
                    break
                else:
                    temp_str2+=key[j]
            #print("ashishnew",temp_str1,temp_str2)
            vari = temp_str1
            linenum=temp_str2
            #print(vari)
            #print(linenum)

            ################################### potential_lines ###########################
            #print(linenum," ",cfg_list)
            index_linenum = cfg_list.index(linenum)
            potential_lines = list(range(1, int(linenum)))
            potential_flag = 0
            for i in range(index_linenum, -1, -1):
                if cfg_list[i] == ']':
                    break
                if cfg_list[i] == '[':
                    # print("yes")

                    if cfg_list[i - 2] == "while" or cfg_list[i-2]=="for":
                        j = i - 2
                        countopen = 1
                        # print("yes")
                        while (countopen > 0):
                            if (cfg_list[j] == '['):
                                countopen = countopen + 1
                            if (cfg_list[j] == ']'):
                                countopen = countopen - 1
                            # print("yes")
                            if (cfg_list[j].isdigit()):
                                # print("hello")
                                if (cfg_list[j] != linenum and int(cfg_list[j]) not in potential_lines):
                                    potential_lines.append(int(cfg_list[j]))
                            j = j + 1



                        break
                #print("-------------c----------")


            #print("-------------A----------")
            #print(potential_lines)
            act_str = actual_slice_dictionary[str((vari, linenum)).replace(' ', '')]
            #print(vari, linenum)
            #print(act_str)



            ########################### RANDOM SAMPLING ALGORITHM FOR CODE-BASE #######################################


            potential_line_scores = [0] * len(potential_lines)
            #print("potential line score - ",potential_line_scores)
            #print("-------------d----------")
            #print(potential_lines)
            #print(cfg_textdict)
            potential_line_features = feature_calculation(vari,linenum,cfg_textdict,cfg_list,total_lines,variables_list,potential_lines)
            #print("potential line features - ",potential_line_features)
            #print("-------------e-----------")

            #enumerate through the potential lines
            k = 0
            for i in range(len(potential_line_scores)):
                # line_scores.append(0)
                k+=1
                for j in range(len(w)):
                    #if k<2:
                        #print("w[",j,"] - ",w[j])
                    potential_line_scores[i] = potential_line_scores[i] + (w[j] * potential_line_features[i][j])
                    #if k<2 and potential_line_features[i][j]>0:
                        #print("potential_line_scores[", i, "] - ", potential_line_scores[i])
            #print(potential_lines)
            #print("features[5] - ", potential_line_features[5])
            #print("w - ",w)
            #print("score[5] - ",potential_line_scores[5])
            #print(potential_line_scores)
            #print("-------------f----------")
            #print(len(potential_lines))
            if len(potential_lines)>20:
                continue
            abstraction_scores = [0] * (2 ** (len(potential_lines)))
            allsubsets = lambda n: list(itertools.chain(*[itertools.combinations(range(n), ni) for ni in range(n + 1)]))

            # print(allsubsets(7))
            maxim = -1000000
            #print(potential_lines)
            if len(potential_lines)>20:
                continue
            subsets = allsubsets(len(potential_lines))
            #print("akr ",len(subsets))

            for i in range(len(subsets)):
                for j in subsets[i]:
                    abstraction_scores[i] = abstraction_scores[i] + potential_line_scores[j]
                if abstraction_scores[i] > maxim:
                    maxim = abstraction_scores[i]
                    optimal_abstraction_indices = subsets[i]
            #print("-------------g----------")

            optimal_abstraction_lines=[]
            inclusion_score=0
            exclusion_score=0
            inclusion_factor=0
            redundancy_factor=0
            if len(optimal_abstraction_indices):
                for i in optimal_abstraction_indices:
                    optimal_abstraction_lines.append(potential_lines[i])
            #print(potential_lines)
            #print("shikhar",optimal_abstraction_lines)
            actual_slice = eval(act_str)
            #print(actual_slice)
            #print(type(actual_slice))
            #print(type(actual_slice))
            # print(abstraction_scores)
            #print(optimal_abstraction_indices)
            actual_output = []
            if len(optimal_abstraction_lines)>0:
                for x in optimal_abstraction_lines:
                    if x in actual_slice:
                        inclusion_score = inclusion_score + 1
                    else:
                        exclusion_score = exclusion_score + 1
            size_of_slice = len(actual_slice)
            #print(size_of_slice)
            if len(actual_slice) > 0 and len(optimal_abstraction_lines) > 0:
                inclusion_factor = float(inclusion_score) / float(size_of_slice)
                redundancy_factor = float(exclusion_score) / float(len(optimal_abstraction_lines))
            #
            optimal_factors = (inclusion_factor, redundancy_factor)
            optimal_slice = optimal_abstraction_lines
            #print(actual_slice)
            #print("result",optimal_slice)
            #print(optimal_factors)
            #print(maxim)
            inclusion_factor_overall += inclusion_factor
            redundancy_factor_overall -= redundancy_factor
            optimal_factors_overall = (inclusion_factor_overall,redundancy_factor_overall)
            #print(wmax)
            #print("ankur",optimal_factors_overall)
        #print("-------------B----------")

    ###################################################### REPETITION ###################################################
    iterations = 2

    for i in range(iterations):
        w = []
        print("Processing")
        for i in range(0, 31):
            w.append(random.uniform(-1, 1))
        #print(w)
        wcurrent = w
        inclusion_factor_repeat = 0.0
        redundancy_factor_repeat = 0.0

        count_progs=0
        for filename in files:
            count_progs += 1
            if count_progs == 2:
                break
            c_fileName = "c_prog/"+filename
            input_file = FileStream(c_fileName)
            lexer = CLexer(input_file)
            stream = CommonTokenStream(lexer)
            parser = CParser(stream)
            tree = parser.compilationUnit()
            #print(tree.toStringTree(recog=parser))


            #print("Start Walking...")
            v = MyCVisitor()
            v.visit(tree)
            #print(v.getVarList())

            v1 = MyCVisitor2()
            v1.visit(tree)
            #print("\n\n\n", v1.getCrudeCfg())
            #vari = input("Enter a variable name: ")
            #linenum = input("Enter a line number: ")


            ###############   TOTAL LINES ##################################
            cfg_string = v1.getCrudeCfg()
            cfg_textdict = v1.getdict()
            #print(cfg_textdict)
            #print(cfg_string)
            cfg_list = cfg_string.split(' ')
            #print(cfg_list)
            cfg_list = [x for x in cfg_list if x != '']
            #print(cfg_list)
            total_lines = 0
            for i in range(len(cfg_list) - 1, -1, -1):
                if cfg_list[i].isdigit():
                    total_lines = int(cfg_list[i])
                    break
            #print(total_lines)

            #############################################################3
            line_features = [[0] * 8 for i in range(0,total_lines)]
            # print(line_features)


        ##################  ALL PREREQUISITE CALCULATIONS #####################################
            variables_list = v.getVarList()
            #print()
            c_outfile = "c_prog_slice_half/" + filename[:-2]+".txt"
            actual_slice_file = open(c_outfile, "r")

            #
            actual_slice_text = actual_slice_file.read().splitlines()
            #print("actual slice text - ",actual_slice_text)
            actual_slice_dictionary = {}
            for i in actual_slice_text:
                key_value = i.split(":")
                # print(key_value[0])
                # print(key_value[1])
                # key_value[1].replace(",\n",'')
                actual_slice_dictionary[key_value[0]] = key_value[1]
            new_slice = [i.replace('"', '') for i in actual_slice_dictionary]

            for key, value in actual_slice_dictionary.items():
                temp_str1 = ''
                temp_str2=''
                for i in range(2,len(key)):
                    if key[i]=="'":
                        break
                    else:
                        temp_str1=temp_str1+(key[i])
                for j in range(i+3,len(key)):
                    if key[j]=="'":
                        break
                    else:
                        temp_str2+=key[j]
                #print("ashishnew",temp_str1,temp_str2)
                vari = temp_str1
                linenum=temp_str2



                ################################### potential_lines ###########################
                index_linenum = cfg_list.index(linenum)
                potential_lines = list(range(1, int(linenum)))
                potential_flag = 0
                for i in range(index_linenum, -1, -1):
                    if cfg_list[i] == ']':
                        break
                    if cfg_list[i] == '[':
                        # print("yes")

                        if cfg_list[i - 2] == "while" or cfg_list[i-2]=="for":
                            j = i - 2
                            countopen = 1
                            # print("yes")
                            while (countopen > 0):
                                if (cfg_list[j] == '['):
                                    countopen = countopen + 1
                                if (cfg_list[j] == ']'):
                                    countopen = countopen - 1
                                # print("yes")
                                if (cfg_list[j].isdigit()):
                                    # print("hello")
                                    if (cfg_list[j] != linenum and int(cfg_list[j]) not in potential_lines):
                                        potential_lines.append(int(cfg_list[j]))
                                j = j + 1
                            break
                #print(potential_lines)
                act_str = actual_slice_dictionary[str((vari, linenum)).replace(' ', '')]
                #print(vari, linenum)
                #print(act_str)

                ########################### RANDOM SAMPLING ALGORITHM FOR CODE-BASE #######################################





                potential_line_scores = [0] * len(potential_lines)
                #print(potential_line_scores)
                potential_line_features = feature_calculation(vari,linenum,cfg_textdict,cfg_list,total_lines,variables_list,potential_lines)

                for i in range(len(potential_line_scores)):
                    # line_scores.append(0)
                    for j in range(len(w)):
                        potential_line_scores[i] = potential_line_scores[i] + (w[j] * potential_line_features[i][j])
                #print(potential_lines)
                #print(potential_line_scores)
                if len(potential_lines) > 20:
                    continue

                abstraction_scores = [0] * (2 ** (len(potential_lines)))
                allsubsets = lambda n: list(itertools.chain(*[itertools.combinations(range(n), ni) for ni in range(n + 1)]))
                # print(allsubsets(7))
                maxim = -1000000
                if len(potential_lines) > 20:
                    continue

                subsets = allsubsets(len(potential_lines))
                #print(len(subsets))
                for i in range(len(subsets)):
                    for j in subsets[i]:
                        abstraction_scores[i] = abstraction_scores[i] + potential_line_scores[j]
                    if abstraction_scores[i] > maxim:
                        maxim = abstraction_scores[i]
                        optimal_abstraction_indices = subsets[i]
                optimal_abstraction_lines=[]
                inclusion_score=0
                exclusion_score=0
                inclusion_factor=0
                redundancy_factor=0
                if len(optimal_abstraction_indices):
                    for i in optimal_abstraction_indices:
                        optimal_abstraction_lines.append(potential_lines[i])
                #print(potential_lines)
                #print("shikhar",optimal_abstraction_lines)
                actual_slice = eval(act_str)
                #print(type(actual_slice))
                # print(abstraction_scores)
                #print(optimal_abstraction_indices)
                actual_output = []
                for x in optimal_abstraction_lines:
                    if x in actual_slice:
                        inclusion_score = inclusion_score + 1
                    else:
                        exclusion_score = exclusion_score + 1
                size_of_slice = len(actual_slice)
                #print(size_of_slice)
                if len(actual_slice) > 0 and len(optimal_abstraction_lines) > 0:
                    inclusion_factor = float(inclusion_score) / float(size_of_slice)
                    redundancy_factor = float(exclusion_score) / float(len(optimal_abstraction_lines))
                #
                optimal_factors = (inclusion_factor, redundancy_factor)
                optimal_slice = optimal_abstraction_lines
                #print(actual_slice)
                #print("result",optimal_slice)
                #print(optimal_factors)
                #print(maxim)

                inclusion_factor_repeat += inclusion_factor
                redundancy_factor_repeat -= redundancy_factor
        if inclusion_factor_repeat > inclusion_factor_overall and redundancy_factor_repeat < redundancy_factor_overall :
            wmax = wcurrent
            inclusion_factor_overall = inclusion_factor_repeat
            redundancy_factor_overall = redundancy_factor_repeat

        # if count_progs==1:
        #     break

    print(optimal_factors_overall)
    #print("Optimal w calculated is: ",wmax)


    with open('woptimal.txt', 'w') as f:
        for item in wmax:
            f.write("%s\n" % item)
        f.write(str(iterations))


    wfinalized = wmax

    ##################################  SLICE CHECKER : TESTING NEW SLICE ###########################################################

    # NOTE(review): w_two_thousand appears unused below; the weights actually
    # used come from wfinalized, which is defined earlier in this function
    # (outside this excerpt) -- confirm before deleting.
    w_two_thousand = [-0.9802029567666375, -0.5132642928713214, 0.5706250114071445, 0.025339485495515124, 0.8690276583782182, 0.655824155349916, 0.5022090912165711, 0.5051806922382631, 0.18557630733413233, 0.9212306986303516, 0.8968037088584999, 0.6437325270397469, 0.44476379829802837, -0.8536929468517367, 0.4222117335201401, -0.40847059242421513, 0.761763193101407, 0.35349177299679035, -0.21655973252123006, -0.9960377998232928, 0.7445876728783363, 0.5148185200640216, 0.7902944953631392, -0.5290561661682993, 0.9317691733109377, -0.7559530075150869, 0.9164650786054349, -0.8159237870065996, -0.6271712369874187, 0.02828103104751878, -0.4630512136868439]

    # wmax: learned per-feature weights used to score candidate slice lines.
    wmax = wfinalized
    print(wmax)

    # Interactive test loop: for each (C file, variable, line) the user enters,
    # predict a program slice and compare it with the precomputed slice stored
    # under c_prog_slice_half/.
    while True:
        print("Enter 0 to stop testing , else enter 1")
        check = int(input())
        if check==0:
            break
        in_file = input("Enter the c program file name: ")
        #out_file = input("Enter the slice text file name: ")
        test_variable = input("Enter a variable name: ")
        test_line = input("Enter a line number: ")
        c_fileName = "c_prog/" + in_file
        # Expected-slice file shares the program's base name ("<x>.c" -> "<x>.txt").
        c_outfile = "c_prog_slice_half/" + in_file[:-2] + ".txt"



        # Parse the C program with the ANTLR-generated lexer/parser.
        input_file = FileStream(c_fileName)
        lexer = CLexer(input_file)
        stream = CommonTokenStream(lexer)
        parser = CParser(stream)
        tree = parser.compilationUnit()
        #print(tree.toStringTree(recog=parser))

        #print("Start Walking...")
        # First visitor: collects the variable names used in the program.
        v = MyCVisitor()
        v.visit(tree)
        #print(v.getVarList())

        # Second visitor: builds a crude textual control-flow graph (CFG).
        v1 = MyCVisitor2()
        v1.visit(tree)
        #print("\n\n\n", v1.getCrudeCfg())

        cfg_string = v1.getCrudeCfg()
        cfg_textdict = v1.getdict()
        #print(cfg_textdict)
        #print(cfg_string)
        # Tokenize the CFG string; tokens are line numbers, keywords such as
        # "while", and the bracket markers '[' / ']'.
        cfg_list = cfg_string.split(' ')
        #print(cfg_list)
        cfg_list = [x for x in cfg_list if x != '']
        #print(cfg_list)
        # The last numeric token in the CFG is the total number of lines.
        total_lines = 0
        for i in range(len(cfg_list) - 1, -1, -1):
            if cfg_list[i].isdigit():
                total_lines = int(cfg_list[i])
                break
        #print(total_lines)

        variables_list = v.getVarList()
        ################# potential lines #####################3
        # Candidate slice lines: every line before test_line, plus the body of
        # an enclosing while-loop (lines after test_line can still affect it
        # through the loop's back edge).
        # NOTE(review): .index() raises ValueError if test_line never appears
        # in the CFG tokens -- confirm inputs are validated upstream.
        index_linenum = cfg_list.index(test_line)
        #print(index_linenum)
        potential_lines = list(range(1, int(test_line)))
        # NOTE(review): potential_flag is never used below.
        potential_flag = 0
        # Scan backwards from test_line for an unmatched '[' preceded by
        # "while"; if found, add every line number of that loop's body.
        for i in range(index_linenum, -1, -1):
            if cfg_list[i] == ']':
                break
            if cfg_list[i] == '[':
                # print("yes")

                if cfg_list[i - 2] == "while":
                    j = i - 2
                    countopen = 1
                    # print("yes")
                    # Walk forward until the loop's brackets balance out,
                    # collecting every line number seen on the way.
                    while (countopen > 0):
                        if (cfg_list[j] == '['):
                            countopen = countopen + 1
                        if (cfg_list[j] == ']'):
                            countopen = countopen - 1
                        # print("yes")
                        if (cfg_list[j].isdigit()):
                            # print("hello")
                            if (cfg_list[j] != test_line and int(cfg_list[j]) not in potential_lines):
                                potential_lines.append(int(cfg_list[j]))
                        j = j + 1
                    break
        #print(potential_lines)

        potential_line_features = []
        # for i in range(total_lines):
        #     if i + 1 in potential_lines:
        #         potential_line_features.append(line_features[i])
        # print(potential_line_features)
        potential_line_scores = [0] * len(potential_lines)

        # Score each candidate line as the weighted sum of its feature vector.
        potential_line_features = feature_calculation(test_variable, test_line, cfg_textdict, cfg_list, total_lines, variables_list,potential_lines)
        for i in range(len(potential_line_scores)):
            # line_scores.append(0)
            for j in range(len(wmax)):
                potential_line_scores[i] = potential_line_scores[i] + (wmax[j] * potential_line_features[i][j])
        #print(potential_line_scores)
        # Exhaustively score every subset of candidate lines and keep the
        # subset with the maximum total score as the predicted slice.
        # NOTE(review): this is O(2^n) in the number of candidate lines.
        abstraction_scores = [0] * (2 ** (len(potential_lines)))
        allsubsets = lambda n: list(itertools.chain(*[itertools.combinations(range(n), ni) for ni in range(n + 1)]))
        maxim = -1000000
        subsets = allsubsets(len(potential_lines))
        #print(len(subsets))
        for i in range(len(subsets)):
            for j in subsets[i]:
                abstraction_scores[i] = abstraction_scores[i] + potential_line_scores[j]
            if abstraction_scores[i] > maxim:
                maxim = abstraction_scores[i]
                optimal_abstraction_indices = subsets[i]
        # print(abstraction_scores)
        #print(optimal_abstraction_indices)
        # Map winning subset indices back to real line numbers.
        optimal_abstraction_lines = []
        if len(optimal_abstraction_indices):
            for i in optimal_abstraction_indices:
                optimal_abstraction_lines.append(potential_lines[i])
        #print(optimal_abstraction_lines)
        optimal_slice_final = optimal_abstraction_lines
        #print(optimal_slice_final)

        # Load the ground-truth slices: each line of the file is
        # "('var','line'):<slice>".
        # NOTE(review): file handle is never closed -- prefer a with-block.
        actual_slice_file = open(c_outfile, "r")
        actual_slice_text = actual_slice_file.read().splitlines()
        #print("actual slice text - ", actual_slice_text)
        actual_slice_dictionary = {}
        for i in actual_slice_text:
            key_value = i.split(":")
            # print(key_value[0])
            # print(key_value[1])
            # key_value[1].replace(",\n",'')
            actual_slice_dictionary[key_value[0]] = key_value[1]
        #new_slice = [i.replace('"', '') for i in actual_slice_dictionary]
        #print("new slice - ", new_slice)
        print("actual slice dictionary - ", actual_slice_dictionary)

        # Reconstruct the dictionary key in the same "('var','line')" form the
        # slice file uses.
        actual_slice_key = "('"+test_variable+"','"+test_line+"')"
        print("actual slice key - ",actual_slice_key)
        actual_slice = actual_slice_dictionary[actual_slice_key]
        print("updated actual slice - ", actual_slice)

        #
        ##########################   IGNORED  ######################################################
        # actual_slice_text = actual_slice_file.read().splitlines()
        # # print(actual_slice_text)
        # actual_slice_dictionary = {}
        # for i in actual_slice_text:
        #     key_value = i.split(":")
        #     # print(key_value[0])
        #     # print(key_value[1])
        #     # key_value[1].replace(",\n",'')
        #     actual_slice_dictionary[key_value[0]] = key_value[1]
        # act_str = actual_slice_dictionary[str((test_variable, test_line)).replace(' ', '')]
        # #print(act_str)
        # NOTE(review): BUG -- act_str is never assigned (its assignment above
        # is commented out), so this eval() raises NameError at runtime; it
        # also clobbers the actual_slice read from the dictionary just above.
        # eval() on file contents is unsafe anyway; prefer ast.literal_eval.
        actual_slice = eval(act_str)
        size_of_slice = len(actual_slice)
        inclusion_score=0.0
        exclusion_score=0.0
        inclusion_factor=0.0
        redundancy_factor=0.0
        # for x in optimal_abstraction_lines:
        #     if x in actual_slice:
        #         inclusion_score = inclusion_score + 1
        #     else:
        #         exclusion_score = exclusion_score + 1
        # size_of_slice = len(actual_slice)
        # # print(size_of_slice)
        # if len(actual_slice) > 0 and len(optimal_abstraction_lines) > 0:
        #     inclusion_factor = float(inclusion_score) / float(size_of_slice)
        #     redundancy_factor = float(exclusion_score) / float(len(optimal_abstraction_lines))
        # optimal_factors = (inclusion_factor,redundancy_factor)

        #########################################################################################
        print("Actual Slice is: ", actual_slice)
        print("Predicted Slice is: ", optimal_slice_final)
示例#19
0
def main(argv):
    """Lex and parse the C source file named by ``argv[1]``.

    Wires up the ANTLR pipeline (character stream -> lexer -> token
    stream -> parser) and drives the parse from the ``source_code``
    start rule.
    """
    char_stream = FileStream(argv[1])
    token_source = CLexer(char_stream)
    token_stream = CommonTokenStream(token_source)
    c_parser = CParser(token_stream)
    tree = c_parser.source_code()
示例#20
0
    # NOTE(review): the enclosing definition is above this excerpt (presumably
    # doFunctions). Emits a wrapper for each parsed C function: doFunction
    # receives the parsed signature data plus the function's name.
    for function in functions:
        doFunction(functions[function], function)


########################### TYPES ##############################################
def doType(c_type):
    """Register one C type name in the module-level lookup tables.

    Maps the type to itself in ``cor`` and to a ctypes-style
    ``POINTER(<type>)`` spelling in ``corp``.
    """
    cor[c_type] = c_type
    pointer_form = 'POINTER(' + c_type + ')'
    corp[c_type] = pointer_form

def doTypes(c_types):
    """Print the collection of parsed C type names, then register each
    of them via :func:`doType`."""
    print(c_types)
    for type_name in c_types:
        doType(type_name)


################################################################################
if __name__ == '__main__':
    # Entry point: parse the C header named on the command line and emit
    # wrappers for every kind of definition the parser collected.
    cheader = sys.argv[1]
    p = CParser(cheader)
    p.processAll()

    for k in p.dataList:
        if bool(p.defs[k]):
            # BUG FIX: the original compared with "k is 'types'" etc.
            # Identity comparison against string literals is
            # implementation-dependent (and raises SyntaxWarning on modern
            # CPython); use equality. elif is used because k can match at
            # most one branch.
            if k == 'types':
                doTypes(p.defs[k])
            elif k == 'structs':
                doStructs(p.defs[k])
            elif k == 'functions':
                doFunctions(p.defs[k])

示例#21
0
import sys
import antlr3
from CLexer import CLexer
from CParser import CParser

# Read the C translation unit named on the command line and run it through
# the antlr3-generated lexer/parser.
# FIX: the original leaked the file handle from a bare open(...).read();
# a with-block closes the file deterministically.
with open(sys.argv[1]) as source_file:
    cStream = antlr3.StringStream(source_file.read())
lexer = CLexer(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = CParser(tStream)
parser.translation_unit()
示例#22
0
                 "events). Can be None.")),
    ])

    # Make sure the documentation output directory exists before writing.
    if not os.path.exists(PATH):
        os.makedirs(PATH)

    # Emit the package stub: the Atrinik module docstring, its dumped
    # attributes, and an explicit __all__ list wrapped to 80 columns.
    with open_doc_file(os.path.join(PATH, "__init__.py")) as f:
        obj = sys.modules["Atrinik"]
        dump_docstring(obj, f)
        names = dump_obj(obj, f, defaults=defaults)
        line = "__all__ = [{}]".format(", ".join(names))
        f.write(textwrap.fill(line, width=80,
                              subsequent_indent=" " * 11))
        f.write("\n")

# Skip documentation generation entirely while either unit-test suite runs.
if not (GetSettings()["unit_tests"] or GetSettings()["plugin_unit_tests"]):
    parser = CParser()
    matches = {}

    def scan(path):
        """Parse every file below *path* and merge the results into matches."""
        for root, dirs, files in os.walk(path):
            for fname in files:
                full_path = os.path.join(root, fname)
                matches.update(parser.parse(full_path))

    for source_root in ("src/server",
                        "src/plugins/plugin_python/include",
                        "src/include"):
        scan(source_root)
    matches.update(parser.parse("../common/toolkit/socket.h"))

    main()
示例#23
0
                 "events). Can be None.")),
    ])

    # Create the documentation output directory if it is missing.
    if not os.path.exists(PATH):
        os.makedirs(PATH)

    # Write the package stub for the Atrinik module: docstring, dumped
    # attribute documentation, and an __all__ declaration wrapped to 80 cols.
    with open_doc_file(os.path.join(PATH, "__init__.py")) as f:
        obj = sys.modules["Atrinik"]
        dump_docstring(obj, f)
        names = dump_obj(obj, f, defaults=defaults)
        line = "__all__ = [{}]".format(", ".join(names))
        f.write(textwrap.fill(line, width=80,
                              subsequent_indent=" " * 11))
        f.write("\n")

# Documentation is only generated when no unit tests are being run.
if not GetSettings()["unit_tests"] and not GetSettings()["plugin_unit_tests"]:
    parser = CParser()
    matches = {}

    def scan(path):
        """Recursively parse everything below *path* into matches."""
        for dirpath, _dirs, filenames in os.walk(path):
            for name in filenames:
                matches.update(parser.parse(os.path.join(dirpath, name)))

    scan_roots = ["src/server", "src/plugins/plugin_python/include",
                  "src/include"]
    for scan_root in scan_roots:
        scan(scan_root)
    matches.update(parser.parse("src/toolkit/include/socket.h"))

    main()
示例#24
0
                 "events). Can be None.")),
    ])

    # Ensure the output directory exists before any file is written.
    if not os.path.exists(PATH):
        os.makedirs(PATH)

    # Emit the package stub: module docstring, dumped attributes, and an
    # __all__ line wrapped to 80 columns.
    with open_doc_file(os.path.join(PATH, "__init__.py")) as f:
        obj = sys.modules["Atrinik"]
        dump_docstring(obj, f)
        names = dump_obj(obj, f, defaults=defaults)
        line = "__all__ = [{}]".format(", ".join(names))
        f.write(textwrap.fill(line, width=80, subsequent_indent=" " * 11))
        f.write("\n")


# Only build the documentation when neither unit-test mode is active.
if not (GetSettings()["unit_tests"] or GetSettings()["plugin_unit_tests"]):
    parser = CParser()
    matches = {}

    def scan(path):
        """Walk *path* and fold each file's parse results into matches."""
        for top, subdirs, entries in os.walk(path):
            for entry in entries:
                matches.update(parser.parse(os.path.join(top, entry)))

    for tree_root in ("src/server",
                      "src/plugins/plugin_python/include",
                      "src/include"):
        scan(tree_root)
    matches.update(parser.parse("../common/toolkit/socket.h"))

    main()
示例#25
0
    return s


# Build a <span> that shows hover text, working similar to <acronym>.
# @param s Text to display.
# @param t Hover (title) text.
# @return The formatted <span> markup.
def make_span_hover(s, t):
    template = ('<span title="{0}" style="border-bottom: 1px dashed #000; '
                'cursor: help;">{1}</span>')
    return template.format(t, s)


# Initialize the CParser with the #define patterns we want collected.
_DEFINE_PATTERNS = [
    "FLAG_(.*)",
    "MAP_FLAG_(.*)",
]
parser = CParser({"defines": _DEFINE_PATTERNS})


# Go through the source code files recursively, and parse them.
# @param path Path to start walking from.
def parse_rec(path):
    # All entries (files and directories) directly under path.
    nodes = os.listdir(path)

    for node in nodes:
        if os.path.isdir(path + "/" + node):
            # Descend into subdirectories depth-first.
            parse_rec(path + "/" + node)
        elif os.path.isfile(path + "/" + node):
            # Only C sources and headers are fed to the module-level parser.
            if node[-2:] == ".c" or node[-2:] == ".h":
                parser.parse(path + "/" + node)