Example No. 1
def executeTests():
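    # Trace the tested repository, dump per-test outcomes to outcomes.json, then for
    # each (granularity, bugged_type) pair build a planning matrix from the traces and
    # component priors, run the diagnoser on it, and write the ranked diagnoses both
    # as text (one repr per line) and as JSON.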
    web_prediction_results = utilsConf.get_configuration().web_prediction_results
    matrix_path = os.path.join(web_prediction_results, "matrix_{0}_{1}.matrix")
    outcomes_path = os.path.join(web_prediction_results, "outcomes.json")
    diagnoses_path = os.path.join(web_prediction_results, "diagnosis_{0}_{1}.matrix")
    diagnoses_json_path = os.path.join(web_prediction_results, "diagnosis_{0}_{1}.json")
    tested_repo = utilsConf.to_short_path(os.path.join(utilsConf.get_configuration().workingDir, "testedVer", "repo"))
    test_runner = TestRunner(tested_repo, AmirTracer(tested_repo, utilsConf.get_configuration().amir_tracer, utilsConf.get_configuration().DebuggerTests))
    test_runner.run()
    tests = test_runner.get_tests()
    json_observations = map(lambda test: test_runner.observations[test].as_dict(), tests)
    with open(outcomes_path, "wb") as f:
        f.write(json.dumps(json_observations))
    for granularity in utilsConf.get_configuration().prediction_files:
        for bugged_type in utilsConf.get_configuration().prediction_files[granularity]:
            components_priors = get_components_probabilities(bugged_type, granularity, test_runner, tests)
            tests_details = map(
                lambda test_name: (test_name, list(set(test_runner.tracer.traces[test_name].get_trace(granularity)) & set(components_priors.keys())),
                                   test_runner.observations[test_name].get_observation()),
                tests)
            matrix = matrix_path.format(granularity, bugged_type)
            write_planning_file(matrix, [], filter(lambda test: len(test[1]) > 0, tests_details),
                                priors=components_priors)
            inst = readPlanningFile(matrix)
            inst.diagnose()
            named_diagnoses = sorted(inst.get_named_diagnoses(), key=lambda d: d.probability, reverse=True)
            with open(diagnoses_path.format(granularity, bugged_type), "wb") as diagnosis_file:
                diagnosis_file.writelines("\n".join(map(lambda d: repr(d), named_diagnoses)))
            with open(diagnoses_json_path.format(granularity, bugged_type), "wb") as diagnosis_json:
                diagnosis_json.writelines(json.dumps(map(lambda d: dict([('_name', d[0])] + d[1].as_dict().items()), enumerate(named_diagnoses))))
    return test_runner
Example No. 2
def executeTests():
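    # Same pipeline as Example No. 1, except that the working tree is first restored
    # with `git checkout -- .` and traces are read from the configured traces_dir
    # instead of an AmirTracer.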
    web_prediction_results = utilsConf.get_configuration().web_prediction_results
    matrix_path = os.path.join(web_prediction_results, "matrix_{0}_{1}.matrix")
    outcomes_path = os.path.join(web_prediction_results, "outcomes.json")
    diagnoses_path = os.path.join(web_prediction_results, "diagnosis_{0}_{1}.matrix")
    diagnoses_json_path = os.path.join(web_prediction_results, "diagnosis_{0}_{1}.json")
    tested_repo = utilsConf.to_short_path(os.path.join(utilsConf.get_configuration().workingDir, "version_to_test_trace", "repo"))
    utilsConf.open_subprocess(["git", "-C", tested_repo, 'checkout', '--', '.'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()
    test_runner = TestRunner(tested_repo, utilsConf.get_configuration().traces_dir)
    test_runner.run()
    tests = test_runner.get_tests()
    json_observations = map(lambda test: test_runner.observations[test].as_dict(), tests)
    with open(outcomes_path, "wb") as f:
        f.write(json.dumps(json_observations))
    for bugged_type in utilsConf.get_configuration().prediction_files:
        for granularity in utilsConf.get_configuration().prediction_files[bugged_type]:
            components_priors = get_components_probabilities(bugged_type, granularity, test_runner, tests)
            tests_details = map(
                lambda test_name: (test_name, list(set(test_runner.traces[test_name].get_trace(granularity)) & set(components_priors.keys())),
                                   test_runner.observations[test_name].get_observation()),
                tests)
            matrix = matrix_path.format(granularity, bugged_type)
            write_planning_file(matrix, [], filter(lambda test: len(test[1]) > 0, tests_details),
                                priors=components_priors)
            inst = readPlanningFile(matrix)
            inst.diagnose()
            named_diagnoses = sorted(inst.get_named_diagnoses(), key=lambda d: round(d.probability, 2), reverse=True)
            with open(diagnoses_path.format(granularity, bugged_type), "wb") as diagnosis_file:
                diagnosis_file.writelines("\n".join(map(lambda d: repr(d), named_diagnoses)))
            with open(diagnoses_json_path.format(granularity, bugged_type), "wb") as diagnosis_json:
                diagnosis_json.writelines(json.dumps(map(lambda d: dict([('_name', d[0])] + d[1].as_dict().items()), enumerate(named_diagnoses))))
    return test_runner
Example No. 3
 def create_instances(self):
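     # Build synthetic diagnosis instances: sample a bug, map its buggy components
     # onto the known priors, pick covering tests, sample pass/fail observations,
     # write and diagnose a planning matrix, and accumulate Diagnosis_Results until
     # num_instances instances have been created.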
     MATRIX_PATH = os.path.join(utilsConf.get_configuration().experiments,
                                "{ITERATION}_{BUG_ID}_{GRANULARITY}_{BUGGED_TYPE}.matrix")
     DESCRIPTION = 'sample bug id {BUG_ID} with bug_passing_probability = {PROB} with granularity of {GRANULARITY} and bugged type {BUGGED_TYPE}'
     i = 0.0
     results = AvgResults()
     while i < self.num_instances:
         bug = random.choice(self.bugs)
         buggy_components = set()
         for component in set(self.components_priors.keys()):
             for buggy in bug.get_buggy_components(self.granularity, self.bugged_type):
                 if component in buggy:
                     buggy_components.add(component)
         if len(buggy_components) == 0:
             continue
         tests = reduce(set.__or__,
                        map(lambda x: self.test_runner.get_packages_tests().get('.'.join(x[:random.randint(0, len(x))]), set()),
                            map(lambda file_name: file_name.replace('.java', '').split('.'), buggy_components)),
                        set())
         if len(tests) < self.tests_per_instance:
             continue
         relevant_tests = random.sample(tests, self.tests_per_instance)
         tests_details = []
         for test_name in relevant_tests:
             trace = list(set(self.test_runner.tracer.traces[test_name].get_trace(self.granularity)) & set(
                 self.components_priors.keys()))
             tests_details.append((test_name, trace, self.sample_observation(trace, buggy_components)))
         if sum(map(lambda x: x[2], tests_details)) == 0:
             continue
         matrix = MATRIX_PATH.format(ITERATION=i, BUG_ID=bug.bug_id, GRANULARITY=self.granularity, BUGGED_TYPE=self.bugged_type)
         write_planning_file(matrix, list(buggy_components),
                             filter(lambda test: len(test[1]) > 0, tests_details),
                             priors=self.components_priors,
                             description=DESCRIPTION.format(BUG_ID=bug.bug_id, PROB=self.bug_passing_probability,
                                                            GRANULARITY=self.granularity, BUGGED_TYPE=self.bugged_type))
         inst = readPlanningFile(matrix)
         inst.diagnose()
         res = Diagnosis_Results(inst.diagnoses, inst.initial_tests, inst.error)
         results.update(res)
         print "created instance num {ITERATION} with bugid {BUG_ID} for granularity {GRANULARITY} and type {BUGGED_TYPE}".\
             format(ITERATION=i, BUG_ID=bug.bug_id, GRANULARITY=self.granularity, BUGGED_TYPE=self.bugged_type)
         i += 1
     return results.results
Example No. 4
def create_matrix_for_dir(examples_path,
                          bugged_path,
                          matrix_path,
                          files_to_read=None):
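    # Build a planning matrix from crash reports: every *.msec file under examples_path
    # contributes (file name, loaded-module trace, exploitability) as one test row, and
    # bugged_path, if given, lists the known-bad DLL names that form the bug set.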
    cases = []
    for msec_file in glob.glob(os.path.join(examples_path,
                                            "*.msec"))[:files_to_read]:
        try:
            modules, exploitability = get_loaded_modules_traces(msec_file)
            modules = filter_known_dlls(modules)
            cases.append(
                (os.path.basename(msec_file), modules, exploitability))
        except:
            print "fail load file", msec_file
    bugs = []
    if bugged_path is not None:
        with open(bugged_path) as f:
            lines = f.readlines()
            bugs = map(
                lambda line: str.lower(line).replace("\n", "").replace(
                    " ", "").replace(".dll", ""), lines)
    write_planning_file(matrix_path, bugs, cases)
Example No. 5
def abstraction():
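    # Sanity check: write three toy matrices that differ only in how the components of
    # T1/T3/T4 are abstracted, reading each one back as an instance.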
    write_planning_file(
        r"c:\temp\yemp_matrix.txt", ["a"],
        [["T1", ["a", "b", "d"], 1], ["T2", ["b"], 0],
         ["T3", ["a", "b", "c"], 1], ["T4", ["a", "b", "c"], 0]])
    instance = readPlanningFile(r"c:\temp\yemp_matrix.txt")
    write_planning_file(r"c:\temp\yemp_matrix.txt", ["a"],
                        [["T1", ["a", "b", "d"], 1], ["T2", ["b"], 0],
                         ["T3", ["a", "b"], 1], ["T4", ["a", "b"], 0]])
    instance = readPlanningFile(r"c:\temp\yemp_matrix.txt")
    write_planning_file(r"c:\temp\yemp_matrix.txt", ["a"],
                        [["T1", ["a", "b"], 1], ["T2", ["b"], 0],
                         ["T3", ["a", "b"], 1], ["T4", ["a", "b"], 0]])
    instance = readPlanningFile(r"c:\temp\yemp_matrix.txt")
    print "a"
Example No. 6
    Popen(['git', 'clone', git_path, git_commit_path]).communicate()
    Popen(['git', 'checkout', '-f', '{0}'.format(commit_to_observe)], cwd=git_commit_path).communicate()
    return git_commit_path


if __name__ == "__main__":
    tr = TestRunner(r"C:\t\a\MavenProj", AmirTracer(r"C:\t\a\MavenProj", r"C:\temp\uber-tracer-1.0.1-SNAPSHOT.jar", r"c:\temp\temp"))
    tr.run()
    import csv
    assert len(sys.argv) == 5
    _, repo, matrix_path, prediction_path, tracer_path = sys.argv
    for x in [repo, prediction_path, tracer_path]:
        assert os.path.exists(x)
    predictions = {}
    with open(prediction_path) as f:
        lines = list(csv.reader(f))[1:]
        predictions = dict(map(lambda line: (line[0].replace(".java", "").replace(os.path.sep, ".").lower(), line[1]), lines))
    tr = TestRunner(repo, AmirTracer(repo, tracer_path))
    tr.run()
    from sfl_diagnoser.Diagnoser.diagnoserUtils import write_planning_file
    tests = set(tr.tracer.traces.keys()) & set(tr.observations.keys())
    components_priors = {}
    for component in set(reduce(list.__add__, map(lambda test_name: tr.tracer.traces[test_name].files_trace(), tests), [])):
        for prediction in predictions:
            if component in prediction:
                components_priors[component] = predictions[prediction]
    components = set(components_priors.keys())
    tests_details = map(lambda test_name: (test_name, list(set(tr.tracer.traces[test_name].files_trace()) & components), tr.observations[test_name].get_observation()),
                        tests)
    write_planning_file(matrix_path, [], filter(lambda test: len(test[1]) > 0, tests_details), priors=components_priors)
Example No. 7
     assert os.path.exists(x)
 predictions = {}
 with open(prediction_path) as f:
     lines = list(csv.reader(f))[1:]
     predictions = dict(
         map(
             lambda line: (line[0].replace(".java", "").replace(
                 os.path.sep, ".").lower(), line[1]), lines))
 tr = TestRunner(repo, AmirTracer(repo, tracer_path))
 tr.run()
 from sfl_diagnoser.Diagnoser.diagnoserUtils import write_planning_file
 tests = set(tr.tracer.traces.keys()) & set(tr.observations.keys())
 components_priors = {}
 for component in set(
         reduce(
             list.__add__,
             map(
                 lambda test_name: tr.tracer.traces[test_name].files_trace(
                 ), tests), [])):
     for prediction in predictions:
         if component in prediction:
             components_priors[component] = predictions[prediction]
 components = set(components_priors.keys())
 tests_details = map(
     lambda test_name:
     (test_name,
      list(set(tr.tracer.traces[test_name].files_trace()) & components), tr.
      observations[test_name].get_observation()), tests)
 write_planning_file(matrix_path, [],
                     filter(lambda test: len(test[1]) > 0, tests_details),
                     priors=components_priors)
Example No. 8
    # results = Diagnosis_Results(inst.diagnoses, inst.initial_tests, inst.error)
    # results.get_metrics_names()
    # results.get_metrics_values()
    # print results
    # exit()
    # ei = sfl_diagnoser.Diagnoser.ExperimentInstance.addTests(inst, inst.hp_next())
    #
    # # check_influence()
    # # exit()
    # base = readPlanningFile(r"C:\Users\User\Downloads\MatrixFile4.txt")
    # from sfl_diagnoser.Planner.HP_Random import main_HP
    # main_HP(base)

    abstraction()
    write_planning_file(r"c:\temp\yemp_matrix.txt", ["a"],
                        [["T1", ["a", "c"], 1], ["T2", ["b"], 0],
                         ["T3", ["a", "b"], 1], ["T4", ["a", "b", "c"], 0]])
    write_planning_file(r"c:\temp\yemp_matrix2.txt", ["a"],
                        [["T1", ["a", "b"], 1], ["T2", ["b"], 0],
                         ["T3", ["a", "b"], 1], ["T4", ["a", "b"], 0]])
    instance = readPlanningFile(r"c:\temp\yemp_matrix.txt")
    instance = readPlanningFile(r"c:\temp\yemp_matrix2.txt")
    instance = readPlanningFile(
        r"C:\Users\User\Dropbox\softwareMbd (1)\Amir_AAAI18\vulnerability example\code_blocks_matrix.txt"
    )
    instance.get_named_diagnoses()
    named = get_xref_diagnoses(instance.get_named_diagnoses(), "#")
    # instance = readPlanningFile(r"c:\temp\merged_matrix.txt")
    function_instance = readPlanningFile(
        r"C:\vulnerabilities\ImageMagick_exploited\CVE-2016-8866\fuzzing\function_matrix.txt"
    )
Example No. 9
        return surefire_files

    def collect_traces(self):
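        # Walk the directories above the git checkout, collect every TRACE_*.txt under
        # a DebuggerTests folder, and parse each file into a Trace keyed by the
        # lower-cased test name embedded in the file name.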
        traces_files = []
        for root, dirs, _ in os.walk(os.path.abspath(os.path.join(self.git_path, "..\.."))):
            traces_files.extend(map(lambda name: glob.glob(os.path.join(root, name, "TRACE_*.txt")), filter(lambda name: name == "DebuggerTests", dirs)))
        for trace_file in reduce(list.__add__, traces_files, []):
            test_name = trace_file.split('\\Trace_')[1].split('_')[0].lower()
            with open(trace_file) as f:
                self.traces[test_name] = Trace(test_name, map(lambda line: line.strip().split()[2].strip(), f.readlines()))


def run_mvn_on_commits(commits, git_path):
    for commit in commits:
        observe_tests(commit, git_path)

def checkout_commit(commit_to_observe, git_path):
    git_commit_path = os.path.join(OBSERVE_PATH, os.path.basename(git_path), commit_to_observe)
    Popen(['git', 'clone', git_path, git_commit_path]).communicate()
    Popen(['git', 'checkout', '-f', '{0}'.format(commit_to_observe)], cwd=git_commit_path).communicate()
    return git_commit_path


if __name__ == "__main__":
    # tr = TestRunner(r"C:\Temp\accumulo", r"C:\Users\User\Documents\GitHub\java_tracer\tracer\target\uber-tracer-1.0.1-SNAPSHOT.jar")
    tr = TestRunner(r"C:\Temp\tik\tik\tika", r"C:\Users\User\Documents\GitHub\java_tracer\tracer\target\uber-tracer-1.0.1-SNAPSHOT.jar")
    tr.run()
    from sfl_diagnoser.Diagnoser.diagnoserUtils import write_planning_file
    tests_details = map(lambda test_name: (test_name, tr.traces[test_name].files_trace(), 0 if 'pass' == tr.observations[test_name].outcome else 1), set(tr.traces.keys()) & set(tr.observations.keys()))
    write_planning_file(r'c:\temp\tracer_matrix.txt', [], tests_details)
Example No. 10
 def create_instances(self):
     MATRIX_PATH = os.path.join(
         utilsConf.get_configuration().experiments,
         "{ITERATION}_{BUG_ID}_{GRANULARITY}_{BUGGED_TYPE}.matrix")
     DESCRIPTION = 'sample bug id {BUG_ID} with bug_passing_probability = {PROB} with granularity of {GRANULARITY} and bugged type {BUGGED_TYPE}'
     i = 0.0
     results = AvgResults()
     while i < self.num_instances:
         bug = random.choice(self.bugs)
         buggy_components = set()
         for component in set(self.components_priors.keys()):
             for buggy in bug.get_buggy_components(self.granularity,
                                                   self.bugged_type):
                 if component in buggy:
                     buggy_components.add(component)
         if len(buggy_components) == 0:
             continue
         tests = reduce(
             set.__or__,
             map(
                 lambda x: self.test_runner.get_packages_tests().get(
                     '.'.join(x[:random.randint(0, len(x))]), set()),
                 map(
                     lambda file_name: file_name.replace('.java', '').split(
                         '.'), buggy_components)), set())
         if len(tests) < self.tests_per_instance:
             continue
         relevant_tests = random.sample(tests, self.tests_per_instance)
         tests_details = []
         for test_name in relevant_tests:
             trace = list(
                 set(self.test_runner.tracer.traces[test_name].get_trace(
                     self.granularity))
                 & set(self.components_priors.keys()))
             tests_details.append(
                 (test_name, trace,
                  self.sample_observation(trace, buggy_components)))
         if sum(map(lambda x: x[2], tests_details)) == 0:
             continue
         matrix = MATRIX_PATH.format(ITERATION=i,
                                     BUG_ID=bug.bug_id,
                                     GRANULARITY=self.granularity,
                                     BUGGED_TYPE=self.bugged_type)
         write_planning_file(matrix,
                             list(buggy_components),
                             filter(lambda test: len(test[1]) > 0,
                                    tests_details),
                             priors=self.components_priors,
                             description=DESCRIPTION.format(
                                 BUG_ID=bug.bug_id,
                                 PROB=self.bug_passing_probability,
                                 GRANULARITY=self.granularity,
                                 BUGGED_TYPE=self.bugged_type))
         inst = readPlanningFile(matrix)
         inst.diagnose()
         res = Diagnosis_Results(inst.diagnoses, inst.initial_tests,
                                 inst.error)
         results.update(res)
         print "created instance num {ITERATION} with bugid {BUG_ID} for granularity {GRANULARITY} and type {BUGGED_TYPE}".\
             format(ITERATION=i, BUG_ID=bug.bug_id, GRANULARITY=self.granularity, BUGGED_TYPE=self.bugged_type)
         i += 1
     return results.results