Example #1
def get_file_path_jncss(strategy, class_name, test_dir, results_dir_name,
                        bug_type, stopping_condition, search_budget, criterion,
                        runid, javancss_jar_path):
    package = class_name.split(".")[0:-1]
    package_dir = utils.get_package_dir(package)

    if "randoop".upper() in criterion.upper():
        only_class_name = "RegressionTest?.java"  #Randoop genera varios .java, c/u con 500 tests
    else:
        only_class_name = class_name.split(".")[-1] + "_ESTest.java"
    test_suite_file_path = os.path.join(test_dir, package_dir, only_class_name)

    result_jncss_temp = os.path.join(
        results_dir_name, "javancss_temp",
        "{}_{}_{}_{}_{}_{}".format(strategy, bug_type, stopping_condition,
                                   search_budget, class_name, criterion))
    utils.make_dirs_if_not_exist(result_jncss_temp)

    result_jncss_temp = os.path.join(result_jncss_temp, "{}.txt".format(runid))
    command = "java -jar {} {} > {}".format(javancss_jar_path,
                                            test_suite_file_path,
                                            result_jncss_temp)
    utils.print_command(command)
    utils.lock_if_windows()
    try:
        subprocess.check_output(command, shell=True)
    except Exception as e:
        print("Error al ejecutar el comando '{}'. Error {}".format(command, e))
    finally:
        utils.release_if_windows()
    return result_jncss_temp
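
A hypothetical call, only to illustrate the argument order and where the JavaNCSS output lands; every value below is a placeholder, not taken from the project:

result_jncss = get_file_path_jncss(
    strategy="default", class_name="org.example.Stack",
    test_dir="results/testgen/Stack/test", results_dir_name="results",
    bug_type="all", stopping_condition="maxtime", search_budget="120",
    criterion="epatransition", runid=1,
    javancss_jar_path="lib/javancss.jar")
# Expected return value (path separators shown as "/"):
# results/javancss_temp/default_all_maxtime_120_org.example.Stack_epatransition/1.txt
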
Example #2
def run_randoop(projectCP, class_name, randoop_jar_path, testdir,
                search_budget):
    def remove_randoop_error_test(testdir):
        for test in os.listdir(testdir):
            if "ErrorTest" in test:
                test_file = os.path.join(testdir, test)
                os.unlink(test_file)

            #if not test_file.endswith(".java"):
            #    continue
            #if not test[-1:].isdigit(): # Randoop generates a test file without tests.
            #    continue

    """def change_class_name(test_dir, new_classname):
        utils.rename_class(test_dir, "RegressionTest", new_classname)"""

    utils.make_dirs_if_not_exist(testdir)
    sep = os.path.pathsep
    err_file = os.path.join(testdir, "err.txt")
    out_file = os.path.join(testdir, "out.txt")
    package = class_name.split(".")[0:-1]
    packages_dir = utils.get_package_dir(package)
    command = 'java -classpath {}{}{} randoop.main.Main gentests --testclass={} --time-limit={} --usethreads=true --junit-package-name={} --npe-on-non-null-input=expected --junit-output-dir={} > {} 2> {}'.format(
        projectCP, sep, randoop_jar_path, class_name, search_budget,
        packages_dir.replace(os.path.sep, ".")[:-1], testdir, out_file,
        err_file)
    utils.print_command(command)
    try:
        subprocess.check_output(command, shell=True)
    except Exception as e:
        print("Error al correr randoop con el comando '{}'".format(command, e))
    testdir_full = os.path.join(testdir, packages_dir)
    remove_randoop_error_test(testdir_full)
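
A sketch of how run_randoop might be invoked; the classpath, jar location, and budget below are placeholders:

run_randoop(projectCP="build/classes",
            class_name="org.example.Stack",
            randoop_jar_path="lib/randoop-all.jar",
            testdir="results/testgen/Stack/test",
            search_budget="60")
# Randoop writes RegressionTest*.java (and ErrorTest*.java, which are deleted afterwards)
# under results/testgen/Stack/test/org/example/.
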
Example #3
def main():
    global args
    args = parser.parse_args()

    print()
    print('Command-line argument values:')
    for key, value in vars(args).items():
        print('-', key, ':', value)
    print()

    test_params = [
        args.model,
        path_to_save_string(args.dataset),
        path_to_save_string(args.test_dataset), args.viewpoint_modulo,
        args.batch_size, args.epochs, args.lr, args.weight_decay, args.seed,
        args.routing_iters
    ]
    test_name = '_'.join([str(x) for x in test_params]) + '.pth'
    model_params = [
        args.model,
        path_to_save_string(args.dataset), args.viewpoint_modulo,
        args.batch_size, args.epochs, args.lr, args.weight_decay, args.seed,
        args.routing_iters
    ]
    model_name = '_'.join([str(x) for x in model_params]) + '.pth'
    header = 'model,training-dataset,test-dataset,viewpoint_modulo,' \
             'batch_size,epochs,lr,weight_decay,seed,em_iters,accuracy'
    snapshot_path = os.path.join('.', 'snapshots', model_name)
    result_path = os.path.join('.', 'results', 'pytorch_test.csv')

    make_dirs_if_not_exist([snapshot_path, result_path])

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    model, criterion, optimizer, scheduler = load_model(
        args.model,
        device_ids=args.device_ids,
        lr=args.lr,
        routing_iters=args.routing_iters)

    num_class, train_loader, test_loader = load_datasets(
        args.test_dataset, args.batch_size, args.test_batch_size,
        args.test_viewpoint_modulo)
    model.load_state_dict(torch.load(snapshot_path))
    acc, predictions, labels, logits = test(test_loader,
                                            model,
                                            criterion,
                                            chunk=1)
    print(f'Accuracy: {acc:.2f}%')
    print(f'Memory usage: {gpu_memory_usage()}')

    to_write = test_params + [acc.cpu().numpy()]
    append_to_csv(result_path, to_write, header=header)

    if args.roc != '':
        make_dirs_if_not_exist(args.roc)
        torch.save((predictions, labels, logits), args.roc)
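
The helpers append_to_csv, make_dirs_if_not_exist, load_model, load_datasets and test are defined elsewhere in the project. Purely as an assumption, append_to_csv could look roughly like this (write the header once, then append one row per call):

import csv
import os

def append_to_csv(path, row, header=None):
    # Hypothetical sketch: create the file with a header on first use, then append rows.
    is_new = not os.path.exists(path)
    with open(path, 'a', newline='') as f:
        writer = csv.writer(f)
        if is_new and header:
            writer.writerow(header.split(','))
        writer.writerow(row)
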
Example #4
    def __init__(self, name, junit_jar, instrumented_code_dir,
                 original_code_dir, evosuite_classes, evosuite_jar_path,
                 evosuite_runtime_jar_path, class_name, epa_path, criterion,
                 bug_type, stopping_condition, search_budget, runid, method,
                 results_dir_name, subdir_mutants, error_prot_list,
                 ignore_mutants_list, hamcrest_jar_path):
        threading.Thread.__init__(self)

        self.subdir_testgen = os.path.join(results_dir_name, "testgen", name,
                                           bug_type, stopping_condition,
                                           search_budget,
                                           criterion.replace(':', '_').lower(),
                                           "{}".format(runid))
        utils.make_dirs_if_not_exist(self.subdir_testgen)
        self.subdir_metrics = os.path.join(results_dir_name, "metrics", name,
                                           bug_type, stopping_condition,
                                           search_budget,
                                           criterion.replace(':', '_').lower(),
                                           "{}".format(runid))
        self.generated_test_report_evosuite_dir = os.path.join(
            self.subdir_testgen, 'report_evosuite_generated_test')
        self.subdir_mutants = subdir_mutants

        self.name = name
        self.junit_jar = junit_jar
        self.instrumented_code_dir = instrumented_code_dir
        self.original_code_dir = original_code_dir
        self.evosuite_classes = evosuite_classes
        self.evosuite_jar_path = evosuite_jar_path
        self.evosuite_runtime_jar_path = evosuite_runtime_jar_path
        self.class_name = class_name
        self.epa_path = epa_path
        self.criterion = criterion
        self.bug_type = bug_type
        self.generated_test_dir = os.path.join(self.subdir_testgen, 'test')
        self.generated_report_evosuite_dir = os.path.join(
            self.subdir_metrics, 'report_evosuite')
        self.generated_report_pitest_dir = os.path.join(
            self.subdir_metrics, 'report_pitest')
        self.generated_pitest_killer_test = os.path.join(
            self.generated_report_pitest_dir, 'killer_test')
        self.generated_report_mujava = os.path.join(self.subdir_metrics,
                                                    'report_mujava')
        self.stopping_condition = stopping_condition
        self.search_budget = search_budget
        self.runid = runid

        self.home_dir = os.path.dirname(os.path.abspath(__file__))
        self.bin_original_code_dir = get_subject_original_bin_dir(
            results_dir_name, name)
        self.bin_instrumented_code_dir = get_subject_instrumented_bin_dir(
            results_dir_name, name)
        self.method = method
        self.assert_type = AssertType.ASSERT.name  # default

        self.error_prot_list = error_prot_list
        self.ignore_mutants_list = ignore_mutants_list
        self.hamcrest_jar_path = hamcrest_jar_path
Example #5
        def execute_testsuite(class_dir, testsuite_name, output_dir, id_name):
            def read_results(result):
                def get_line_with_test_results(result):
                    with open(result) as f:
                        content = f.readlines()
                    last_line = ""
                    for line in content:
                        if re.match(r'^\s*$', line):  # skip blank lines
                            continue
                        else:
                            last_line = line
                    return last_line

                def get_total_and_failures(line):
                    def all_test_ok(line):
                        return "OK" in line

                    if all_test_ok(line):
                        return [[
                            int(s) for s in line.replace("(", "").split(" ")
                            if s.isdigit()
                        ][0], 0]
                    ret = [
                        int(s) for s in line.replace(",", "").replace(
                            "\n", "").split(" ") if s.isdigit()
                    ]
                    return ret

                last_line = get_line_with_test_results(result)
                total, failure = get_total_and_failures(last_line)
                return [total, failure]

            sep = os.path.pathsep
            output_dir = os.path.join(output_dir, "junit_results")
            utils.make_dirs_if_not_exist(output_dir)
            junit_log_name = id_name + "_junit_out.txt"
            junit_log_error_name = id_name + "_junit_err.txt"
            command_junit = "java -cp {}{}{}{}{}{}{} org.junit.runner.JUnitCore {} > {}{}{} 2> {}{}{}".format(
                self.junit_path, sep, self.hamcrest_jar, sep, class_dir, sep,
                self.test_suite_bin, testsuite_name, output_dir, os.path.sep,
                junit_log_name, output_dir, os.path.sep, junit_log_error_name)
            self.running_cmd += "\nRunning: {}".format(command_junit)
            try:
                subprocess.check_output(command_junit, shell=True)
            except Exception:
                print("Error executing command '{}'".format(command_junit))

            ret = read_results("{}{}{}".format(output_dir, os.path.sep,
                                               junit_log_name))
            self.running_cmd += "\n\tResults: {} , Total: {} - Failure: {}\n".format(
                os.path.join(output_dir, junit_log_name), ret[0], ret[1])
            return ret
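
The result parsing above relies on the last non-empty line of the JUnit console log. A standalone sketch of the same parsing logic, exercised on typical JUnit summary lines (the sample strings are illustrative, not captured from this project):

def parse_junit_summary(line):
    # Mirrors get_total_and_failures above: returns [total, failures].
    if "OK" in line:
        return [[int(s) for s in line.replace("(", "").split(" ") if s.isdigit()][0], 0]
    return [int(s) for s in line.replace(",", "").replace("\n", "").split(" ") if s.isdigit()]

print(parse_junit_summary("OK (12 tests)"))                    # [12, 0]
print(parse_junit_summary("Tests run: 15,  Failures: 2\n"))    # [15, 2]
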
Example #6
def measure_evosuite(evosuite_jar_path, projectCP, testCP, class_name,
                     epa_path, report_dir, criterion):
    utils.make_dirs_if_not_exist(report_dir)
    err_file = os.path.join(report_dir,
                            criterion.replace(":", "_") + "_err.txt")
    out_file = os.path.join(report_dir,
                            criterion.replace(":", "_") + "_out.txt")
    sep = os.path.pathsep
    command = 'java -jar {}evosuite-master-1.0.4-SNAPSHOT.jar -projectCP {}{}{} -class {} -Depa_xml_path={} -criterion {} -Dwrite_covered_goals_file=\"true\" -Dwrite_all_goals_file=\"true\" -Dreport_dir={} -measureCoverage > {} 2> {}'.format(
        evosuite_jar_path, projectCP, sep, testCP, class_name, epa_path,
        criterion, report_dir, out_file, err_file)
    utils.print_command(command)
    subprocess.check_output(command, shell=True)
Example #7
def measure_evosuite(evosuite_jar_path, projectCP, testCP, class_name,
                     epa_path, report_dir, criterion, inferred_epa_xml_path,
                     force_inferred_epa):
    utils.make_dirs_if_not_exist(report_dir)
    err_file = os.path.join(report_dir,
                            criterion.replace(":", "_") + "_err.txt")
    out_file = os.path.join(report_dir,
                            criterion.replace(":", "_") + "_out.txt")
    sep = "" if projectCP[-1] == os.path.pathsep else os.path.pathsep
    command = 'java -jar {} -projectCP {}{}{} -class {} -Depa_xml_path={} -criterion {} -Dwrite_covered_goals_file=\"true\" -Dwrite_all_goals_file=\"true\" -Dreport_dir={} -Dforce_inferred_epa={} -Dinferred_epa_xml_path={} -Dallows_actions_violates_precondition=\"false\" -measureCoverage > {} 2> {}'.format(
        evosuite_jar_path, projectCP, sep, testCP, class_name, epa_path,
        criterion, report_dir, force_inferred_epa, inferred_epa_xml_path,
        out_file, err_file)
    utils.print_command(command)
    try:
        subprocess.check_output(command, shell=True)
    except Exception as e:
        print(
            "Error running EvoSuite coverage measurement with command '{}'. Error {}"
            .format(command, e))
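
A hypothetical invocation; all paths are placeholders and assume evosuite_jar_path points at the EvoSuite jar itself (unlike the previous variant, which prepends a directory prefix):

measure_evosuite(evosuite_jar_path="lib/evosuite-master-1.0.4-SNAPSHOT.jar",
                 projectCP="build/classes",
                 testCP="results/testgen/Stack/test",
                 class_name="org.example.Stack",
                 epa_path="epas/Stack.xml",
                 report_dir="results/metrics/Stack/report_evosuite",
                 criterion="epatransitionmining",
                 inferred_epa_xml_path="results/metrics/Stack/inferred_epa.xml",
                 force_inferred_epa=True)
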
Example #8
    def run(self):
        if self.method in [
                EpatestingMethod.ONLY_TESTGEN.value,
                EpatestingMethod.BOTH.value,
                EpatestingMethod.BOTH_WITHOUT_MUJAVA.value
        ]:
            print('GENERATING TESTS')
            code_dir = (self.instrumented_code_dir
                        if "EPA" in self.criterion.upper()
                        else self.original_code_dir)
            if "MINING" in self.criterion.upper() or "Compiler_" in self.name:  # hack for Compiler
                code_dir = self.mining_code_dir

            bin_code_dir = (self.bin_instrumented_code_dir
                            if "EPA" in self.criterion.upper()
                            else self.bin_original_code_dir)
            if "MINING" in self.criterion.upper() or "Compiler_" in self.name:  # hack for Compiler
                bin_code_dir = self.bin_mining_code_dir
            if len(self.extra_classpath) != 0:
                bin_code_dir += os.path.pathsep + self.extra_classpath

            # If a test suite already exists for another bug_type, copy it
            testsuite_exists = False
            curr_bug_type = self.bug_type
            try:
                lock.acquire()
                testsuite_exists = cp_testsuite_if_exists_in_other_results(
                    curr_bug_type, self.subdir_testgen,
                    self.generated_test_report_evosuite_dir, self.class_name,
                    self.criterion)
            except Exception as e:
                testsuite_exists = False
                print(
                    "error copying from other bug_type folder to {}. Error {}".
                    format(self.subdir_testgen, e))
            finally:
                lock.release()

            if (not testsuite_exists):
                if self.criterion == "randoop":
                    run_randoop(projectCP=bin_code_dir,
                                class_name=self.class_name,
                                randoop_jar_path=self.randoop_jar_path,
                                testdir=self.generated_test_dir,
                                search_budget=self.search_budget)
                else:
                    run_evosuite(
                        evosuite_jar_path=self.evosuite_jar_path,
                        strategy=self.strategy_value,
                        projectCP=bin_code_dir,
                        class_name=self.class_name,
                        criterion=self.criterion,
                        epa_path=self.epa_path,
                        inferred_epa_xml_path=self.inferred_epa_xml,
                        test_dir=self.generated_test_dir,
                        stopping_condition=self.stopping_condition,
                        search_budget=self.search_budget,
                        report_dir=self.generated_test_report_evosuite_dir)

            add_fails = False
            if self.bug_type.upper() == BugType.ERRPROT.name:
                # When running in errprot mode, always remove asserts and specific exceptions
                self.assert_type = AssertType.NO_ASSERT_EXCEPTION.name
                #if("JDBCResultSet" in self.name):
                #add_fails= True;
            if self.assert_type.upper() in [
                    AssertType.NO_ASSERT.name,
                    AssertType.NO_ASSERT_EXCEPTION.name
            ]:
                if "randoop".upper() in self.criterion.upper():
                    test_dir = self.generated_test_dir
                    packages_dir = utils.get_package_dir(
                        self.class_name.split(".")[:-1])
                    test_dir_sub = os.path.join(test_dir, packages_dir)
                    for test_file_name in os.listdir(test_dir_sub):
                        test_file = os.path.join(test_dir_sub, test_file_name)
                        if not test_file.endswith(".java"):
                            continue
                        # ErrorTest files are generated by Randoop; they contain error-revealing tests that fail under PIT
                        if "ErrorTest" in test_file:
                            continue
                        workaround_test(self.generated_test_dir,
                                        self.class_name, test_file_name,
                                        add_fails, self.assert_type)
                else:
                    test_file_name = self.class_name.split(
                        ".")[-1] + "_ESTest.java"
                    workaround_test(self.generated_test_dir, self.class_name,
                                    test_file_name, add_fails,
                                    self.assert_type)

            utils.compile_workdir(self.generated_test_dir,
                                  self.generated_test_dir, bin_code_dir,
                                  self.junit_jar, self.evosuite_classes,
                                  self.evosuite_runtime_jar_path,
                                  self.extra_classpath)

        criterion = get_alternative_criterion_names(self.criterion)

        if self.method in [
                EpatestingMethod.ONLY_METRICS.value,
                EpatestingMethod.BOTH.value,
                EpatestingMethod.BOTH_WITHOUT_MUJAVA.value,
                EpatestingMethod.ONLY_METRICS_WITHOUT_MUJAVA.value
        ]:
            print('GENERATING METRICS')
            packages_dir = utils.get_package_dir(
                self.class_name.split(".")[:-1])
            test_dir_sub = os.path.join(self.generated_test_dir, packages_dir)
            if not os.path.exists(test_dir_sub):
                print("not found test folder ! '{}'".format(test_dir_sub))
                exit(1)

            bin_code_dir = (self.bin_instrumented_code_dir
                            if "EPA" in self.criterion.upper()
                            else self.bin_original_code_dir)
            if "MINING" in self.criterion.upper() or "Compiler_" in self.name:  # hack for Compiler
                bin_code_dir = self.bin_mining_code_dir
            if len(self.extra_classpath) != 0:
                bin_code_dir += os.path.pathsep + self.extra_classpath

            ###to compile test suite
            #utils.compile_workdir(self.generated_test_dir, self.generated_test_dir, bin_code_dir, self.junit_jar, self.evosuite_classes, self.evosuite_runtime_jar_path, self.extra_classpath)

            #measure_evosuite(evosuite_jar_path=self.evosuite_jar_path, projectCP=self.bin_instrumented_code_dir, testCP=self.generated_test_dir, class_name=self.class_name, epa_path=self.epa_path, report_dir=self.generated_report_evosuite_dir, criterion="epatransition", inferred_epa_xml_path="", force_inferred_epa=False)
            #measure_evosuite(evosuite_jar_path=self.evosuite_jar_path, projectCP=self.bin_instrumented_code_dir, testCP=self.generated_test_dir, class_name=self.class_name, epa_path=self.epa_path, report_dir=self.generated_report_evosuite_dir, criterion="epaexception", inferred_epa_xml_path="", force_inferred_epa=False)
            #measure_evosuite(evosuite_jar_path=self.evosuite_jar_path, projectCP=self.bin_instrumented_code_dir, testCP=self.generated_test_dir, class_name=self.class_name, epa_path=self.epa_path, report_dir=self.generated_report_evosuite_dir, criterion="epaadjacentedges", inferred_epa_xml_path="", force_inferred_epa=False)
            # Hack to generate the inferred EPA for Randoop. For the other criteria it is
            # produced during test generation, so force_inferred_epa only needs to be True here.
            force_inferred_epa_value = "RANDOOP" in self.criterion.upper()
            measure_evosuite(evosuite_jar_path=self.evosuite_jar_path,
                             projectCP=bin_code_dir,
                             testCP=self.generated_test_dir,
                             class_name=self.class_name,
                             epa_path=self.epa_path,
                             report_dir=self.generated_report_evosuite_dir,
                             criterion="epatransitionmining",
                             inferred_epa_xml_path=self.inferred_epa_xml,
                             force_inferred_epa=force_inferred_epa_value)
            measure_evosuite(evosuite_jar_path=self.evosuite_jar_path,
                             projectCP=self.bin_mining_code_dir,
                             testCP=self.generated_test_dir,
                             class_name=self.class_name,
                             epa_path=self.epa_path,
                             report_dir=self.generated_report_evosuite_dir,
                             criterion="epaexceptionmining",
                             inferred_epa_xml_path="",
                             force_inferred_epa=False)
            measure_evosuite(evosuite_jar_path=self.evosuite_jar_path,
                             projectCP=self.bin_mining_code_dir,
                             testCP=self.generated_test_dir,
                             class_name=self.class_name,
                             epa_path=self.epa_path,
                             report_dir=self.generated_report_evosuite_dir,
                             criterion="epaadjacentedgesmining",
                             inferred_epa_xml_path="",
                             force_inferred_epa=False)
            #if force_inferred_epa_value:
            #   return

            # Run Pitest to measure

            targetTests = "{}_ESTest".format(self.class_name)
            if "randoop".upper() in self.criterion.upper():
                targetTests = "{}.RegressionTest".format(
                    utils.get_package_name_from_qualifiedname(self.class_name))
            pitest_measure(
                self.generated_report_pitest_dir, self.class_name, targetTests,
                self.original_code_dir.replace("mining", "original"),
                self.generated_test_dir)
            #pitest_measure(self.generated_report_pitest_dir, self.class_name, self.original_code_dir, self.generated_test_dir, utils.get_package_dir(self.class_name.split(".")[0:-1]))

            #TODO: add strategy
            if self.method in [
                    EpatestingMethod.ONLY_METRICS.value,
                    EpatestingMethod.BOTH.value
            ]:
                mujava_measure(
                    self.bug_type, self.name, criterion, self.subdir_mutants,
                    self.error_prot_list, self.ignore_mutants_list,
                    self.bin_original_code_dir.replace("mining", "original"),
                    self.generated_test_dir, self.class_name, self.junit_jar,
                    self.hamcrest_jar_path, self.generated_report_mujava)

            # Summarize the generated reports
            all_report_dir = os.path.join(self.subdir_metrics, 'all_reports')
            command_mkdir_report = 'mkdir {}'.format(all_report_dir)
            #utils.print_command(command_mkdir_report)
            if not os.path.exists(all_report_dir):
                os.makedirs(all_report_dir)

            copy_pitest_csv(self.name, self.generated_report_pitest_dir,
                            all_report_dir)

            statistics_csv = os.path.join(self.generated_report_evosuite_dir,
                                          "statistics.csv")
            copy_csv(statistics_csv, 'epacoverage_{}'.format(self.name),
                     all_report_dir)

            statistics_testgen_csv = ""
            if not self.criterion == "randoop":
                try:
                    statistics_testgen_csv = os.path.join(
                        self.generated_test_report_evosuite_dir,
                        "statistics.csv")
                    copy_csv(statistics_testgen_csv,
                             'statistics_testgen_{}'.format(self.name),
                             all_report_dir)
                except Exception:
                    print(
                        "statistics_testgen_csv (generated by Evosuite) not found"
                    )
            mujava_csv = os.path.join(self.generated_report_mujava,
                                      "mujava_report.csv")
            if os.path.exists(mujava_csv):
                copy_csv(mujava_csv, 'mujava_{}'.format(self.name),
                         all_report_dir)
            else:
                print("Does not exists mujava file {}".format(mujava_csv))

            epacoverage_csv = os.path.join(
                all_report_dir, "epacoverage_{}.csv".format(self.name))
            if self.criterion != "randoop":
                statistics_testgen_csv = os.path.join(
                    all_report_dir,
                    "statistics_testgen_{}.csv".format(self.name))
            jacoco_csv = os.path.join(all_report_dir,
                                      "{}_jacoco.csv".format(self.name))
            mutations_csv = os.path.join(all_report_dir,
                                         "{}_mutations.csv".format(self.name))

            pit_mutants_histogram(self.strategy_name, self.bug_type, criterion,
                                  self.search_budget, self.stopping_condition,
                                  mutations_csv, self.generated_test_dir,
                                  self.generated_pitest_killer_test,
                                  self.runid)
            # For test suite LOC
            result_jncss_temp = get_file_path_jncss(
                self.strategy_name, self.class_name, self.generated_test_dir,
                self.results_dir_name, self.bug_type, self.stopping_condition,
                self.search_budget, criterion, self.runid,
                self.javancss_jar_path)
            # For covered exceptions goals
            testgen_log_file_path = os.path.join(self.subdir_testgen,
                                                 "testgen_out.txt")

            make_report_resume.resume(
                self.class_name, epacoverage_csv, statistics_testgen_csv,
                jacoco_csv, mutations_csv, self.resume_csv, self.runid,
                self.stopping_condition, self.search_budget, criterion,
                self.bug_type, self.strategy_name, mujava_csv,
                result_jncss_temp, testgen_log_file_path)

        if self.method in [EpatestingMethod.ONLY_PIT_MUTANTS_HISTOGRAM.value]:
            mutations_csv = get_mutation_csv_pit(
                self.generated_report_pitest_dir)
            pit_mutants_histogram(self.strategy_name, self.bug_type, criterion,
                                  self.search_budget, self.stopping_condition,
                                  mutations_csv, self.generated_test_dir,
                                  self.generated_pitest_killer_test,
                                  self.runid)

        # Hack (for old executions)
        if self.method in [
                EpatestingMethod.ONLY_TEST_SUITE_LOC_AND_EXCEPTION.value
        ]:
            # For test suite LOC
            result_jncss_temp = get_file_path_jncss(
                self.strategy_name, self.class_name, self.generated_test_dir,
                self.results_dir_name, self.bug_type, self.stopping_condition,
                self.search_budget, criterion, self.runid,
                self.javancss_jar_path)
            # To obtain covered exceptions
            ####################
            testgen_log_file_path = os.path.join(self.subdir_testgen,
                                                 "testgen_out.txt")
            # This file contains the number of covered goals for each criterion (-measureCoverage)
            all_report_dir = os.path.join(self.subdir_metrics, 'all_reports')
            epacoverage_csv = os.path.join(
                all_report_dir, "epacoverage_{}.csv".format(self.name))
            # This file contains the total number of covered goals (including the exception criterion)
            statistics_testgen_csv = os.path.join(
                all_report_dir, "statistics_testgen_{}.csv".format(self.name))

            utils.make_dirs_if_not_exist(self.subdir_metrics)
            make_report_resume.resume_test_suite_loc_and_exceptions(
                self.class_name, self.resume_csv, self.runid,
                self.stopping_condition, self.search_budget, criterion,
                self.bug_type, result_jncss_temp, testgen_log_file_path,
                epacoverage_csv, statistics_testgen_csv)
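
run() dispatches on EpatestingMethod, AssertType and BugType, which are not shown in these examples. A sketch of what they might look like; the member names come from the code above, but the numeric values and the Enum base are assumptions:

from enum import Enum

class EpatestingMethod(Enum):
    # Hypothetical values; only the member names appear in the code above.
    ONLY_TESTGEN = 1
    ONLY_METRICS = 2
    BOTH = 3
    ONLY_METRICS_WITHOUT_MUJAVA = 4
    BOTH_WITHOUT_MUJAVA = 5
    ONLY_PIT_MUTANTS_HISTOGRAM = 6
    ONLY_TEST_SUITE_LOC_AND_EXCEPTION = 7

class AssertType(Enum):
    ASSERT = 1
    NO_ASSERT = 2
    NO_ASSERT_EXCEPTION = 3

class BugType(Enum):
    ERRPROT = 1
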
Example #9
    def __init__(self, name, strategy, junit_jar, instrumented_code_dir,
                 mining_code_dir, original_code_dir, evosuite_classes,
                 evosuite_jar_path, evosuite_runtime_jar_path, class_name,
                 epa_path, criterion, bug_type, stopping_condition,
                 search_budget, runid, method, results_dir_name,
                 subdir_mutants, error_prot_list, ignore_mutants_list,
                 hamcrest_jar_path, randoop_jar_path, javancss_jar_path,
                 extra_classpath):
        threading.Thread.__init__(self)

        # If the criterion is Randoop, then no strategy needs to be provided
        if "randoop".upper() in criterion.upper():
            self.strategy_value = self.strategy_name = strategy
        else:
            self.strategy_value = strategy.value
            self.strategy_name = strategy.name.lower()

        self.subdir_testgen = os.path.join(results_dir_name, "testgen", name,
                                           bug_type, stopping_condition,
                                           search_budget, self.strategy_name,
                                           criterion.replace(':', '_').lower(),
                                           "{}".format(runid))
        utils.make_dirs_if_not_exist(self.subdir_testgen)
        self.subdir_metrics = os.path.join(results_dir_name, "metrics", name,
                                           bug_type, stopping_condition,
                                           search_budget, self.strategy_name,
                                           criterion.replace(':', '_').lower(),
                                           "{}".format(runid))
        self.generated_test_report_evosuite_dir = os.path.join(
            self.subdir_testgen, 'report_evosuite_generated_test')
        self.subdir_mutants = subdir_mutants
        self.resume_csv = os.path.join(self.subdir_metrics, 'resume.csv')
        self.inferred_epa_xml = os.path.join(self.subdir_metrics,
                                             'inferred_epa.xml')

        self.name = name
        self.junit_jar = junit_jar
        self.instrumented_code_dir = instrumented_code_dir
        self.mining_code_dir = mining_code_dir
        self.original_code_dir = original_code_dir
        self.evosuite_classes = evosuite_classes
        self.evosuite_jar_path = evosuite_jar_path
        self.evosuite_runtime_jar_path = evosuite_runtime_jar_path
        self.class_name = class_name
        self.epa_path = epa_path
        self.criterion = criterion
        self.bug_type = bug_type
        self.generated_test_dir = os.path.join(self.subdir_testgen, 'test')
        self.generated_report_evosuite_dir = os.path.join(
            self.subdir_metrics, 'report_evosuite')
        self.generated_report_pitest_dir = os.path.join(
            self.subdir_metrics, 'report_pitest')
        self.generated_pitest_killer_test = os.path.join(
            self.generated_report_pitest_dir, 'killer_test')
        self.generated_report_mujava = os.path.join(self.subdir_metrics,
                                                    'report_mujava')
        self.stopping_condition = stopping_condition
        self.search_budget = search_budget
        self.runid = runid

        self.home_dir = os.path.dirname(os.path.abspath(__file__))
        self.bin_original_code_dir = get_subject_original_bin_dir(
            results_dir_name, name)
        self.bin_instrumented_code_dir = get_subject_instrumented_bin_dir(
            results_dir_name, name)
        self.bin_mining_code_dir = get_subject_mining_bin_dir(
            results_dir_name, name)
        self.results_dir_name = results_dir_name
        self.method = method
        self.assert_type = AssertType.ASSERT.name  # default

        self.error_prot_list = error_prot_list
        self.ignore_mutants_list = ignore_mutants_list
        self.hamcrest_jar_path = hamcrest_jar_path
        self.randoop_jar_path = randoop_jar_path
        self.javancss_jar_path = javancss_jar_path
        self.extra_classpath = extra_classpath
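
The strategy argument is expected to be an enum-like object exposing .value and .name when an EvoSuite criterion is used, while a plain string is enough for Randoop. A minimal sketch of that contract; the member below is hypothetical:

from enum import Enum

class Strategy(Enum):
    # Hypothetical member: only .value (passed to EvoSuite) and .name (used in result paths) matter.
    STEADY_STATE_GA = "Steady_State_GA"

# EvoSuite criteria: strategy_value = Strategy.STEADY_STATE_GA.value          -> "Steady_State_GA"
#                    strategy_name  = Strategy.STEADY_STATE_GA.name.lower()   -> "steady_state_ga"
# Randoop criterion: any string works, since both fields are set to it unchanged.
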
Example #10
def generate_pitest_workdir(pitest_dir):
    # To generate the pitest workdir we need the following hierarchy:
    # pom.xml
    # src/main/java/ < source code we want to test
    # src/test/java/ < testsuite
    print("Generating pitest workdir...")
    #command_mkdir_home = "mkdir {}".format(pitest_dir)
    #utils.print_command(command_mkdir_home)
    utils.make_dirs_if_not_exist(pitest_dir)
    #if not os.path.exists(pitest_dir):
    #    os.makedirs(pitest_dir)
    pitest_dir_src = os.path.join(pitest_dir, "src")
    utils.make_dirs_if_not_exist(pitest_dir_src)
    #command_mkdir_src = "mkdir {}".format(pitest_dir_src)
    #utils.print_command(command_mkdir_src)
    #if not os.path.exists(pitest_dir_src):
    #    os.makedirs(pitest_dir_src)

    pitest_dir_src_main = os.path.join(pitest_dir, "src", "main")
    #command_mkdir_src_main = "mkdir {}".format(pitest_dir_src_main)
    #utils.print_command(command_mkdir_src_main)
    utils.make_dirs_if_not_exist(pitest_dir_src_main)
    #if not os.path.exists(pitest_dir_src_main):
    #   os.makedirs(pitest_dir_src_main)

    pitest_dir_src_main_java = os.path.join(pitest_dir, "src", "main", "java")
    #command_mkdir_src_main_java = "mkdir {}".format(pitest_dir_src_main_java)
    #utils.print_command(command_mkdir_src_main_java)
    utils.make_dirs_if_not_exist(pitest_dir_src_main_java)
    #if not os.path.exists(pitest_dir_src_main_java):
    #    os.makedirs(pitest_dir_src_main_java)

    pitest_dir_src_test = os.path.join(pitest_dir, "src", "test")
    #command_mkdir_src_test = "mkdir {}".format(pitest_dir_src_test)
    #utils.print_command(command_mkdir_src_test)
    utils.make_dirs_if_not_exist(pitest_dir_src_test)
    #if not os.path.exists(pitest_dir_src_test):
    #    os.makedirs(pitest_dir_src_test)

    pitest_dir_src_test_java = os.path.join(pitest_dir, "src", "test", "java")
    #command_mkdir_src_test_java = "mkdir {}".format(pitest_dir_src_test_java)
    #utils.print_command(command_mkdir_src_test_java)
    utils.make_dirs_if_not_exist(pitest_dir_src_test_java)
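
The same layout could be built more compactly with the standard library alone; a sketch, assuming only that pom.xml and the sources are copied in elsewhere:

import os

def generate_pitest_workdir_compact(pitest_dir):
    # Creates pitest_dir/src/main/java and pitest_dir/src/test/java, including intermediates.
    for parts in (("src", "main", "java"), ("src", "test", "java")):
        os.makedirs(os.path.join(pitest_dir, *parts), exist_ok=True)
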
Example #11
def main():
    global args
    args = parser.parse_args()

    print()
    print('Command-line argument values:')
    for key, value in vars(args).items():
        print('-', key, ':', value)
    print()

    params = [
        args.model,
        path_to_save_string(args.dataset), args.viewpoint_modulo,
        args.batch_size, args.epochs, args.lr, args.weight_decay, args.seed,
        args.routing_iters
    ]
    model_name = '_'.join([str(x) for x in params]) + '.pth'
    header = 'model,dataset,viewpoint_modulo,batch_size,epochs,lr,weight_decay,seed,em_iters,accuracy'
    snapshot_path = os.path.join('.', 'snapshots', model_name)
    data_path = os.path.join('.', 'results', 'training_data', model_name)
    result_path = os.path.join('.', 'results', 'pytorch_train.csv')

    make_dirs_if_not_exist([snapshot_path, data_path, result_path])

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    model, criterion, optimizer, scheduler = load_model(
        args.model,
        device_ids=args.device_ids,
        lr=args.lr,
        routing_iters=args.routing_iters)
    num_class, train_loader, test_loader = load_datasets(
        args.dataset, args.batch_size, args.test_batch_size,
        args.viewpoint_modulo)

    best_acc = 0
    training_accuracies = []
    test_accuracies = []

    if args.append:
        model.load_state_dict(torch.load(snapshot_path))
    try:
        for epoch in range(1, args.epochs + 1):
            print()
            acc = train(train_loader,
                        model,
                        criterion,
                        optimizer,
                        epoch,
                        epochs=args.epochs,
                        log_interval=args.log_interval)
            training_accuracies.append(acc)
            scheduler.step(acc)
            print('Epoch accuracy was %.1f%%. Learning rate is %.9f.' %
                  (acc, optimizer.state_dict()['param_groups'][0]['lr']))
            if epoch % args.test_interval == 0:
                test_acc, __, __, __ = test(test_loader,
                                            model,
                                            criterion,
                                            chunk=args.test_size)
                test_accuracies.append(test_acc)
                if test_acc > best_acc:
                    best_acc = test_acc
    except KeyboardInterrupt:
        print('Cancelled training after %d epochs' % (epoch - 1))
        args.epochs = epoch - 1

    acc, predictions, labels, logits = test(test_loader,
                                            model,
                                            criterion,
                                            chunk=1)
    print(f'Accuracy: {acc:.2f}% (best: {best_acc:.2f}%)')

    to_write = params + [acc.cpu().numpy()]
    append_to_csv(result_path, to_write, header=header)
    snapshot(snapshot_path, model)
    #torch.save((accuracies, labels, predictions), data_path)

    if args.learn_curve != '':
        make_dirs_if_not_exist(args.learn_curve)
        torch.save((training_accuracies, test_accuracies), args.learn_curve)
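
The tuple saved to args.learn_curve can later be reloaded for plotting; a sketch, assuming a placeholder file name for whatever path was passed during training:

import torch

# 'learn_curve.pt' stands in for the --learn-curve path used in the run above.
training_accuracies, test_accuracies = torch.load('learn_curve.pt')
print('last training accuracy:', training_accuracies[-1])
print('last test accuracy:', test_accuracies[-1])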