def get_criterion_info_object(self, criterion):
    try:
        return self.mutant_info_object
    except AttributeError:
        minf_obj = MutantsInfoObject()
        mart_inf = os.path.join(self.mart_out, 'mutantsInfos.json')
        fdupes = os.path.join(self.mart_out, 'fdupes_duplicates.json')
        #in_mem_tce = os.path.join(self.mart_out, 'equidup-mutantsInfos.json')
        mart_inf_obj = common_fs.loadJSON(mart_inf)
        fduped_obj = common_fs.loadJSON(fdupes)
        #in_mem_tce_obj = common_fs.loadJSON(in_mem_tce)

        # remove equivalent and duplicate mutants
        for mid, dups in list(fduped_obj.items()):
            for d_mid in dups:
                del mart_inf_obj[d_mid]

        # Add elements
        for mid, info in list(mart_inf_obj.items()):
            minf_obj.add_element(mid, mutant_type=info['Type'], \
                                    mutant_locs=info['SrcLoc'], \
                                    mutant_function=info['FuncName'], \
                                    IRPosInFunc=info['IRPosInFunc'])
        self.mutant_info_object = minf_obj
        return minf_obj
def __init__(self, tests_working_dir, code_builds_factory, config, \
                                    head_explorer, checkpointer, \
                                    parent_meta_tool=None):
    # Set Constants
    self.compress_test_storage_dir = True

    # Set Direct Arguments Variables
    self.tests_working_dir = os.path.normpath(tests_working_dir)
    self.code_builds_factory = code_builds_factory
    self.config = config
    self.head_explorer = head_explorer
    self.checkpointer = checkpointer
    self.parent_meta_tool = parent_meta_tool

    # Verify Direct Arguments Variables
    ERROR_HANDLER.assert_true(self.tests_working_dir is not None, \
                            "Must specify tests_working_dir", __file__)

    # Set Indirect Arguments Variables
    ## Generate the tests into this folder (to be created by user)
    self.tests_storage_dir = os.path.join(
                    self.tests_working_dir, "tests_files")
    self.tests_storage_dir_archive = os.path.join(
                    self.tests_working_dir, "tests_files.tar.gz")
    self.custom_binary_dir = None
    if self.config.tool_user_custom is not None:
        self.custom_binary_dir = \
                    self.config.tool_user_custom.PATH_TO_TOOL_BINARY_DIR
    ERROR_HANDLER.assert_true(self.custom_binary_dir is None or \
                        os.path.isdir(self.custom_binary_dir), \
                        "specified custom_binary_dir does not exist", \
                        __file__)

    # Verify indirect Arguments Variables

    # Initialize Other Fields
    self.test_execution_time = {}
    self.test_execution_time_storage_file = os.path.join(\
                    self.tests_working_dir, "test_to_execution_time.json")
    self.shared_loc = multiprocessing.RLock()

    # Make Initialization Computation
    ## Create dirs
    if not os.path.isdir(self.tests_working_dir):
        self.clear_working_dir()
    if os.path.isfile(self.test_execution_time_storage_file):
        self.test_execution_time = common_fs.loadJSON(\
                            self.test_execution_time_storage_file)
    # decompress potential test storage archive
    if self.compress_test_storage_dir:
        if os.path.isfile(self.tests_storage_dir_archive):
            if os.path.isdir(self.tests_storage_dir):
                try:
                    shutil.rmtree(self.tests_storage_dir)
                except PermissionError:
                    self._dir_chmod777(self.tests_storage_dir)
                    shutil.rmtree(self.tests_storage_dir)
            common_fs.TarGz.decompressDir(self.tests_storage_dir_archive)
def _extract_coverage_data_of_a_test(self, enabled_criteria, \
                                test_execution_verdict, result_dir_tmp):
    ''' read json files and extract data
        return: the dict of criteria with covering count
    '''
    in_file = os.path.join(result_dir_tmp, self.cov_data_filename)
    cov_dat_obj = common_fs.loadJSON(in_file)
    cov_dat_obj = {TestCriteria[v]: cov_dat_obj[v] for v in cov_dat_obj}

    ERROR_HANDLER.assert_true(set(cov_dat_obj) == set(enabled_criteria), \
                                "mismatching criteria enabled", __file__)

    res = {c: {} for c in enabled_criteria}
    for c in cov_dat_obj:
        for filename, filedat in list(cov_dat_obj[c].items()):
            if filedat is not None:
                for id_, cov in list(filedat.items()):
                    ident = DriversUtils.make_meta_element(\
                                                    str(id_), filename)
                    res[c][ident] = cov

    # delete cov file
    os.remove(in_file)

    return res
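# Note: the DriversUtils.make_meta_element call above, and its counterpart
# reverse_meta_element used in later snippets, pair an element with its
# origin (a filename here, a tool alias elsewhere) in one unique key.
# Below is a minimal sketch of the assumed convention; the ':' separator
# and the function bodies are illustrative assumptions, not the actual
# muteria implementation.
def make_meta_element(element, prefix):
    # e.g. make_meta_element("42", "src/foo.c") -> "src/foo.c:42"
    return "{}:{}".format(prefix, element)

def reverse_meta_element(meta_element):
    # split on the first ':' only, so elements may themselves contain ':'
    prefix, element = meta_element.split(":", 1)
    return prefix, element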
def _update_tests_duplicates_map(self, recompute=False):
    if os.path.isfile(self.duplicated_tests_info_file) and not recompute:
        self.tests_duplicates_map = common_fs.loadJSON(
                                        self.duplicated_tests_info_file)
        return
    groups = {}
    for _, dat in self.testcases_configured_tools.items():
        tobj = dat[self.TOOL_OBJ_KEY]
        fmt = tobj.get_test_format_class()
        if fmt is None:
            continue
        if fmt not in groups:
            groups[fmt] = []
        groups[fmt].append(tobj)

    self.tests_duplicates_map = {}
    # for each group do fdupes and merge all
    for fmt, tt_list in groups.items():
        if len(tt_list) > 1:
            kepttest2duptest_map, test2keptdup = \
                                        fmt.cross_tool_fdupes(*tt_list)
            self.tests_duplicates_map.update(test2keptdup)
    if os.path.isdir(os.path.dirname(self.duplicated_tests_info_file)):
        common_fs.dumpJSON(self.tests_duplicates_map, \
                        self.duplicated_tests_info_file, pretty=True)
def get_instrumented_executable_paths_map(self, enabled_criteria):
    crit_to_exes_map = {}
    obj = common_fs.loadJSON(self.instrumentation_details)
    #exes = [p for _, p in list(obj.items())]
    exes = obj

    for criterion in enabled_criteria:
        crit_to_exes_map[criterion] = exes
    return crit_to_exes_map
def get_testcase_info_object(self):
    tc_info_obj = TestcasesInfoObject()
    ERROR_HANDLER.assert_true(os.path.isfile(self.test_list_storage_file),\
                "No test list file found, did generation occur?", __file__)
    dtl = common_fs.loadJSON(self.test_list_storage_file)
    for tc, targs in dtl:
        tc_info_obj.add_test(tc, testargs=targs)
    return tc_info_obj
def __init__(self, filename=None):
    self.filename = filename
    if self.filename is None or not os.path.isfile(self.filename):
        self.data = {}
    else:
        self.data = {objective: self._mem_optimize_sub_dat(test2dat) \
                            for objective, test2dat in \
                            common_fs.loadJSON(self.filename).items()}
def test_loadJSON(self):
    jfilename = os.path.join(self._worktmpdir, "jsontmp.json")
    exp = {"x": 1, "y": [2, 3, "zz"]}
    with open(jfilename, 'w') as fp:
        fp.write(str(exp).replace("'", '"'))
    res = common_fs.loadJSON(jfilename)
    # loading the JSON worked fine
    self.assertEqual(res, exp)
    os.remove(jfilename)
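# A natural companion is a write/read round trip through common_fs.dumpJSON,
# whose (obj, filename, pretty=...) call shape appears throughout these
# snippets. The test below is an illustrative sketch, not part of the
# original suite.
def test_dumpJSON_loadJSON_roundtrip(self):
    # data written by dumpJSON should be read back unchanged by loadJSON
    # (string-keyed dicts only, since JSON object keys are always strings)
    jfilename = os.path.join(self._worktmpdir, "jsontmp_roundtrip.json")
    exp = {"x": 1, "y": [2, 3, "zz"]}
    common_fs.dumpJSON(exp, jfilename, pretty=True)
    self.assertEqual(common_fs.loadJSON(jfilename), exp)
    os.remove(jfilename)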
def get_instrumented_executable_paths_map(self, enabled_criteria):
    crit_to_exes_map = {}
    obj = common_fs.loadJSON(self.instrumentation_details)
    #exes = [p for _, p in list(obj.items())]
    for c, c_exes in list(obj.items()):
        for k in c_exes:
            c_exes[k] = os.path.join(self.mart_out, c_exes[k])

    for criterion in enabled_criteria:
        crit_to_exes_map[criterion] = obj[criterion.get_str()]
    return crit_to_exes_map
def __init__(self, repository_manager, workdir=None):
    self.repository_manager = repository_manager
    self.workdir = workdir

    self.src_dest_fmt_to_handling_obj = {}

    self.code_conversion_tracker_file = None
    self.stored_files_mapping = None
    self.stored_files_mapping_file = None
    if self.workdir is not None:
        self.repo_path_saved_file = os.path.join(self.workdir, \
                                                    "saved_repo_path")
        self.code_conversion_tracker_file = \
                            os.path.join(self.workdir, "is_converting")
        self.stored_files_mapping_file = \
                            os.path.join(self.workdir, "files_map")
        exes, src_obj_map = \
                    self.repository_manager.get_relative_exe_path_map()
        if os.path.isdir(self.workdir):
            tmp = common_fs.loadJSON(self.stored_files_mapping_file)
            self.stored_files_mapping = {k: os.path.join(self.workdir, v) \
                                                for k, v in tmp.items()}
        else:
            count = 0
            self.stored_files_mapping = {}
            tmp = {}
            for f in exes:
                tmp[f] = str(count)
                self.stored_files_mapping[f] = \
                                os.path.join(self.workdir, str(count))
                count += 1
            for s, o in list(src_obj_map.items()):
                tmp[o] = str(count)
                self.stored_files_mapping[o] = \
                                os.path.join(self.workdir, str(count))
                count += 1
            os.mkdir(self.workdir)
            common_fs.dumpJSON(tmp, \
                            self.stored_files_mapping_file, pretty=True)

    # Initialize
    for src_fmt, obj_cls in formatfrom_function_tuples:
        if isinstance(obj_cls, ccs.IdentityCodeConverter):
            self._fmt_from_to_registration(src_fmt, src_fmt, obj_cls)
        else:
            ERROR_HANDLER.assert_true(\
                        src_fmt in obj_cls.get_source_formats(), \
                        "{} {} {} {}".format( \
                            "Error in 'formatfrom_function_tuples'", \
                            "src_fmt", src_fmt, \
                            "not in corresponding obj..."), __file__)
            for dest_fmt in obj_cls.get_destination_formats_for(src_fmt):
                self._fmt_from_to_registration(src_fmt, dest_fmt, obj_cls)
def collect_seeds(outdir, seeds_out, muteria_output, original_conf, \
                            toskip_added_devtest, compress_dest=True):
    # set the temporary conf
    tmp_conf = os.path.join(outdir, '_seed_conf.py')
    tmp_conf_template = os.path.join(os.path.dirname(__file__), \
                        global_conf_template_folder, 'get_seeds_conf.py')
    prev_sym_args_store_file = tmp_conf + ".symargs.json"
    with open(tmp_conf_template) as f:
        with open(tmp_conf, 'w') as g:
            o_c_dir = os.path.dirname(original_conf)
            o_c_module = os.path.splitext(\
                                    os.path.basename(original_conf))[0]
            g.write(f.read().replace(ORIGINAL_CONF_DIR_KEY, o_c_dir)\
                    .replace(ORIGINAL_CONF_MODULE_KEY, o_c_module)\
                    .replace(MUTERIA_OUTPUT_KEY, muteria_output)\
                    .replace('__SYM_ARGS_STORE_FILE__', \
                                        prev_sym_args_store_file)\
                    .replace(REMOVE_ADDED_DEVTESTS, \
                                        str(toskip_added_devtest)))

    # run muteria
    if os.path.isdir(muteria_output):
        shutil.rmtree(muteria_output)
    if run_muteria_with_conf(tmp_conf) != 0:
        error_exit("Muteria failed during seed collection")

    # collect the seeds using muteria ktest ...
    print("# Generating seeds after muteria...")
    if os.path.isdir(seeds_out):
        shutil.rmtree(seeds_out)
    zesti_tests_dir = os.path.join(muteria_output, 'latest', \
                'testscases_workdir', 'shadow_se', 'tests_files.tar.gz')
    if os.path.isfile(prev_sym_args_store_file):
        prev_sym_args = common_fs.loadJSON(prev_sym_args_store_file)
        os.remove(prev_sym_args_store_file)
        klee_tests_dir_or_sym_args = prev_sym_args
    else:
        klee_tests_dir_or_sym_args = None
    ccks = ConvertCollectKtestsSeeds(custom_binary_dir=None)
    ccks.generate_seeds_from_various_ktests(seeds_out, \
                    zesti_tests_dir, \
                    src_new_klee_ktest_dir_or_sym_args=\
                                        klee_tests_dir_or_sym_args, \
                    klee_ktest_is_sym_args=True, \
                    compress_dest=compress_dest, skip_failure=None)

    # delete muteria output
    os.remove(tmp_conf)
    shutil.rmtree(muteria_output)
def get_criterion_info_object(self, criterion):
    try:
        return self.mutant_info_object
    except AttributeError:
        minf_obj = MutantsInfoObject()
        gpmutation_inf_obj = common_fs.loadJSON(os.path.join(
                            self.gpmutation_out, "mutantsInfo.json"))

        # Add elements
        for mid, info in list(gpmutation_inf_obj.items()):
            minf_obj.add_element(mid, mutant_type=info['Type'], \
                                mutant_locs=info['SrcLoc'], \
                                mutant_function_name=info['FuncName'])
        self.mutant_info_object = minf_obj
        return minf_obj
def _get_generation_time_of_test(test, test_top_dir):
    """ Extract the generation timestamp of a test.
        The test and its top location are specified.
    """
    test_path = os.path.join(test_top_dir, test)

    # Search the test and get the time.
    # If dup, get the time of the duplicate test
    # 1. If test_path exists, it is not a dup
    to_search = None
    if os.path.isfile(test_path):
        to_search = test_path
    else:
        # search the test it is a dup of
        kepttest2duptest_map = common_fs.loadJSON(\
                                            self.keptktest2dupktests)
        ERROR_HANDLER.assert_true(test not in kepttest2duptest_map, \
                        "Test file not existing but in kept (BUG)", \
                        __file__)
        for kept, dup_list in kepttest2duptest_map.items():
            if test in dup_list:
                to_search = os.path.join(test_top_dir, kept)
        ERROR_HANDLER.assert_true(to_search is not None, \
                    "test not found in dup and nonexistent", __file__)

    # 2. search for the test and its time
    ktestlists = glob.glob(os.path.join(os.path.dirname(to_search), \
                                            "mutant-[0-9]*.ktestlist"))
    gen_time = None
    for klist in ktestlists:
        df = common_fs.loadCSV(klist, separator=",")
        val = df.loc[df.ktest == os.path.basename(to_search), \
                                            'ellapsedTime(s)'].values
        if len(val) == 1:
            gen_time = val[0]
            break
        ERROR_HANDLER.assert_true(len(val) == 0, "PB in ktestlist " + \
                    str(klist) + ". ktest appearing multiple times", \
                    __file__)
    ERROR_HANDLER.assert_true(gen_time is not None, \
                            "test not found in ktestlists", __file__)
    return gen_time
def __init__(self, *args, **kwargs):
    BaseTestcaseTool.__init__(self, *args, **kwargs)
    self.driver_config = None
    if self.config.get_tool_user_custom() is not None:
        self.driver_config = \
                    self.config.get_tool_user_custom().DRIVER_CONFIG
    if self.driver_config is None:
        self.driver_config = DriverConfigKlee()
    else:
        ERROR_HANDLER.assert_true(isinstance(self.driver_config, \
                                                    DriverConfigKlee), \
                                    "invalid driver config", __file__)
    self.test_details_file = \
                os.path.join(self.tests_working_dir, 'test_details.json')
    self.klee_used_tmp_build_dir = os.path.join(self.tests_working_dir, \
                            self._get_tool_name()+'_used_tmp_build_dir')

    # mapping between exes, to have a local copy for execution
    self.repo_exe_to_local_to_remote = {}

    self.keptktest2dupktests = os.path.join(self.tests_working_dir, \
                                        'kept_to_dup_ktests_map.json')

    self.ktest_with_must_exist_dir_file = os.path.join(\
                                        self.tests_storage_dir, \
                                        'ktest_to_must_exist_dir.json')
    if os.path.isfile(self.ktest_with_must_exist_dir_file):
        self.ktest_with_must_exist_dir = common_fs.loadJSON(\
                                    self.ktest_with_must_exist_dir_file)
    else:
        self.ktest_with_must_exist_dir = {}

    self.gen_tests_no_dup_with_seeds = \
                    self.driver_config.get_gen_tests_no_dup_with_seeds()

    if os.path.isdir(self.klee_used_tmp_build_dir):
        shutil.rmtree(self.klee_used_tmp_build_dir)
    os.mkdir(self.klee_used_tmp_build_dir)
def __init__(self, tests_working_dir, code_builds_factory, config, \
                                test_oracle_manager, checkpointer):
    # Set Constants

    # Set Direct Arguments Variables
    self.tests_working_dir = tests_working_dir
    self.code_builds_factory = code_builds_factory
    self.config = config
    self.test_oracle_manager = test_oracle_manager
    self.checkpointer = checkpointer

    # Verify Direct Arguments Variables
    ERROR_HANDLER.assert_true(self.tests_working_dir is not None, \
                            "Must specify tests_working_dir", __file__)

    # Set Indirect Arguments Variables
    ## Generate the tests into this folder (to be created by user)
    self.tests_storage_dir = os.path.join(
                    self.tests_working_dir, "tests_files")
    self.test_oracle_dir = os.path.join(
                    self.tests_working_dir, "test_oracles")

    # Verify indirect Arguments Variables

    # Initialize Other Fields
    self.test_execution_time = {}
    self.test_execution_time_storage_file = os.path.join(\
                    self.tests_working_dir, "test_to_execution_time.json")

    # Make Initialization Computation
    ## Create dirs
    if not os.path.isdir(self.tests_working_dir):
        self.clear_working_dir()

    self.test_oracle_manager.add_mapping(self, self.test_oracle_dir)

    if os.path.isfile(self.test_execution_time_storage_file):
        self.test_execution_time = common_fs.loadJSON(\
                            self.test_execution_time_storage_file)
def mutant_execution(outdir, avoid_meta_tests_list_file, \
                            avoid_meta_mutants_list_file, \
                            muteria_output, tg_conf, res):
    # set the temporary conf
    tmp_conf = os.path.join(outdir, '_mut_exec_conf.py')
    tmp_conf_template = os.path.join(os.path.dirname(__file__), \
                        global_conf_template_folder, 'mut_exe_conf.py')
    first_time = False
    cp_muteria = common_fs.loadJSON(
            os.path.join(muteria_output, 'latest', '_controller_dat',
                            'checkpoint_states', 'execution_state'))
    has_sm_matrix = os.path.isfile(
            os.path.join(muteria_output, 'latest', 'RESULTS_DATA',
                            'matrices', 'STRONG_MUTATION.csv'))
    if type(cp_muteria) == dict and cp_muteria["CHECKPOINT_DATA"] == \
                "CHECK_POINTED_TASK_COMPLETED" and not has_sm_matrix:
        first_time = True
    with open(tmp_conf_template) as f:
        with open(tmp_conf, 'w') as g:
            tg_c_dir = os.path.dirname(tg_conf)
            tg_c_module = os.path.splitext(os.path.basename(tg_conf))[0]
            g.write(f.read().replace(TG_CONF_DIR_KEY, tg_c_dir)\
                    .replace(TG_CONF_MODULE_KEY, tg_c_module)\
                    .replace(MUTERIA_OUTPUT_KEY, muteria_output)\
                    .replace(FIRST_TIME_MUTANT_EXECUTION_KEY, \
                                                    str(first_time))\
                    .replace(AVOID_META_TESTS_LIST_FILE_KEY, \
                                        avoid_meta_tests_list_file)\
                    .replace(AVOID_META_MUTANTS_LIST_FILE_KEY, \
                                        avoid_meta_mutants_list_file))

    # Run relevant mutant computation
    # Everything starting from test execution after all test generations
    # (see cmtools/run.sh) TODO: implement it in mut_exe_conf
    if run_cmtools_run_sh_with_conf(tmp_conf, muteria_output, res) != 0:
        error_exit("cmtools/run.sh failed during mutant execution")

    # delete tmp conf
    os.remove(tmp_conf)
def _collect_temporary_coverage_data(self, criteria_name_list, \
                                        test_execution_verdict, \
                                        used_environment_vars, \
                                        result_dir_tmp, \
                                        testcase):
    ''' extract coverage data into json file in result_dir_tmp '''
    cov_obj = coverage.Coverage(config_file=self.config_file)
    cov_obj.combine()
    tmp_dat_obj = cov_obj.get_data()
    in_dat_files = tmp_dat_obj.measured_files()

    # Get file map
    try:
        self.exes_rel
    except AttributeError:
        obj = common_fs.loadJSON(self.instrumentation_details)
        self.exes_abs = []
        self.exes_rel = []
        for rp, ap in list(obj.items()):
            self.exes_rel.append(rp)
            self.exes_abs.append(ap)
        # sort by path depth (separator count of the relative path);
        # the key was 'x.count(os.path.sep)', which on a (rel, abs)
        # tuple always returns 0, so it is applied to the rel path here
        self.exes_rel, self.exes_abs = zip(*sorted(\
                                zip(self.exes_rel, self.exes_abs), \
                                key=lambda x: x[0].count(os.path.sep)\
                            ))
        # get executables stmt and branches
        self.executable_lines = {}
        self.executable_arcs = {}
        for fn in self.exes_abs:
            pser = coverage.parser.PythonParser(filename=fn)
            pser.parse_source()
            self.executable_lines[fn] = pser.statements
            self.executable_arcs[fn] = pser.arcs()

    dat_obj = coverage.CoverageData()
    if len(in_dat_files) > 0:
        file_map = self.PathAliases(in_dat_files, self.exes_rel, \
                        self.used_srcs_dir, \
                        self._get_latest_top_output_dir(), \
                        self.code_builds_factory.\
                                repository_manager.repo_abs_path(''))
        dat_obj.update(tmp_dat_obj, aliases=file_map)

    # Get the coverages
    res = {c: {} for c in criteria_name_list}
    if TestCriteria.STATEMENT_COVERAGE in criteria_name_list:
        for rel_fn, abs_fn in zip(self.exes_rel, self.exes_abs):
            res[TestCriteria.STATEMENT_COVERAGE][rel_fn] = \
                        {ln: 0 for ln in self.executable_lines[abs_fn]}
            ln_list = dat_obj.lines(abs_fn)
            if ln_list is not None:
                res[TestCriteria.STATEMENT_COVERAGE][rel_fn].\
                                    update({ln: 1 for ln in ln_list})
    if TestCriteria.BRANCH_COVERAGE in criteria_name_list:
        for rel_fn, abs_fn in zip(self.exes_rel, self.exes_abs):
            res[TestCriteria.BRANCH_COVERAGE][rel_fn] = \
                    {str(an): 0 for an in self.executable_arcs[abs_fn]}
            an_list = dat_obj.arcs(abs_fn)
            if an_list is not None:
                res[TestCriteria.BRANCH_COVERAGE][rel_fn].\
                                update({str(an): 1 for an in an_list})

    # save the temporary coverage
    res_str = {v.get_str(): res[v] for v in res}
    common_fs.dumpJSON(res_str, os.path.join(result_dir_tmp, \
                                            self.cov_data_filename))
    cov_obj.erase()

    # clean any possible raw data file
    for file_ in glob.glob(self.raw_data_file + "*"):
        os.remove(file_)
def _get_fault_tests(cm_corebench_scripts_dir, c_id, conf_py, \
                        in_res_data_dir, outdir, get_difference=True):
    if not os.path.isdir(outdir):
        os.mkdir(outdir)
    exe_dir = os.path.join(cm_corebench_scripts_dir, \
                                            "bug_fixing_exes", c_id)
    exe_file = os.listdir(os.path.join(exe_dir, "old"))
    if len(exe_file) != 1:
        error_exit("not one file for old")
    exe_file = exe_file[0]

    fail_test_execution = os.path.join(outdir, "fail_test_checking")
    if os.path.isdir(fail_test_execution):
        shutil.rmtree(fail_test_execution)
    if not os.path.isdir(fail_test_execution):
        os.makedirs(fail_test_execution)
    bug_finding_tests_list = os.path.join(fail_test_execution, \
                                            "fault_reveling_tests.txt")

    # temporary
    test_list_file = os.path.join(fail_test_execution, "test_list.tmp")
    pass_fail_matrix = os.path.join(in_res_data_dir, "matrices", \
                                                        "PASSFAIL.csv")
    pf_mat = common_matrices.ExecutionMatrix(filename=pass_fail_matrix)
    with open(test_list_file, "w") as f:
        for test in pf_mat.get_nonkey_colname_list():
            f.write(test + "\n")

    nohash = ['--nohashoutlog']
    #nohash = []

    print("# info: running bug-fix old ...")
    version = "old"
    custom_exe = os.path.join(exe_dir, version, exe_file)
    stdin = "{}\n{}\n{}\n{}\n".format(
        "tests", os.path.join(fail_test_execution, version),
        '{"src/' + exe_file + '": "' + custom_exe + '"}', test_list_file)
    if os.system(" ".join(["printf", "'" + stdin + "'", "|", "muteria",
                            "--config", conf_py, "--lang", "c",
                            "customexec"] + nohash)) != 0:
        assert False, "bug-fix old failed"

    print("# info: running bug-fix new ...")
    version = "new"
    custom_exe = os.path.join(exe_dir, version, exe_file)
    stdin = "{}\n{}\n{}\n{}\n".format(
        "tests", os.path.join(fail_test_execution, version),
        '{"src/' + exe_file + '": "' + custom_exe + '"}', test_list_file)
    if os.system(" ".join(["printf", "'" + stdin + "'", "|", "muteria",
                            "--config", conf_py, "--lang", "c",
                            "customexec"] + nohash)) != 0:
        assert False, "bug-fix new failed"

    _extract_list(fail_test_execution, bug_finding_tests_list)

    if get_difference:
        # get differences in bug introducing
        ## get diff exe file
        diff_exe_dir = os.path.join(os.path.dirname(in_res_data_dir), \
                                                'code_build_workdir')
        map_file_ = os.path.join(diff_exe_dir, 'files_map')
        map_ = common_fs.loadJSON(map_file_)
        diff_exe_file = None
        for f, dest in map_.items():
            if os.path.basename(f) == exe_file:
                assert diff_exe_file is None, \
                                "multiple exe files found in difference"
                diff_exe_file = os.path.join(diff_exe_dir, dest)
        # check for None before isfile (isfile(None) would raise)
        assert diff_exe_file is not None, "diff exe file not found"
        assert os.path.isfile(diff_exe_file), \
                                "missing diff_exe_file: " + diff_exe_file

        ## Execution
        diff_test_execution = os.path.join(outdir, "diff_test_checking")
        diff_finding_tests_list = os.path.join(diff_test_execution, \
                                            "diff_reveling_tests.txt")
        if os.path.isdir(diff_test_execution):
            shutil.rmtree(diff_test_execution)
        if not os.path.isdir(diff_test_execution):
            os.makedirs(diff_test_execution)

        print("# info: running bug-intro old ...")
        version = "old"
        stdin = "{}\n{}\n{}\n{}\n".format(
            "tests", os.path.join(diff_test_execution, version),
            '{"src/' + exe_file + '": "' + diff_exe_file + '"}',
            test_list_file)
        if os.system(" ".join(["printf", "'" + stdin + "'", "|",
                                "KLEE_CHANGE_RUNTIME_SET_OLD_VERSION=on",
                                "muteria", "--config", conf_py, "--lang",
                                "c", "customexec"] + nohash)) != 0:
            assert False, "bug-intro old failed"

        print("# info: running bug-intro new ...")
        version = "new"
        stdin = "{}\n{}\n{}\n{}\n".format(
            "tests", os.path.join(diff_test_execution, version),
            '{"src/' + exe_file + '": "' + diff_exe_file + '"}',
            test_list_file)
        if os.system(" ".join(["printf", "'" + stdin + "'", "|",
                                "muteria", "--config", conf_py, "--lang",
                                "c", "customexec"] + nohash)) != 0:
            assert False, "bug-intro new failed"

        _extract_list(diff_test_execution, diff_finding_tests_list)

    os.remove(test_list_file)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('outdir', help="outdir of the execution")
    parser.add_argument('original_conf', help="config of the project")
    parser.add_argument('--cleanstart', action="store_true", \
                                    help="delete out and restart")
    parser.add_argument('--only_gentests', action="store_true", \
                        help="only generate tests, no mutant execution")
    parser.add_argument('--only_getseeds', action="store_true", \
                        help="only collect the seeds, no test generation "
                                "or mutant execution")
    parser.add_argument('--no_summary', action="store_true", \
                                    help="Disable Summary")
    args = parser.parse_args()
    outdir = os.path.abspath(args.outdir)
    original_conf = os.path.abspath(args.original_conf)
    if args.cleanstart:
        if os.path.isdir(outdir):
            shutil.rmtree(outdir)
    if not os.path.isdir(outdir):
        os.mkdir(outdir)

    # XXX
    do_remove_added_devtests = args.only_gentests

    seeds_dir = os.path.join(outdir, "ktests_seeds")
    checkpoint_file = os.path.join(outdir, 'checkpoint.json')
    tg_conf = os.path.join(outdir, "tg_conf.py")
    muteria_output = os.path.join(outdir, "muteria_output")
    mut_ex_prepa_data_dir = os.path.join(outdir, "mut_ex_prepa_data")
    summarized_data_dir = os.path.join(outdir, "summarized_data_dir")
    mutant_exec_res = os.path.join(outdir, "mutant_exec_res")
    avoid_meta_mutants_list_file = os.path.join(outdir, \
                            mut_ex_prepa_data_dir, \
                            "avoid_meta_mutants_list_file.txt")
    avoid_meta_tests_list_file = os.path.join(outdir, \
                            mut_ex_prepa_data_dir, \
                            "avoid_meta_tests_list_file.txt")

    # load added dev tests
    added_devtest_map_file = os.path.join(os.path.dirname(__file__), \
                            'added_tests', 'added_tests_map.json')
    added_devtest_map = common_fs.loadJSON(added_devtest_map_file)
    cur_id = os.path.basename(outdir)
    if do_remove_added_devtests:
        if cur_id not in added_devtest_map:
            error_exit("do_remove_added_devtests is true but id not in map")
        toskip_added_devtest = added_devtest_map[cur_id]
    else:
        toskip_added_devtest = []

    # check checkpoint and load if necessary
    cp_data = {
        SEED_COLLECTION: False,
        TEST_GENERATION: False,
        MUTANT_EXECUTION_PREPA: False,
        MUTANT_EXECUTION: False,
        DATA_SUMMARIZATION: False,
    }
    if args.only_getseeds:
        cp_data[TEST_GENERATION] = True
        cp_data[MUTANT_EXECUTION_PREPA] = True
        cp_data[MUTANT_EXECUTION] = True
        cp_data[DATA_SUMMARIZATION] = True
    if args.only_gentests:
        cp_data[MUTANT_EXECUTION_PREPA] = True
        cp_data[MUTANT_EXECUTION] = True
    if args.no_summary:
        cp_data[DATA_SUMMARIZATION] = True
    if os.path.isfile(checkpoint_file):
        cp_data = common_fs.loadJSON(checkpoint_file)

    if not cp_data[SEED_COLLECTION]:
        print("#(DBG): Executing {} ...".format(SEED_COLLECTION))
        collect_seeds(outdir, seeds_dir, muteria_output, original_conf, \
                                                    toskip_added_devtest)
        cp_data[SEED_COLLECTION] = True
        common_fs.dumpJSON(cp_data, checkpoint_file, pretty=True)
    if not cp_data[TEST_GENERATION]:
        print("#(DBG): Executing {} ...".format(TEST_GENERATION))
        generate_tests(outdir, seeds_dir, muteria_output, original_conf, \
                        tg_conf, args.only_gentests, toskip_added_devtest, \
                        args.only_gentests)
        cp_data[TEST_GENERATION] = True
        common_fs.dumpJSON(cp_data, checkpoint_file, pretty=True)
    if not cp_data[MUTANT_EXECUTION_PREPA]:
        print("#(DBG): Executing {} ...".format(MUTANT_EXECUTION_PREPA))
        prepare_mutant_execution(outdir, muteria_output, original_conf, \
                            mut_ex_prepa_data_dir, \
                            avoid_meta_tests_list_file, \
                            avoid_meta_mutants_list_file)
        cp_data[MUTANT_EXECUTION_PREPA] = True
        common_fs.dumpJSON(cp_data, checkpoint_file, pretty=True)
    if not cp_data[MUTANT_EXECUTION]:
        print("#(DBG): Executing {} ...".format(MUTANT_EXECUTION))
        mutant_execution(outdir, avoid_meta_tests_list_file, \
                            avoid_meta_mutants_list_file, \
                            muteria_output, tg_conf, mutant_exec_res)
        cp_data[MUTANT_EXECUTION] = True
        common_fs.dumpJSON(cp_data, checkpoint_file, pretty=True)
    if not cp_data[DATA_SUMMARIZATION]:
        print("#(DBG): Executing {} ...".format(DATA_SUMMARIZATION))
        if args.only_gentests:
            cm_corebench_scripts_dir = os.path.join(
                    os.path.dirname(os.path.dirname(os.path.dirname(
                                        os.path.dirname(original_conf)))),
                    "cm_corebench_scripts")
            c_id = os.path.basename(outdir)
            get_fault_tests.fault_analysis(cm_corebench_scripts_dir, \
                        c_id, tg_conf, muteria_output, summarized_data_dir)
        else:
            summarize_data(outdir, summarized_data_dir, mutant_exec_res, \
                                                    mut_ex_prepa_data_dir)
        cp_data[DATA_SUMMARIZATION] = True
        common_fs.dumpJSON(cp_data, checkpoint_file, pretty=True)

    print("\n# DONE!\n")
def summarize_data(outdir, summarized_data_dir, mutant_exec_res, \
                                                mut_ex_prepa_data_dir):
    other_rel_to_reltests_file = os.path.join(mut_ex_prepa_data_dir, \
                                'relevantmuts_to_relevanttests.json')
    other_rel_to_reltests = common_fs.loadJSON(other_rel_to_reltests_file)
    stored_map_file = os.path.join(mut_ex_prepa_data_dir, \
                                                'stored_test_map.json')
    stored_map = common_fs.loadJSON(stored_map_file)

    def get_too2relmuts(relmuts_to_reltests, toollist=None):
        res = {tool: set() for tool in toollist} \
                                    if toollist is not None else {}
        for relmut, t_list in relmuts_to_reltests.items():
            for meta_t in t_list:
                toolalias, test = DriversUtils.reverse_meta_element(meta_t)
                if toollist is None:
                    if toolalias not in res:
                        res[toolalias] = set()
                else:
                    assert toolalias in toollist, \
                        "PB: toolalias ({}) not in toollist ({})".format(
                                                    toolalias, toollist)
                res[toolalias].add(relmut)
        for toolalias in res:
            res[toolalias] = list(res[toolalias])
        return res
    #~ def get_too2relmuts()

    other_tool_to_relmuts = get_too2relmuts(other_rel_to_reltests)

    all_tests, fail_tests, relevant_mutants_to_relevant_tests, \
            mutants_to_killingtests, tests_to_killed_mutants = \
                        load.load(mutant_exec_res, fault_revealing=False)

    if not os.path.isdir(summarized_data_dir):
        os.mkdir(summarized_data_dir)
    cmp_result_file = os.path.join(summarized_data_dir, \
                                                "compare_results.json")
    initial_relmuts_file = os.path.join(summarized_data_dir, \
                                                "initial_relevant.json")
    rMS_file = os.path.join(summarized_data_dir, "rMS.json")

    # get relevant muts by tools
    # get tool list
    tinf = common_fs.loadJSON(os.path.join(mutant_exec_res, \
            "post/RESULTS_DATA/other_copied_results/testcasesInfos.json"))
    tool2relmuts = get_too2relmuts(relevant_mutants_to_relevant_tests, \
                toollist=list(set(tinf["CUSTOM"]) - {"custom_devtests"}))

    # update with stored map
    for relmut, t_list in other_rel_to_reltests.items():
        for other_meta_t in t_list:
            if other_meta_t in stored_map:
                for meta_t in stored_map[other_meta_t]:
                    toolalias, test = \
                                DriversUtils.reverse_meta_element(meta_t)
                    assert toolalias in tool2relmuts, "PB2: "
                    tool2relmuts[toolalias].append(relmut)
    for toolalias in tool2relmuts:
        tool2relmuts[toolalias] = list(set(tool2relmuts[toolalias]))

    common_fs.dumpJSON(tool2relmuts, cmp_result_file, pretty=True)
    common_fs.dumpJSON(other_tool_to_relmuts, initial_relmuts_file, \
                                                            pretty=True)

    # Compute and print the relevant mutation score per tool
    all_rel_muts = set()
    devtest_rel = set()
    for talias, rellist in tool2relmuts.items():
        all_rel_muts |= set(rellist)
    for talias, rellist in other_tool_to_relmuts.items():
        all_rel_muts |= set(rellist)
        if talias.startswith("custom_devtests"):
            devtest_rel |= set(rellist)

    rMS = {"ALL-GENONLY": {}, "ALL-ALL": {}, "ADDITIONAL": {}}
    for talias, rellist in tool2relmuts.items():
        rMS["ALL-ALL"][talias] = len(set(rellist) | devtest_rel) \
                                            * 100.0 / len(all_rel_muts)
        rMS["ALL-GENONLY"][talias] = len(rellist) * 100.0 \
                                                    / len(all_rel_muts)
        rMS["ADDITIONAL"][talias] = len(set(rellist) - devtest_rel) \
                            * 100.0 / len(all_rel_muts - devtest_rel)

    common_fs.dumpJSON(rMS, rMS_file, pretty=True)
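# To make the three rMS variants above concrete, a tiny worked example on
# assumed data (one generation tool, one mutant also found by the developer
# tests); the mutant names and values are illustrative only.
all_rel_muts = {"m1", "m2", "m3", "m4"}    # all relevant mutants found
devtest_rel = {"m4"}                       # also found by custom_devtests
rellist = {"m1", "m4"}                     # found by one generation tool

all_all = len(rellist | devtest_rel) * 100.0 / len(all_rel_muts)      # 50.0
all_genonly = len(rellist) * 100.0 / len(all_rel_muts)                # 50.0
additional = len(rellist - devtest_rel) * 100.0 \
                        / len(all_rel_muts - devtest_rel)             # ~33.3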
def read_from_file(self, filepath):
    obj = common_fs.loadJSON(filepath)
    for k in obj:
        self.add_map(k, obj[k])
def load_from_file(self, file_path):
    self.data = common_fs.loadJSON(file_path)
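# The symmetric writer is not shown in this snippet; below is a minimal
# sketch using the common_fs.dumpJSON signature seen elsewhere in this
# collection (the method name save_to_file is hypothetical).
def save_to_file(self, file_path):
    # persist self.data with the pretty-printed JSON dump used across
    # these drivers
    common_fs.dumpJSON(self.data, file_path, pretty=True)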
def __init__(self, config, explorer, criterion, jsonfile=None):
    ERROR_HANDLER.assert_true(jsonfile is not None, \
                                "jsonfile cannot be None", __file__)
    jsonobj = common_fs.loadJSON(jsonfile)
    DictOpt.__init__(self, config, explorer, criterion, dictobj=jsonobj)
def _setup_repository(self):
    # Make sure the repo dir exists
    ERROR_HANDLER.assert_true(os.path.isdir(self.repository_rootdir), \
                    "given repository dir is not existing: {}".format( \
                            self.repository_rootdir), __file__)

    # make sure the repo dir is a git repo
    # if no, ask the user whether to initialize and initialize or abort
    # if yes or user accepted initialize, get the git object for the repo
    try:
        repo = git_repo(self.repository_rootdir)
        gitobj = repo.git
    except git_exc.InvalidGitRepositoryError:
        make_it_git = common_mix.confirm_execution("{} {} {} {}".format(\
                            "The given repository directory is not", \
                            "a git repository, this must be a git", \
                            "repository to proceed.\n Do you want to", \
                            "set it as a git repository?"))
        if make_it_git:
            repo = git_repo.init(self.repository_rootdir)
            gitobj = repo.git
        else:
            ERROR_HANDLER.error_exit("{} {}".format(\
                        "Must make the repository a git repository,", \
                        "then re-run"), __file__)

    # Check whether the repo is already managed by muteria
    ## if using branch
    if self.delete_created_on_revert_as_initial:
        if self.test_branch_name not in self._get_branches_list(\
                                            self.repository_rootdir):
            ## Not managed, create branch
            self._make_testing_branch(self.repository_rootdir, \
                                                self.test_branch_name)

        # checkout
        gitobj.checkout(self.test_branch_name)

    # Actual check whether the repo is already managed by muteria
    # There must be a directory DEFAULT_MUTERIA_REPO_META_FOLDER
    src_in_prev = set()
    if os.path.isdir(self.muteria_metadir):
        ## Managed
        prev_src_list, prev_use_branch = \
                    common_fs.loadJSON(self.muteria_metadir_info_file)
        ERROR_HANDLER.assert_true(prev_use_branch == \
                    self.delete_created_on_revert_as_initial, \
                    "{} {} {} {} {}".format(\
                        "unmatching repo backup type.", \
                        "previously, use branch was", prev_use_branch, \
                        ", now it is", \
                        self.delete_created_on_revert_as_initial), \
                    __file__)
        src_in_prev = set(prev_src_list) & set(self.source_files_list)
        prev_src_list = set(prev_src_list) - set(self.source_files_list)
        remain_prev_src_set = {src for src in prev_src_list \
                            if os.path.isfile(self.repo_abs_path(src))}

        # make sure that all prev_src are in initial state
        untracked_diff_files = self._get_untracked_and_diffed(repo)
        prev_untracked_diff = remain_prev_src_set & untracked_diff_files
        if len(prev_untracked_diff) > 0:
            bypass = common_mix.confirm_execution(\
                        "{} {} {} {} {}".format(\
                        "the following files were previously used as", \
                        "src files by muteria and are now untracked:", \
                        prev_untracked_diff, \
                        "\nDo you want to handle it or bypass it?", \
                        "Choose yes to bypass: "))
            if not bypass:
                revert_them = common_mix.confirm_execution(\
                        "{} {}".format(\
                        "Do you want to automatically restore the", \
                        "untracked previous source and continue?"))
                if revert_them:
                    for src in prev_untracked_diff:
                        self.revert_repository_file(src, gitobj=gitobj)
                else:
                    ERROR_HANDLER.error_exit(\
                        "Handle it manually and restart the execution")

        # update the info_file
        if set(prev_src_list) != set(self.source_files_list):
            common_fs.dumpJSON([self.source_files_list, \
                        self.delete_created_on_revert_as_initial], \
                        self.muteria_metadir_info_file)
    else:
        ## Not managed
        os.mkdir(self.muteria_metadir)
        common_fs.dumpJSON([self.source_files_list, \
                        self.delete_created_on_revert_as_initial], \
                        self.muteria_metadir_info_file)

    # Make sure all source files of interest are tracked
    untracked_diff_files = self._get_untracked_and_diffed(repo)
    untracked_diff_src_in_prev = untracked_diff_files & src_in_prev
    if len(untracked_diff_src_in_prev) > 0:
        if common_mix.confirm_execution(\
                    "{} {} {} {} {}".format("The following source", \
                    "files of interest are untracked", \
                    "and will be reverted (previous execution unfinished):", \
                    src_in_prev, \
                    "do you want to revert them?")):
            for src in src_in_prev:
                self.revert_repository_file(src, gitobj=gitobj)
        else:
            ERROR_HANDLER.error_exit("{} {}".format(\
                        "Handle untracked source files manually", \
                        "then restart the execution"))

    untracked_diff_files = self._get_untracked_and_diffed(repo)
    untracked_diff_src_files = set(self.source_files_list) & \
                                                untracked_diff_files
    if len(untracked_diff_src_files) > 0:
        if common_mix.confirm_execution(\
                    "{} {} {} {}".format("The following source", \
                    "files of interest are untracked:", \
                    untracked_diff_src_files, \
                    "do you want to track them?")):
            repo.index.add(list(untracked_diff_src_files))
        else:
            ERROR_HANDLER.error_exit("{} {}".format(\
                        "Handle untracked source files manually", \
                        "then restart the execution"))
def _extract_coverage_data_of_a_test(self, enabled_criteria, \
                                test_execution_verdict, result_dir_tmp):
    ''' read json files and extract data
        return: the dict of criteria with covering count
        # TODO: Restrict to returning coverage of specified header files
    '''
    gcov_list = common_fs.loadJSON(os.path.join(result_dir_tmp, \
                                        self.gcov_files_list_filename))

    res = {c: {} for c in enabled_criteria}

    func_cov = None
    branch_cov = None
    statement_cov = {}
    if TestCriteria.FUNCTION_COVERAGE in enabled_criteria:
        func_cov = res[TestCriteria.FUNCTION_COVERAGE]
    if TestCriteria.BRANCH_COVERAGE in enabled_criteria:
        branch_cov = res[TestCriteria.BRANCH_COVERAGE]
    if TestCriteria.STATEMENT_COVERAGE in enabled_criteria:
        statement_cov = res[TestCriteria.STATEMENT_COVERAGE]

    # Sources of interest
    _, src_map = self.code_builds_factory.repository_manager.\
                                        get_relative_exe_path_map()

    #logging.debug("gcov_list: {}".format(gcov_list))

    for gcov_file in gcov_list:
        with open(gcov_file) as fp:
            last_line = None
            src_file = None
            for raw_line in fp:
                line = raw_line.strip()
                col_split = [v.strip() for v in line.split(':')]

                if len(col_split) > 2 and col_split[1] == '0':
                    # preamble
                    if col_split[2] == "Source":
                        src_file = os.path.normpath(col_split[3])
                        if not src_file in src_map:
                            s_cand = None
                            for s in src_map.keys():
                                if s.endswith(os.sep + src_file):
                                    ERROR_HANDLER.assert_true(\
                                        s_cand is None, \
                                        "multiple candidates, maybe "+\
                                        "same source name in "+\
                                        "different dirs for source "+\
                                        "{}".format(src_file), __file__)
                                    s_cand = s
                            if s_cand is None:
                                # If src file does not represent (not
                                # equal and not relatively equal, for
                                # case where build happens not in
                                # rootdir), src not in considered
                                break
                            else:
                                # ensure compatibility in matrices
                                src_file = s_cand
                elif line.startswith("function "):
                    # match function
                    parts = line.split()
                    ident = DriversUtils.make_meta_element(parts[1], \
                                                            src_file)
                    func_cov[ident] = int(parts[3])
                elif line.startswith("branch "):
                    # match branch
                    parts = line.split()
                    ident = DriversUtils.make_meta_element(parts[1], \
                                                            last_line)
                    if parts[2:4] == ['never', 'executed']:
                        branch_cov[ident] = 0
                    else:
                        branch_cov[ident] = int(parts[3])
                elif len(col_split) > 2 and \
                                    re.match(r"^\d+$", col_split[1]):
                    # match line
                    if col_split[0] == '-':
                        continue
                    last_line = DriversUtils.make_meta_element(\
                                            col_split[1], src_file)
                    if col_split[0] in ('#####', '====='):
                        exec_count = 0
                    else:
                        exec_count = \
                            int(re.findall(r'^\d+', col_split[0])[0])
                    statement_cov[last_line] = exec_count

    # delete gcov files
    for gcov_f in self._get_gcov_list():
        os.remove(gcov_f)

    #logging.debug("gcov res is: {}".format(res))

    return res
def _extract_coverage_data_of_a_test(self, enabled_criteria, \
                                test_execution_verdict, result_dir_tmp):
    ''' read json files and extract data
        return: the dict of criteria with covering count
        # TODO: Restrict to returning coverage of specified header files
    '''
    gcov_list = common_fs.loadJSON(os.path.join(result_dir_tmp, \
                                        self.gcov_files_list_filename))

    res = {c: {} for c in enabled_criteria}

    func_cov = None
    branch_cov = None
    statement_cov = {}
    if TestCriteria.FUNCTION_COVERAGE in enabled_criteria:
        func_cov = res[TestCriteria.FUNCTION_COVERAGE]
    if TestCriteria.BRANCH_COVERAGE in enabled_criteria:
        branch_cov = res[TestCriteria.BRANCH_COVERAGE]
    if TestCriteria.STATEMENT_COVERAGE in enabled_criteria:
        statement_cov = res[TestCriteria.STATEMENT_COVERAGE]

    # Sources of interest
    _, src_map = self.code_builds_factory.repository_manager.\
                                        get_relative_exe_path_map()

    for gcov_file in gcov_list:
        with open(gcov_file) as fp:
            last_line = None
            src_file = None
            for raw_line in fp:
                line = raw_line.strip()
                col_split = [v.strip() for v in line.split(':')]

                if len(col_split) > 2 and col_split[1] == '0':
                    # preamble
                    if col_split[2] == "Source":
                        src_file = col_split[3]
                        if src_file not in src_map:
                            # src not in considered
                            break
                elif line.startswith("function "):
                    # match function
                    parts = line.split()
                    ident = DriversUtils.make_meta_element(parts[1], \
                                                            src_file)
                    func_cov[ident] = int(parts[3])
                elif line.startswith("branch "):
                    # match branch
                    parts = line.split()
                    ident = DriversUtils.make_meta_element(parts[1], \
                                                            last_line)
                    branch_cov[ident] = int(parts[3])
                elif len(col_split) > 2 and \
                                    re.match(r"^\d+$", col_split[1]):
                    # match line
                    if col_split[0] == '-':
                        continue
                    last_line = DriversUtils.make_meta_element(\
                                            col_split[1], src_file)
                    if col_split[0] in ('#####', '====='):
                        exec_count = 0
                    else:
                        exec_count = \
                            int(re.findall(r'^\d+', col_split[0])[0])
                    statement_cov[last_line] = exec_count

    # delete gcov files
    for gcov_f in self._get_gcov_list():
        os.remove(gcov_f)

    return res
def get_instrumented_executable_paths_map(self, enabled_criteria):
    using_gdb_wrapper = self.driver_config.get_use_gdb_wrapper()
    if using_gdb_wrapper:
        # use wrapper if gdb is installed
        using_gdb_wrapper = DriversUtils.check_tool(prog='gdb', \
                                        args_list=['--version'], \
                                        expected_exit_codes=[0])
        if using_gdb_wrapper:
            # XXX: Docker has issues running programs under gdb,
            # unless the container is run with the following arguments:
            #
            #  docker run --cap-add=SYS_PTRACE \
            #                   --security-opt seccomp=unconfined ...
            #
            # We check that it is fine by testing on echo
            ret, o_e, _ = DriversUtils.execute_and_get_retcode_out_err(\
                            prog='gdb', \
                            args_list=['--batch-silent', '--quiet', \
                                    '--return-child-result', \
                                    '-ex', 'run', '--args', 'echo'], \
                            ) #out_on=False, err_on=False)
            using_gdb_wrapper = (ret == 0)
            if not using_gdb_wrapper:
                logging.warning("use gdb is enabled but call to gdb fails"
                                " (retcode {}) with msg: {}".format(
                                                            ret, o_e))
        else:
            logging.warning("use gdb is enabled but gdb is not installed")

    crit_to_exes_map = {}
    obj = common_fs.loadJSON(self.instrumentation_details)
    #exes = [p for _, p in list(obj.items())]
    for name in obj:
        obj[name] = os.path.join(self.instrumented_code_storage_dir, \
                                                            obj[name])

    if using_gdb_wrapper:
        # Using GDB WRAPPER
        with open(self.gcov_gdb_wrapper_template) as f:
            template_str = f.read()
        single_exe = obj[list(obj)[0]]
        template_str = template_str.replace(\
                        'MUTERIA_GCOV_PROGRAMEXE_PATHNAME', single_exe)
        with open(self.gcov_gdb_wrapper_sh, 'w') as f:
            f.write(template_str)
        shutil.copymode(single_exe, self.gcov_gdb_wrapper_sh)
        obj[list(obj)[0]] = self.gcov_gdb_wrapper_sh

    exes_map = obj
    for criterion in enabled_criteria:
        crit_to_exes_map[criterion] = exes_map

    #logging.debug("DBG: {} {} {}".format(\
    #        "Using gdb wrapper is {}.".format(using_gdb_wrapper), \
    #        "crit_to_exe_map is {}.".format(crit_to_exes_map), \
    #        "wraper path is {}!".format(self.gcov_gdb_wrapper_sh)))
    return crit_to_exes_map
def __init__(self, filename=None):
    self.filename = filename
    if self.filename is None or not os.path.isfile(self.filename):
        self.data = {}
    else:
        self.data = common_fs.loadJSON(self.filename)
def _execute_task(self, task):
    """ TODO:
            1. implement the todos here
    """
    # @Checkpointing: see whether the task is untouched to clear tmp
    task_untouched = self.cp_data.tasks_obj.task_is_untouched(task)

    # Execute based on the task: ---------------------------------------
    if task == checkpoint_tasks.Tasks.STARTING:
        pass
    elif task == checkpoint_tasks.Tasks.FINISHED:
        pass
    elif task == checkpoint_tasks.Tasks.TESTS_GENERATION_GUIDANCE:
        pass #TODO: Generate focus criteria and tests and store in json file
    elif task == checkpoint_tasks.Tasks.TESTS_GENERATION:
        # @Checkpointing
        if task_untouched:
            self.meta_testcase_tool.clear_working_dir()
            self.cp_data.tasks_obj.set_task_executing(task)
            self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())

        # Generate the tests
        self.meta_testcase_tool.generate_tests(\
                            test_tool_type_list=self.cp_data.test_types)

        # @Checkpointing
        self.cp_data.tasks_obj.set_task_completed(task)
        self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())
        # Destroy meta test checkpointer
        self.meta_testcase_tool.get_checkpoint_state_object()\
                                                .destroy_checkpoint()
    elif task == checkpoint_tasks.Tasks.\
                            TESTS_EXECUTION_SELECTION_PRIORITIZATION:
        out_file_key = outdir_struct.TMP_SELECTED_TESTS_LIST
        out_file = self.head_explorer.get_file_pathname(out_file_key)

        # @Checkpointing
        if task_untouched:
            self.head_explorer.remove_file_and_get(out_file_key)
            self.cp_data.tasks_obj.set_task_executing(task)
            self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())

        all_tests = self.meta_testcase_tool.\
                            get_testcase_info_object().get_tests_list()
        candidate_aliases = \
                self.meta_testcase_tool.get_candidate_tools_aliases(\
                            test_tool_type_list=self.cp_data.test_types)
        selected_tests = []
        for meta_test in all_tests:
            toolalias, _ = DriversUtils.reverse_meta_element(meta_test)
            if toolalias in candidate_aliases:
                selected_tests.append(meta_test)

        # TODO: use the actual selector. For now just use all tests
        common_fs.dumpJSON(list(selected_tests), out_file)

        # @Checkpointing
        self.cp_data.tasks_obj.set_task_completed(task)
        self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())
        # Destroy meta test checkpointer
        #self.meta_testexec_optimization_tool.get_checkpoint_state_object()\
        #                                        .destroy_checkpoint()
    elif task == checkpoint_tasks.Tasks.PASS_FAIL_TESTS_EXECUTION:
        # Make sure that the Matrices dir exists
        self.head_explorer.get_or_create_and_get_dir(\
                                    outdir_struct.RESULTS_MATRICES_DIR)

        matrix_file_key = outdir_struct.TMP_TEST_PASS_FAIL_MATRIX
        matrix_file = self.head_explorer.get_file_pathname(matrix_file_key)
        execoutput_file_key = \
                        outdir_struct.TMP_PROGRAM_TESTEXECUTION_OUTPUT
        if self.config.GET_PASSFAIL_OUTPUT_SUMMARY:
            execoutput_file = self.head_explorer.get_file_pathname(\
                                                    execoutput_file_key)
        else:
            execoutput_file = None

        # @Checkpointing
        if task_untouched:
            self.head_explorer.remove_file_and_get(matrix_file_key)
            self.head_explorer.remove_file_and_get(execoutput_file_key)
            self.meta_testcase_tool.get_checkpoint_state_object()\
                                                        .restart_task()
            self.cp_data.tasks_obj.set_task_executing(task)
            self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())

        # Execute tests
        test_list_file = self.head_explorer.get_file_pathname(\
                                outdir_struct.TMP_SELECTED_TESTS_LIST)
        meta_testcases = common_fs.loadJSON(test_list_file)

        # Set test oracle
        self.test_oracle_manager.set_oracle(passfail=True)

        self.meta_testcase_tool.runtests(meta_testcases=meta_testcases, \
                stop_on_failure=\
                        self.config.STOP_TESTS_EXECUTION_ON_FAILURE, \
                recalculate_execution_times=True, \
                fault_test_execution_matrix_file=matrix_file, \
                fault_test_execution_execoutput_file=execoutput_file, \
                test_prioritization_module=\
                            self.meta_testexec_optimization_tool, \
                finish_destroy_checkpointer=False)

        # Unset test oracle
        self.test_oracle_manager.set_oracle(passfail=False)

        # @Checkpointing
        self.cp_data.tasks_obj.set_task_completed(task)
        self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())
        # Destroy meta test checkpointer
        self.meta_testcase_tool.get_checkpoint_state_object()\
                                                .destroy_checkpoint()
    elif task == checkpoint_tasks.Tasks.CRITERIA_GENERATION_GUIDANCE:
        pass #TODO (Maybe could be used someday. For now, just skip it)
    elif task == checkpoint_tasks.Tasks.CRITERIA_GENERATION:
        # @Checkpointing
        if task_untouched:
            self.meta_criteria_tool.get_checkpoint_state_object()\
                                                        .restart_task()
            self.cp_data.tasks_obj.set_task_executing(task)
            self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())

        self.meta_criteria_tool.instrument_code(criteria_enabled_list=\
                            self.config.ENABLED_CRITERIA.get_val(), \
                            finish_destroy_checkpointer=False)

        # @Checkpointing
        self.cp_data.tasks_obj.set_task_completed(task)
        self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())
        # Destroy meta test checkpointer
        self.meta_criteria_tool.get_checkpoint_state_object()\
                                                .destroy_checkpoint()
    elif task == checkpoint_tasks.Tasks.\
                            CRITERIA_EXECUTION_SELECTION_PRIORITIZATION:
        pass #TODO (Maybe could be used someday. For now, just skip it)
    elif task == checkpoint_tasks.Tasks.CRITERIA_TESTS_EXECUTION:
        # Make sure that the Matrices dir exists
        self.head_explorer.get_or_create_and_get_dir(\
                                    outdir_struct.RESULTS_MATRICES_DIR)

        matrix_files_keys = {}
        matrix_files = {}
        execoutput_files_keys = {}
        execoutput_files = {}
        for criterion in self.config.ENABLED_CRITERIA.get_val():
            matrix_files_keys[criterion] = \
                            outdir_struct.TMP_CRITERIA_MATRIX[criterion]
            matrix_files[criterion] = \
                            self.head_explorer.get_file_pathname(\
                                        matrix_files_keys[criterion])
            execoutput_files_keys[criterion] = \
                    outdir_struct.TMP_CRITERIA_EXECUTION_OUTPUT[criterion]
            if criterion in self.config.CRITERIA_WITH_OUTPUT_SUMMARY:
                execoutput_files[criterion] = \
                            self.head_explorer.get_file_pathname(\
                                    execoutput_files_keys[criterion])
            else:
                execoutput_files[criterion] = None

        # @Checkpointing
        if task_untouched:
            for _, matrix_file_key in list(matrix_files_keys.items()):
                self.head_explorer.remove_file_and_get(matrix_file_key)
            for _, execoutput_file_key in list(\
                                        execoutput_files_keys.items()):
                self.head_explorer.remove_file_and_get(\
                                                execoutput_file_key)
            self.meta_criteria_tool.get_checkpoint_state_object()\
                                                        .restart_task()
            self.cp_data.tasks_obj.set_task_executing(task)
            self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())

        criteria_set_sequence = self.config.CRITERIA_SEQUENCE.get_val()
        #if criteria_set_sequence is None:
        #    criteria_set_sequence = criteria_pkg.CRITERIA_SEQUENCE
        for cs_pos, criteria_set in enumerate(criteria_set_sequence):
            criteria_set &= set(matrix_files)
            if len(criteria_set) == 0:
                continue
            # Was it already checkpointed w.r.t criteria set seq
            if self.cp_data.criteria_set_is_executed(cs_pos, \
                                                        criteria_set):
                continue

            # If we have a new criteria set id
            self.cp_data.switchto_new_criteria_set(cs_pos, criteria_set)

            # get matrices by criteria
            test_list_file = self.head_explorer.get_file_pathname(\
                                outdir_struct.TMP_SELECTED_TESTS_LIST)
            meta_testcases = common_fs.loadJSON(test_list_file)
            criterion_to_matrix = {\
                            c: matrix_files[c] for c in criteria_set}

            # TODO: check set based on the criteria for which execout
            # is enabled
            criterion_to_execoutput = {\
                        c: execoutput_files[c] for c in criteria_set}

            # Set test oracle
            self.test_oracle_manager.set_oracle(criteria_on=criteria_set)

            # execute
            self.meta_criteria_tool.runtests_criteria_coverage( \
                    testcases=meta_testcases, \
                    criterion_to_matrix=criterion_to_matrix, \
                    criterion_to_executionoutput=\
                                        criterion_to_execoutput, \
                    cover_criteria_elements_once=self.config.\
                            COVER_CRITERIA_ELEMENTS_ONCE.get_val(), \
                    prioritization_module_by_criteria=\
                        self.meta_criteriaexec_optimization_tools, \
                    finish_destroy_checkpointer=True)

            # Update matrix if needed to have output diff or such
            # TODO: add CRITERIA_REQUIRING_OUTDIFF_WITH_PROGRAM to config
            for crit in criteria_set & set(self.config.\
                            CRITERIA_REQUIRING_OUTDIFF_WITH_PROGRAM.\
                                                        get_val()):
                pf_matrix_file = self.head_explorer.get_file_pathname(\
                                outdir_struct.TMP_TEST_PASS_FAIL_MATRIX)
                if self.config.GET_PASSFAIL_OUTPUT_SUMMARY.get_val():
                    pf_execoutput_file = \
                        self.head_explorer.get_file_pathname(\
                        outdir_struct.TMP_PROGRAM_TESTEXECUTION_OUTPUT)
                else:
                    pf_execoutput_file = None
                DriversUtils.update_matrix_to_cover_when_diference(\
                                    criterion_to_matrix[crit], \
                                    criterion_to_execoutput[crit], \
                                    pf_matrix_file, pf_execoutput_file)

            # Unset test oracle
            self.test_oracle_manager.set_oracle(criteria_on=None)

            # @Checkpointing
            self.checkpointer.write_checkpoint(\
                                            self.cp_data.get_json_obj())

        # @Checkpointing
        self.cp_data.tasks_obj.set_task_completed(task)
        self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())
    elif task == checkpoint_tasks.Tasks.PASS_FAIL_STATS:
        # Make sure that the Stats dir exists
        self.head_explorer.get_or_create_and_get_dir(\
                                        outdir_struct.RESULTS_STATS_DIR)

        tmp_matrix_file = self.head_explorer.get_file_pathname(\
                                outdir_struct.TMP_TEST_PASS_FAIL_MATRIX)
        matrix_file = self.head_explorer.get_file_pathname(\
                                outdir_struct.TEST_PASS_FAIL_MATRIX)
        tmp_execoutput_file = self.head_explorer.get_file_pathname(\
                        outdir_struct.TMP_PROGRAM_TESTEXECUTION_OUTPUT)
        execoutput_file = self.head_explorer.get_file_pathname(\
                            outdir_struct.PROGRAM_TESTEXECUTION_OUTPUT)

        # Merge TMP pass fail and potentially existing pass fail
        StatsComputer.merge_lmatrix_into_right(\
                                        tmp_matrix_file, matrix_file)
        StatsComputer.merge_lexecoutput_into_right(\
                                tmp_execoutput_file, execoutput_file)

        # @Checkpointing
        self.cp_data.tasks_obj.set_task_completed(task)
        self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())

        # Cleanup
        self.head_explorer.remove_file_and_get(\
                                outdir_struct.TMP_TEST_PASS_FAIL_MATRIX)
    elif task == checkpoint_tasks.Tasks.CRITERIA_STATS:
        # Make sure that the Stats dir exists
        self.head_explorer.get_or_create_and_get_dir(\
                                        outdir_struct.RESULTS_STATS_DIR)

        # Merge TMP criteria and potentially existing criteria
        for criterion in self.config.ENABLED_CRITERIA.get_val():
            tmp_matrix_file = self.head_explorer.get_file_pathname(\
                            outdir_struct.TMP_CRITERIA_MATRIX[criterion])
            matrix_file = self.head_explorer.get_file_pathname(\
                                outdir_struct.CRITERIA_MATRIX[criterion])
            tmp_execoutput_file = self.head_explorer.get_file_pathname(\
                    outdir_struct.TMP_CRITERIA_EXECUTION_OUTPUT[criterion])
            execoutput_file = self.head_explorer.get_file_pathname(\
                        outdir_struct.CRITERIA_EXECUTION_OUTPUT[criterion])
            StatsComputer.merge_lmatrix_into_right(tmp_matrix_file, \
                                                            matrix_file)
            StatsComputer.merge_lexecoutput_into_right(\
                                tmp_execoutput_file, execoutput_file)

        # @Checkpointing
        self.cp_data.tasks_obj.set_task_completed(task)
        self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())

        # Cleanup
        for criterion in self.config.ENABLED_CRITERIA.get_val():
            self.head_explorer.remove_file_and_get(\
                            outdir_struct.TMP_CRITERIA_MATRIX[criterion])
    elif task == checkpoint_tasks.Tasks.AGGREGATED_STATS:
        # Compute the final stats (MS, ...)
        StatsComputer.compute_stats(self.config, self.head_explorer)
    #-------------------------------------------------------------------

    if not self.cp_data.tasks_obj.task_is_complete(task):
        # @Checkpoint: set task as done
        self.cp_data.tasks_obj.set_task_completed(task)

        # @Checkpoint: write checkpoint
        self.checkpointer.write_checkpoint(\
                                json_obj=self.cp_data.get_json_obj())