Example #1
    def _update_tests_duplicates_map(self, recompute=False):
        if os.path.isfile(self.duplicated_tests_info_file) and not recompute:
            self.tests_duplicates_map = common_fs.loadJSON(
                self.duplicated_tests_info_file)
            return

        groups = {}
        for _, dat in self.testcases_configured_tools.items():
            tobj = dat[self.TOOL_OBJ_KEY]
            fmt = tobj.get_test_format_class()
            if fmt is None:
                continue
            if fmt not in groups:
                groups[fmt] = []
            groups[fmt].append(tobj)

        self.tests_duplicates_map = {}
        # for each group do fdupes and merge all
        for fmt, tt_list in groups.items():
            if len(tt_list) > 1:
                kepttest2duptest_map, test2keptdup = \
                                                fmt.cross_tool_fdupes(*tt_list)
                self.tests_duplicates_map.update(test2keptdup)

        if os.path.isdir(os.path.dirname(self.duplicated_tests_info_file)):
            common_fs.dumpJSON(self.tests_duplicates_map, \
                                self.duplicated_tests_info_file, pretty=True)
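
As a reading aid, the sketch below shows how such a cached duplicates map could be consumed; the module path, file name, and test names are assumptions for illustration, not taken from the project.

# Minimal sketch of consuming the cached duplicates map; the module path and
# the file name below are assumptions for illustration only.
import muteria.common.fs as common_fs  # assumed module location

dup_map = common_fs.loadJSON("duplicated_tests_info.json")
# dup_map maps each duplicate test to the test that was kept, e.g.
# {"toolB:some_test": "toolA:some_test"}; unknown tests pass through unchanged.
resolved = [dup_map.get(t, t) for t in ["toolA:some_test", "toolB:some_test"]]
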
Example #2
    def _do_instrument_code (self, outputdir, exe_path_map, \
                                        code_builds_factory, \
                                        enabled_criteria, parallel_count=1):
        # Setup
        if os.path.isdir(self.instrumented_code_storage_dir):
            shutil.rmtree(self.instrumented_code_storage_dir)
        os.mkdir(self.instrumented_code_storage_dir)
        if os.path.isdir(self.gc_files_dir):
            shutil.rmtree(self.gc_files_dir)
        os.mkdir(self.gc_files_dir)

        prog = 'gcc'

        flags = ['--coverage', '-fprofile-dir='+self.gc_files_dir, '-O0']
        additionals = ["-fkeep-inline-functions"]
        
        # get gcc version
        ret, out, err = DriversUtils.execute_and_get_retcode_out_err(\
                                                        prog, ['-dumpversion'])
        ERROR_HANDLER.assert_true(ret == 0, "'gcc -dumpversion' failed")
        
        # if version > 6.5
        if int(out.split('.')[0]) >= 6:
            if int(out.split('.')[0]) > 6 or int(out.split('.')[1]) > 5:
                additionals += ["-fkeep-static-functions"]
        
        flags += additionals
        
        rel_path_map = {}
        exes, _ = code_builds_factory.repository_manager.\
                                                    get_relative_exe_path_map()
        for exe in exes:
            filename = os.path.basename(exe)
            rel_path_map[exe] = os.path.join(\
                                self.instrumented_code_storage_dir, filename)

        self.instrument_callback_obj = self.InstrumentCallbackObject()
        self.instrument_callback_obj.set_post_callback_args(\
                                            (self.gc_files_dir, rel_path_map))
        pre_ret, ret, post_ret = code_builds_factory.transform_src_into_dest(\
                        src_fmt=CodeFormats.C_SOURCE,\
                        dest_fmt=CodeFormats.NATIVE_CODE,\
                        src_dest_files_paths_map=None,\
                        compiler=prog, flags_list=flags, clean_tmp=True, \
                        reconfigure=True, \
                        callback_object=self.instrument_callback_obj)
        
        # Check
        if ret == common_mix.GlobalConstants.COMMAND_FAILURE:
            ERROR_HANDLER.error_exit("Program {} {}.".format(prog,\
                                        'built problematic'), __file__)

        # write down the rel_path_map
        ERROR_HANDLER.assert_true(not os.path.isfile(\
                self.instrumentation_details), "must not exist here", __file__)
        common_fs.dumpJSON(rel_path_map, self.instrumentation_details)
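
For context, the flags assembled above amount to an ordinary gcc coverage build; a standalone sketch, with placeholder file names, a placeholder profile directory, and a simplified version check, could look like this:

# Standalone sketch of the coverage build driven above; file names, the
# profile directory, and the simplified version check are placeholders.
import subprocess

version = subprocess.run(["gcc", "-dumpversion"], capture_output=True,
                         text=True, check=True).stdout.strip()
major_minor = tuple(int(x) for x in (version.split(".") + ["0"])[:2])
flags = ["--coverage", "-fprofile-dir=/tmp/gc_files", "-O0",
         "-fkeep-inline-functions"]
if major_minor > (6, 5):
    flags.append("-fkeep-static-functions")  # only available on newer gcc
subprocess.run(["gcc", *flags, "prog.c", "-o", "prog"], check=True)
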
Example #3
    def __init__(self, repository_manager, workdir=None):
        self.repository_manager = repository_manager
        self.workdir = workdir
        self.src_dest_fmt_to_handling_obj = {}

        self.code_conversion_tracker_file = None
        self.stored_files_mapping = None
        self.stored_files_mapping_file = None
        if self.workdir is not None:
            self.repo_path_saved_file = os.path.join(self.workdir, \
                                                             "saved_repo_path")
            self.code_conversion_tracker_file = \
                                    os.path.join(self.workdir, "is_converting")
            self.stored_files_mapping_file = \
                                        os.path.join(self.workdir, "files_map")
            exes, src_obj_map = \
                            self.repository_manager.get_relative_exe_path_map()
            if os.path.isdir(self.workdir):
                tmp = common_fs.loadJSON(self.stored_files_mapping_file)
                self.stored_files_mapping = {k: os.path.join(self.workdir, v) \
                                                        for k,v in tmp.items()}
            else:
                count = 0
                self.stored_files_mapping = {}
                tmp = {}
                for f in exes:
                    tmp[f] = str(count)
                    self.stored_files_mapping[f] = \
                                        os.path.join(self.workdir, str(count))
                    count += 1
                for s, o in list(src_obj_map.items()):
                    tmp[o] = str(count)
                    self.stored_files_mapping[o] = \
                                        os.path.join(self.workdir, str(count))
                    count += 1
                os.mkdir(self.workdir)
                common_fs.dumpJSON(tmp, \
                                self.stored_files_mapping_file, pretty=True)

        # Initialize
        for src_fmt, obj_cls in formatfrom_function_tuples:
            if isinstance(obj_cls, ccs.IdentityCodeConverter):
                self._fmt_from_to_registration(src_fmt, src_fmt, obj_cls)
            else:
                ERROR_HANDLER.assert_true(\
                                src_fmt in obj_cls.get_source_formats(), \
                                "{} {} {} {}".format( \
                            "Error in 'formatfrom_function_tuples'",
                            "src_fmt", src_fmt, "not in corresponding obj..."),
                                                                    __file__)
                for dest_fmt in obj_cls.get_destination_formats_for(src_fmt):
                    self._fmt_from_to_registration(src_fmt, dest_fmt, obj_cls)
Example #4
def fault_analysis(cm_corebench_scripts_dir, c_id, conf_py, in_muteria_outdir,
                   out_top_dir):
    if not os.path.isdir(out_top_dir):
        os.mkdir(out_top_dir)

    in_res_data_dir = os.path.join(in_muteria_outdir, 'latest', 'RESULTS_DATA')
    testtools_workdir = os.path.join(in_muteria_outdir, 'latest',
                                     'testscases_workdir')
    pass_fail_matrix = os.path.join(in_res_data_dir, "matrices",
                                    "PASSFAIL.csv")

    pf_mat = common_matrices.ExecutionMatrix(filename=pass_fail_matrix)
    test_list = list(pf_mat.get_nonkey_colname_list())
    #semu_tool_dirs = [d for d in os.listdir(testtools_workdir) if d.startswith('semu_cmp-')]

    # get fault tests
    get_commit_fault_tests(cm_corebench_scripts_dir, c_id, conf_py,
                           in_res_data_dir, out_top_dir)

    # get test to timestamp
    test_timestamp_file = os.path.join(out_top_dir, "test_to_timestamp.json")
    test2timestamp = {}
    ## get tests by tools
    tools2tests = {}
    for test in test_list:
        alias, _ = DriversUtils.reverse_meta_element(test)
        # XXX: only SEMU
        if not alias.startswith('semu_cmp-'):
            continue
        if alias not in tools2tests:
            tools2tests[alias] = set()
        tools2tests[alias].add(test)
    ## untar tests dir
    for alias, tests in tools2tests.items():
        d = os.path.join(testtools_workdir, alias)
        assert os.path.isdir(
            d), "test tool dir " + d + " missing for alias " + alias
        test_tar = os.path.join(d, 'tests_files.tar.gz')
        es = common_fs.TarGz.decompressDir(test_tar)
        assert es is None, "decompress error: " + es
        tests_files = os.path.join(d, 'tests_files')
        assert os.path.isdir(tests_files), "dir missing after decompress"
        for test in tests:
            _, simple_test = DriversUtils.reverse_meta_element(test)
            gt = TestcasesToolSemu._get_generation_time_of_test(
                simple_test, tests_files)
            test2timestamp[test] = gt
        shutil.rmtree(tests_files)

    common_fs.dumpJSON(test2timestamp, test_timestamp_file, pretty=True)
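
The resulting test_to_timestamp.json simply maps each meta test name to the generation time returned by `_get_generation_time_of_test`; a hypothetical instance (names and values invented) might look like:

# Hypothetical content of test_to_timestamp.json (names and values invented):
example_test2timestamp = {
    "semu_cmp-foo:test000001.ktest": 12.5,
    "semu_cmp-foo:test000002.ktest": 47.0,
}
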
Example #5
    def _collect_temporary_coverage_data(self, criteria_name_list, \
                                            test_execution_verdict, \
                                            used_environment_vars, \
                                                    result_dir_tmp):
        ''' get gcov files from gcda files into result_dir_tmp
        '''
        prog = 'gcov'

        cov2flags = {
                    TestCriteria.STATEMENT_COVERAGE: [],
                    TestCriteria.BRANCH_COVERAGE: ['-b', '-c'],
                    TestCriteria.FUNCTION_COVERAGE: ['-f'],
                }

        args_list = []
        for criterion in criteria_name_list:
            args_list += cov2flags[criterion]

        gcda_files = self._get_gcda_list()

        raw_filename_list = [os.path.splitext(f)[0] for f in gcda_files]
        args_list += raw_filename_list
        
        if len(gcda_files) > 0:
            # TODO: gcov may generate coverage for different files that have
            # the same filename but are located in different dirs. Avoid overriding.
            # Go to the directory where the gcov files will be generated
            cwd = os.getcwd()
            os.chdir(self.gc_files_dir)

            # collect gcda (gcno)
            r, _, _ = DriversUtils.execute_and_get_retcode_out_err(prog=prog, \
                                        args_list=args_list, out_on=False, \
                                                                err_on=False)

            os.chdir(cwd)
            
            if r != 0:
                ERROR_HANDLER.error_exit("Program {} {}.".format(prog,\
                        'error collecting coverage is problematic'), __file__)
            
            # delete gcda
            for gcda_f in gcda_files:
                os.remove(gcda_f)
            
            common_fs.dumpJSON(self._get_gcov_list(), \
                                os.path.join(result_dir_tmp,\
                                                self.gcov_files_list_filename))
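
The helper above shells out to gcov from the directory that holds the .gcda/.gcno files, because gcov writes its .gcov output into the current working directory; a simplified standalone equivalent, with a placeholder directory, is sketched below.

# Simplified standalone equivalent of the gcov collection above; the
# gc-files directory is a placeholder.
import glob
import os
import subprocess

os.chdir("/tmp/gc_files")               # gcov writes .gcov files into the CWD
gcda_files = glob.glob("**/*.gcda", recursive=True)
stems = [os.path.splitext(f)[0] for f in gcda_files]
if stems:
    subprocess.run(["gcov", "-b", "-c", *stems], check=True)  # branch + counts
    gcov_files = glob.glob("*.gcov")
    for f in gcda_files:                # remove .gcda so the next run starts clean
        os.remove(f)
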
Example #6
    def compute_stats(config, explorer):
        # get the matrix of each test criterion
        coverages = {}
        total_to = {}
        for c in config.ENABLED_CRITERIA.get_val():
            if explorer.file_exists(fd_structure.CRITERIA_MATRIX[c]):
                mat_file = explorer.get_existing_file_pathname(\
                                            fd_structure.CRITERIA_MATRIX[c])
                mat = common_matrices.ExecutionMatrix(filename=mat_file)
                row2collist = mat.query_active_columns_of_rows()
                cov = len([k for k, v in row2collist.items() if len(v) > 0])
                tot = len(row2collist)
                coverages[c.get_str()] = '{:.2f}'.format(cov * 100.0 / tot)
                total_to[c.get_str()] = tot

        # JSON
        out_json = {}
        for c in coverages:
            out_json[c] = {
                'coverage': coverages[c],
                '# test objectives': total_to[c]
            }
        common_fs.dumpJSON(out_json, explorer.get_file_pathname(\
                                            fd_structure.STATS_MAIN_FILE_JSON))

        # HTML
        template_file = os.path.join(os.path.dirname(\
                            os.path.abspath(__file__)), 'summary_report.html')
        report_file = explorer.get_file_pathname(\
                                            fd_structure.STATS_MAIN_FILE_HTML)
        rendered = Template(open(template_file).read()).render( \
                                {'coverages':coverages, 'total_to':total_to})
        with open(report_file, 'w') as f:
            f.write(rendered)

        try:
            webbrowser.get()
            webbrowser.open('file://' + report_file, new=2)
        except Exception as e:
            logging.warning("webbrowser error: " + str(e))

    #~ def compute_stats()


#~ class DataHandling
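
For reference, the JSON written by `compute_stats` has one entry per enabled criterion, pairing the coverage percentage with the number of test objectives; a hypothetical instance (criterion names and values invented) is shown below.

# Hypothetical instance of the stats JSON written above (criterion names and
# values are invented for illustration):
example_stats = {
    "STATEMENT_COVERAGE": {"coverage": "87.50", "# test objectives": 240},
    "BRANCH_COVERAGE": {"coverage": "72.31", "# test objectives": 130},
}
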
Example #7
    def test_dumpJSON(self):
        jfilename = os.path.join(self._worktmpdir, "jsontmp.json")
        exp = {"x": 1, "y": [2, 3, "zz"]}
        res = common_fs.dumpJSON(exp, jfilename)
        # dumpJSON succeeded
        self.assertEqual(res, None)

        with open(jfilename) as fp:
            res = json.load(fp)
        # dumpJSON was correct
        self.assertEqual(res, exp)
        os.remove(jfilename)
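
Building on the behavior checked in this test, here is a minimal round-trip sketch with loadJSON; the module path is an assumption for illustration.

# Round-trip sketch: dumpJSON returns None on success and loadJSON reads the
# file back; the module path below is an assumption for illustration.
import os
import muteria.common.fs as common_fs  # assumed module location

data = {"x": 1, "y": [2, 3, "zz"]}
path = "/tmp/roundtrip_example.json"
assert common_fs.dumpJSON(data, path, pretty=True) is None  # None means success
assert common_fs.loadJSON(path) == data                     # content round-trips
os.remove(path)
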
Example #8
    def _do_generate_tests (self, exe_path_map, code_builds_factory, \
                                                meta_criteria_tool_obj=None, \
                                                                max_time=None):
        dtl = self.code_builds_factory.repository_manager.get_dev_tests_list()
        ERROR_HANDLER.assert_true(dtl is not None, "invalid dev_test_list", \
                                                                    __file__)
        if os.path.isdir(self.tests_storage_dir):
            shutil.rmtree(self.tests_storage_dir)
        os.mkdir(self.tests_storage_dir)

        if self.wrapper_test_splitting:
            # Execute the tests with the counting wrapper
            logging.debug("## Splitting tests with wrapper")
            split_workdir = os.path.join(self.tests_storage_dir)
            new_exe_path_map = self.wrapper_obj.get_test_splitting_wrapper()\
                                    .set_wrapper(split_workdir, exe_path_map)
            sub_test_list = []
            for test in dtl:
                logging.debug('splitting test '+test) #DBG
                self.wrapper_obj.get_test_splitting_wrapper()\
                                            .switch_to_new_test()
                self.execute_testcase(test, new_exe_path_map, {})
                n_subtest, args = \
                                self.wrapper_obj.get_test_splitting_wrapper()\
                                                                .collect_data()
                for i in range(n_subtest):
                    sub_test_list.append([test+self.splittest_ext+str(i), \
                                        args[i] if i < len(args) else None])

            self.wrapper_obj.get_test_splitting_wrapper().cleanup()

            # update dtl with counter tests
            dtl = sub_test_list
        else:
            dtl = [[t, None] for t in dtl]

        common_fs.dumpJSON(dtl, self.test_list_storage_file)
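
The dumped test list is a list of [test name, wrapper args] pairs; a hypothetical instance (the sub-test suffix produced by `self.splittest_ext` and the argument values are invented) could look like this:

# Hypothetical content of the dumped test list; the sub-test suffix and the
# argument values are invented placeholders.
example_dtl = [
    ["test_a", None],                # plain developer test, no wrapper split
    ["test_b_split_0", ["arg0"]],    # wrapper-split sub-test with its args
]
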
Example #9
    def serialize(self):
        """ Serialize the matrix to its corresponding file if not None
        """
        if self.filename is not None:
            common_fs.dumpJSON(self.data, self.filename, pretty=True)
Example #10
    def _do_instrument_code (self, outputdir, exe_path_map, \
                                        code_builds_factory, \
                                        enabled_criteria, parallel_count=1):
        # Setup
        if os.path.isdir(self.instrumented_code_storage_dir):
            shutil.rmtree(self.instrumented_code_storage_dir)
        os.mkdir(self.instrumented_code_storage_dir)
        if os.path.isdir(self.mutant_data):
            shutil.rmtree(self.mutant_data)
        os.mkdir(self.mutant_data)

        prog = 'mart'

        # get llvm compiler path
        ret, out, err = DriversUtils.execute_and_get_retcode_out_err(\
                                                        prog, ['--version'])
        llvm_compiler_path = None
        for line in out.splitlines():
            line = line.strip()
            if line.startswith('LLVM tools dir:'):
                llvm_compiler_path = line.split()[3]  #[1:-1]
                break

        ERROR_HANDLER.assert_true(llvm_compiler_path is not None, \
                                'Problem getting llvm path for mart', __file__)

        # Build into LLVM
        back_llvm_compiler = 'clang'
        rel_path_map = {}
        exes, _ = code_builds_factory.repository_manager.\
                                                    get_relative_exe_path_map()
        for exe in exes:
            filename = os.path.basename(exe)
            rel_path_map[exe] = os.path.join(self.mutant_data, filename)
        pre_ret, ret, post_ret = code_builds_factory.transform_src_into_dest(\
                        src_fmt=CodeFormats.C_SOURCE,\
                        dest_fmt=CodeFormats.LLVM_BITCODE,\
                        src_dest_files_paths_map=rel_path_map,\
                        compiler=back_llvm_compiler, flags_list=['-g'], \
                        clean_tmp=True, reconfigure=True, \
                        llvm_compiler_path=llvm_compiler_path)
        if ret == common_mix.GlobalConstants.TEST_EXECUTION_ERROR:
            ERROR_HANDLER.error_exit("Program {}.".format(\
                                'LLVM (clang) built problematic'), __file__)

        # Update exe_map to reflect bitcode extension
        rel2bitcode = {}
        for r_file, b_file in list(rel_path_map.items()):
            bc = b_file + '.bc'
            ERROR_HANDLER.assert_true(os.path.isfile(bc), \
                                    "Bitcode file not existing: "+bc, __file__)
            rel2bitcode[r_file] = bc

        ERROR_HANDLER.assert_true(len(rel_path_map) == 1, \
                            "Support single bitcode module for now", __file__)

        bitcode_file = rel2bitcode[list(rel2bitcode.keys())[0]]

        # mart params
        bool_param, k_v_params = self._get_default_params()
        if TestCriteria.STRONG_MUTATION in enabled_criteria:
            bool_param['-write-mutants'] = True

        args = [bp for bp, en in list(bool_param.items()) if en]
        for k, v in list(k_v_params.items()):
            if v is not None:
                args += [k, v]
        args.append(bitcode_file)

        # Execute Mart
        cwd = os.getcwd()
        os.chdir(self.mutant_data)
        ret, out, err = DriversUtils.execute_and_get_retcode_out_err(\
                                                                    prog, args)
        os.chdir(cwd)

        if (ret != 0):
            logging.error(out)
            logging.error(err)
            logging.error("\n>> CMD: " + " ".join([prog] + args) + '\n')
            ERROR_HANDLER.error_exit("mart failed'", __file__)

        # write down the rel_path_map
        ERROR_HANDLER.assert_true(not os.path.isfile(\
                self.instrumentation_details), "must not exist here", __file__)
        store_obj = {c.get_str(): {} for c in enabled_criteria}
        for k, v in list(rel_path_map.items()):
            exe_file = os.path.basename(v)
            if TestCriteria.WEAK_MUTATION in enabled_criteria:
                crit_str = TestCriteria.WEAK_MUTATION.get_str()
                store_obj[crit_str][k] = exe_file + '.WM'
            if TestCriteria.MUTANT_COVERAGE in enabled_criteria:
                crit_str = TestCriteria.MUTANT_COVERAGE.get_str()
                store_obj[crit_str][k] = exe_file + '.COV'
            if TestCriteria.STRONG_MUTATION in enabled_criteria:
                crit_str = TestCriteria.STRONG_MUTATION.get_str()
                store_obj[crit_str][k] = exe_file + '.MetaMu'
        common_fs.dumpJSON(store_obj, self.instrumentation_details)
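
The instrumentation_details file written at the end maps each enabled criterion to the Mart artifact produced per executable; a hypothetical instance (criterion strings and the executable name invented) is shown below.

# Hypothetical shape of the instrumentation_details JSON written above
# (criterion strings and the executable name are invented for illustration):
example_details = {
    "WEAK_MUTATION": {"src/prog": "prog.WM"},
    "MUTANT_COVERAGE": {"src/prog": "prog.COV"},
    "STRONG_MUTATION": {"src/prog": "prog.MetaMu"},
}
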
Example #11
    def write_to_file(self, filepath):
        common_fs.dumpJSON(self, filepath)
Example #12
    def write_to_file(self, file_path):
        common_fs.dumpJSON(self.data, file_path, pretty=True)
Example #13
    def _do_generate_tests (self, exe_path_map, outputdir, \
                                        code_builds_factory, max_time=None):
        # Setup
        if os.path.isdir(self.tests_working_dir):
            shutil.rmtree(self.tests_working_dir)
        os.mkdir(self.tests_working_dir)
        if os.path.isdir(self.tests_storage_dir):
            shutil.rmtree(self.tests_storage_dir)

        prog = 'klee'
        default_sym_args = ['-sym-arg', '5']
        back_llvm_compiler = None  #'clang'

        rel_path_map = {}
        exes, _ = code_builds_factory.repository_manager.\
                                                    get_relative_exe_path_map()
        for exe in exes:
            filename = os.path.basename(exe)
            rel_path_map[exe] = os.path.join(self.tests_working_dir, filename)
        pre_ret, ret, post_ret = code_builds_factory.transform_src_into_dest(\
                        src_fmt=CodeFormats.C_SOURCE,\
                        dest_fmt=CodeFormats.LLVM_BITCODE,\
                        src_dest_files_paths_map=rel_path_map,\
                        compiler=back_llvm_compiler, \
                        clean_tmp=True, reconfigure=True)
        if ret == common_mix.GlobalConstants.TEST_EXECUTION_ERROR:
            ERROR_HANDLER.error_exit("Program {}.".format(\
                                'LLVM built problematic'), __file__)

        # Update exe_map to reflect bitcode extension
        rel2bitcode = {}
        for r_file, b_file in list(rel_path_map.items()):
            bc = b_file + '.bc'
            ERROR_HANDLER.assert_true(os.path.isfile(bc), \
                                    "Bitcode file not existing: "+bc, __file__)
            rel2bitcode[r_file] = bc

        ERROR_HANDLER.assert_true(len(rel_path_map) == 1, \
                            "Support single bitcode module for now", __file__)

        bitcode_file = rel2bitcode[list(rel2bitcode.keys())[0]]

        # klee params
        bool_param, k_v_params = self._get_default_params()
        if max_time is not None:
            k_v_params['-max-time'] = str(max_time)

        args = [bp for bp, en in list(bool_param.items()) if en]
        for k, v in list(k_v_params.items()):
            if v is not None:
                args += [k, str(v)]
        args.append(bitcode_file)

        # sym args
        klee_sym_args = default_sym_args
        uc = self.config.get_tool_user_custom()
        if uc is not None:
            post_bc_cmd = uc.POST_TARGET_CMD_ORDERED_FLAGS_LIST
            if post_bc_cmd is not None:
                klee_sym_args = []
                for tup in post_bc_cmd:
                    klee_sym_args += list(tup)
        args += klee_sym_args

        # Execute Klee
        ret, out, err = DriversUtils.execute_and_get_retcode_out_err(\
                                                                    prog, args)

        if (ret != 0):
            logging.error(out)
            logging.error(err)
            logging.error("\n>> CMD: " + " ".join([prog] + args) + '\n')
            ERROR_HANDLER.error_exit("klee failed'", __file__)

        store_obj = {r: os.path.basename(b) for r, b in rel2bitcode.items()}
        common_fs.dumpJSON(store_obj, self.test_details_file)

    #~ def _do_generate_tests()


#~ class CustomTestcases
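
Put together, the driver above ends up invoking a command along these lines; the time budget, bitcode name, and symbolic-argument specification are illustrative values only.

# Illustrative command line assembled by the driver above (all values are
# made up; the real ones come from the tool configuration):
cmd = ["klee", "-max-time", "3600", "prog.bc", "-sym-arg", "5"]
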
Example #14
def summarize_data(outdir, summarized_data_dir, mutant_exec_res, \
                                                        mut_ex_prepa_data_dir):
    other_rel_to_reltests_file = os.path.join(
        mut_ex_prepa_data_dir, 'relevantmuts_to_relevanttests.json')
    other_rel_to_reltests = common_fs.loadJSON(other_rel_to_reltests_file)
    stored_map_file = os.path.join(mut_ex_prepa_data_dir,
                                   'stored_test_map.json')
    stored_map = common_fs.loadJSON(stored_map_file)

    def get_too2relmuts(relmuts_to_reltests, toollist=None):
        res = {tool: set()
               for tool in toollist} if toollist is not None else {}
        for relmut, t_list in relmuts_to_reltests.items():
            for meta_t in t_list:
                toolalias, test = DriversUtils.reverse_meta_element(meta_t)
                if toollist is None:
                    if toolalias not in res:
                        res[toolalias] = set()
                else:
                    assert toolalias in toollist, "PB: toolalias ({}) not in toollist ({})".format(
                        toolalias, toollist)
                res[toolalias].add(relmut)
        for toolalias in res:
            res[toolalias] = list(res[toolalias])
        return res

    #~ def get_too2relmuts()

    other_tool_to_relmuts = get_too2relmuts(other_rel_to_reltests)

    all_tests, fail_tests, relevant_mutants_to_relevant_tests, \
                mutants_to_killingtests, tests_to_killed_mutants = load.load (mutant_exec_res, fault_revealing=False)
    if not os.path.isdir(summarized_data_dir):
        os.mkdir(summarized_data_dir)

    cmp_result_file = os.path.join(summarized_data_dir, "compare_results.json")
    initial_relmuts_file = os.path.join(summarized_data_dir,
                                        "initial_relevant.json")
    rMS_file = os.path.join(summarized_data_dir, "rMS.json")

    # get relevant muts by tools
    # get tool list
    tinf = common_fs.loadJSON(
        os.path.join(
            mutant_exec_res,
            "post/RESULTS_DATA/other_copied_results/testcasesInfos.json"))
    tool2relmuts = get_too2relmuts(
        relevant_mutants_to_relevant_tests,
        toollist=list(set(tinf["CUSTOM"]) - {"custom_devtests"}))

    # update with stored map
    for relmut, t_list in other_rel_to_reltests.items():
        for other_meta_t in t_list:
            if other_meta_t in stored_map:
                for meta_t in stored_map[other_meta_t]:
                    toolalias, test = DriversUtils.reverse_meta_element(meta_t)
                    assert toolalias in tool2relmuts, "PB2: "
                    tool2relmuts[toolalias].append(relmut)
    for toolalias in tool2relmuts:
        tool2relmuts[toolalias] = list(set(tool2relmuts[toolalias]))

    common_fs.dumpJSON(tool2relmuts, cmp_result_file, pretty=True)
    common_fs.dumpJSON(other_tool_to_relmuts,
                       initial_relmuts_file,
                       pretty=True)

    # Compute and print the relevant mutation score per tool
    all_rel_muts = set()
    devtest_rel = set()
    for talias, rellist in tool2relmuts.items():
        all_rel_muts |= set(rellist)
    for talias, rellist in other_tool_to_relmuts.items():
        all_rel_muts |= set(rellist)
        if talias.startswith("custom_devtests"):
            devtest_rel |= set(rellist)

    rMS = {"ALL-GENONLY": {}, "ALL-ALL": {}, "ADDITIONAL": {}}
    for talias, rellist in tool2relmuts.items():
        rMS["ALL-ALL"][talias] = len(set(rellist)
                                     | devtest_rel) * 100.0 / len(all_rel_muts)
        rMS["ALL-GENONLY"][talias] = len(rellist) * 100.0 / len(all_rel_muts)
        rMS["ADDITIONAL"][talias] = len(set(
            rellist) - devtest_rel) * 100.0 / len(all_rel_muts - devtest_rel)
    common_fs.dumpJSON(rMS, rMS_file, pretty=True)
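
The three rMS variants computed above differ only in which mutant sets enter the numerator and denominator; a tiny worked example with made-up sets:

# Tiny worked example of the three rMS variants computed above (made-up sets):
all_rel_muts = {"m1", "m2", "m3", "m4"}   # relevant mutants found by any tool
devtest_rel = {"m1"}                      # relevant w.r.t. developer tests
rellist = {"m1", "m2"}                    # relevant mutants for one tool
all_all = len(rellist | devtest_rel) * 100.0 / len(all_rel_muts)        # 50.0
all_genonly = len(rellist) * 100.0 / len(all_rel_muts)                  # 50.0
additional = len(rellist - devtest_rel) * 100.0 \
                        / len(all_rel_muts - devtest_rel)               # ~33.3
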
Example #15
def prepare_mutant_execution(outdir, muteria_output, original_conf, \
                                    prepare_data_dir, \
                    avoid_meta_tests_list_file, avoid_meta_mutants_list_file):
    if not os.path.isdir(prepare_data_dir):
        os.mkdir(prepare_data_dir)

    # Store the test list per technique
    test_list_file = os.path.join(muteria_output, 'latest', 'RESULTS_DATA', \
                                'other_copied_results', 'testcasesInfos.json')
    shutil.copy2(test_list_file, os.path.join(prepare_data_dir, \
                                'gentests_'+os.path.basename(test_list_file)))

    relevant_exec_outfolder = os.path.join(os.path.dirname(original_conf), \
                                                            'output', 'latest')

    # Do fdupes with relevant output tests of semu and shadow and store map
    r_shadow_tests_src = os.path.join(relevant_exec_outfolder, \
                            'testscases_workdir', 'shadow_se', 'tests_files')
    r_shadow_tests_src_tar = r_shadow_tests_src + '.tar.gz'
    r_semu_tests_src = os.path.join(relevant_exec_outfolder, \
                                'testscases_workdir', 'semu', 'tests_files')
    r_semu_tests_src_tar = r_semu_tests_src + '.tar.gz'

    cur_exec_outfolder = os.path.join(muteria_output, 'latest')
    cur_exec_tg_folder = os.path.join(cur_exec_outfolder, 'testscases_workdir')

    cur_shadow_tests_src = None
    cur_semu_tests_srcs = []
    cur_semu_tests_src_tars = []
    for f in os.listdir(cur_exec_tg_folder):
        if f.startswith('semu'):
            cur_semu_tests_srcs.append(
                os.path.join(cur_exec_tg_folder, f, 'tests_files'))
            cur_semu_tests_src_tars.append(cur_semu_tests_srcs[-1] + '.tar.gz')
        elif f.startswith('shadow_se'):
            if cur_shadow_tests_src is not None:
                error_exit("Multiple shadow_se folders exist")
            cur_shadow_tests_src = os.path.join(cur_exec_tg_folder, f,
                                                'tests_files')
            cur_shadow_tests_src_tar = cur_shadow_tests_src + '.tar.gz'
    # use folder fdupes from KtestFormat in muteria
    ## decompress the folders
    common_fs.TarGz.decompressDir(r_shadow_tests_src_tar)
    common_fs.TarGz.decompressDir(r_semu_tests_src_tar)
    common_fs.TarGz.decompressDir(cur_shadow_tests_src_tar)
    for i in range(len(cur_semu_tests_src_tars)):
        common_fs.TarGz.decompressDir(cur_semu_tests_src_tars[i])

    ## apply fdupes
    def post_test_dup(stored_map, tests_to_avoid, origin,
                      kepttest2duptest_map):
        for kept, duplist in kepttest2duptest_map.items():
            alias, _ = DriversUtils.reverse_meta_element(kept)
            if alias == origin:
                stored_map[kept] = []
                for dup in duplist:
                    dalias, dtest = DriversUtils.reverse_meta_element(dup)
                    if dalias == origin:
                        continue
                    stored_map[kept].append(dup)
                    tests_to_avoid.append(dup)

    #~ def post_test_dup()

    stored_map = {}
    stored_map_file = os.path.join(prepare_data_dir, 'stored_test_map.json')
    tests_to_avoid = []

    ### Shadow
    custom_bin = '/home/shadowvm/shadow/klee-change/Release+Asserts/bin/'
    folders = [r_shadow_tests_src, cur_shadow_tests_src]
    folder2alias = {d: os.path.basename(os.path.dirname(d)) for d in folders}
    origin = folder2alias[r_shadow_tests_src]
    kepttest2duptest_map, test2keptdup = KTestTestFormat.cross_folder_fdupes(\
                                            custom_bin, folders, folder2alias)
    post_test_dup(stored_map, tests_to_avoid, origin, kepttest2duptest_map)
    ## remove the untar dirs
    for d in folders:
        shutil.rmtree(d)

    ### Semu
    custom_bin = None
    folders = [r_semu_tests_src] + cur_semu_tests_srcs
    folder2alias = {d: os.path.basename(os.path.dirname(d)) for d in folders}
    origin = folder2alias[r_semu_tests_src]
    kepttest2duptest_map, test2keptdup = KTestTestFormat.cross_folder_fdupes(\
                                            custom_bin, folders, folder2alias)
    post_test_dup(stored_map, tests_to_avoid, origin, kepttest2duptest_map)
    for d in folders:
        shutil.rmtree(d)

    ## store dup map and tests to avoid
    common_fs.dumpJSON(stored_map, stored_map_file, pretty=True)
    common_fs.dumpJSON(tests_to_avoid, avoid_meta_tests_list_file, pretty=True)

    # XXX TODO: modify tg_conf to use tests to avoid

    # Get the criteria dir and add to the muteria out
    r_criteria_dir = os.path.join(relevant_exec_outfolder, 'criteria_workdir')
    dest_crit_work = os.path.join(muteria_output, 'latest', 'criteria_workdir')
    if os.path.isdir(os.path.join(dest_crit_work, 'mart_0')):
        shutil.rmtree(os.path.join(dest_crit_work, 'mart_0'))
    shutil.copytree(os.path.join(r_criteria_dir, 'mart_0'),
                    os.path.join(dest_crit_work, 'mart_0'))

    # Get the selected mutants (mutants that are Not relevant w.r.t dev tests)
    r_res_top = os.path.dirname(os.path.dirname(original_conf))
    r_res = os.path.join(r_res_top, 'res')
    r_res_tar = glob.glob(r_res_top + '/*res.tar.gz')[0]

    ## untar res
    if os.system('cd {} && tar -xzf {} --exclude {} --exclude {} && test -d res'.format(\
                        r_res_top, os.path.basename(r_res_tar), \
                        'post/RESULTS_DATA/other_copied_results/Flakiness', \
                        'pre/RESULTS_DATA/other_copied_results/Flakiness')) != 0:
        error_exit("untar re failed")
    ## get relevant mutants to relevant tests
    all_tests, fail_tests, relevant_mutants_to_relevant_tests, \
                mutants_to_killingtests, tests_to_killed_mutants = load.load (r_res, fault_revealing=False)

    ## remove untar
    shutil.rmtree(r_res)
    ## filter mutants (remove those relevant w.r.t. dev tests)
    rel_to_reltests_file = os.path.join(prepare_data_dir,
                                        'relevantmuts_to_relevanttests.json')
    mutants_to_avoid = []
    ### mutants relevant w.r.t. devtests added to avoid
    for mut, r_tests in relevant_mutants_to_relevant_tests.items():
        for rt in r_tests:
            alias, _ = DriversUtils.reverse_meta_element(rt)
            if alias.startswith("custom_devtests"):
                mutants_to_avoid.append(mut)
                break

    ## save filter mutants as to avoid
    common_fs.dumpJSON(relevant_mutants_to_relevant_tests,
                       rel_to_reltests_file,
                       pretty=True)
    common_fs.dumpJSON(mutants_to_avoid,
                       avoid_meta_mutants_list_file,
                       pretty=True)
Example #16
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('outdir', help="outdir of the execution")
    parser.add_argument('original_conf', help="config of the project")
    parser.add_argument('--cleanstart', action="store_true", \
                                                help="delete out and restart")
    parser.add_argument('--only_gentests', action="store_true", \
                              help="only generate tests, no mutant execution")
    parser.add_argument('--only_getseeds', action="store_true", \
                              help="only collect the seeds, no tests generation or mutant execution")
    parser.add_argument('--no_summary', action="store_true", \
                              help="Disable Summary")

    args = parser.parse_args()
    outdir = os.path.abspath(args.outdir)
    original_conf = os.path.abspath(args.original_conf)
    if args.cleanstart:
        if os.path.isdir(outdir):
            shutil.rmtree(outdir)
    if not os.path.isdir(outdir):
        os.mkdir(outdir)

    # XXX
    do_remove_added_devtests = args.only_gentests

    seeds_dir = os.path.join(outdir, "ktests_seeds")
    checkpoint_file = os.path.join(outdir, 'checkpoint.json')

    tg_conf = os.path.join(outdir, "tg_conf.py")
    muteria_output = os.path.join(outdir, "muteria_output")
    mut_ex_prepa_data_dir = os.path.join(outdir, "mut_ex_prepa_data")
    summarized_data_dir = os.path.join(outdir, "summarized_data_dir")
    mutant_exec_res = os.path.join(outdir, "mutant_exec_res")
    avoid_meta_mutants_list_file = os.path.join(outdir, mut_ex_prepa_data_dir, \
                                            "avoid_meta_mutants_list_file.txt")
    avoid_meta_tests_list_file = os.path.join(outdir, mut_ex_prepa_data_dir, \
                                            "avoid_meta_tests_list_file.txt")

    # load added dev tests
    added_devtest_map_file = os.path.join(os.path.dirname(__file__),
                                          'added_tests',
                                          'added_tests_map.json')
    added_devtest_map = common_fs.loadJSON(added_devtest_map_file)
    cur_id = os.path.basename(outdir)

    if do_remove_added_devtests:
        if cur_id not in added_devtest_map:
            error_exit("do_remove_added_devtests is true but id not in map")
        toskip_added_devtest = added_devtest_map[cur_id]
    else:
        toskip_added_devtest = []

    # check checkpoint and load if necessary
    cp_data = {
        SEED_COLLECTION: False,
        TEST_GENERATION: False,
        MUTANT_EXECUTION_PREPA: False,
        MUTANT_EXECUTION: False,
        DATA_SUMMARIZATION: False,
    }

    if args.only_getseeds:
        cp_data[TEST_GENERATION] = True
        cp_data[MUTANT_EXECUTION_PREPA] = True
        cp_data[MUTANT_EXECUTION] = True
        cp_data[DATA_SUMMARIZATION] = True
    if args.only_gentests:
        cp_data[MUTANT_EXECUTION_PREPA] = True
        cp_data[MUTANT_EXECUTION] = True
    if args.no_summary:
        cp_data[DATA_SUMMARIZATION] = True

    if os.path.isfile(checkpoint_file):
        cp_data = common_fs.loadJSON(checkpoint_file)

    if not cp_data[SEED_COLLECTION]:
        print("#(DBG): Executing {} ...".format(SEED_COLLECTION))
        collect_seeds(outdir, seeds_dir, muteria_output, original_conf,
                      toskip_added_devtest)
        cp_data[SEED_COLLECTION] = True
        common_fs.dumpJSON(cp_data, checkpoint_file, pretty=True)

    if not cp_data[TEST_GENERATION]:
        print("#(DBG): Executing {} ...".format(TEST_GENERATION))
        generate_tests (outdir, seeds_dir, muteria_output, original_conf, \
                          tg_conf, args.only_gentests, toskip_added_devtest, args.only_gentests)
        cp_data[TEST_GENERATION] = True
        common_fs.dumpJSON(cp_data, checkpoint_file, pretty=True)

    if not cp_data[MUTANT_EXECUTION_PREPA]:
        print("#(DBG): Executing {} ...".format(MUTANT_EXECUTION_PREPA))
        prepare_mutant_execution(outdir, muteria_output, original_conf, \
                            mut_ex_prepa_data_dir, avoid_meta_tests_list_file,\
                                                avoid_meta_mutants_list_file)
        cp_data[MUTANT_EXECUTION_PREPA] = True
        common_fs.dumpJSON(cp_data, checkpoint_file, pretty=True)

    if not cp_data[MUTANT_EXECUTION]:
        print("#(DBG): Executing {} ...".format(MUTANT_EXECUTION))
        mutant_execution(outdir, avoid_meta_tests_list_file, avoid_meta_mutants_list_file, \
                                    muteria_output, tg_conf, mutant_exec_res)
        cp_data[MUTANT_EXECUTION] = True
        common_fs.dumpJSON(cp_data, checkpoint_file, pretty=True)

    if not cp_data[DATA_SUMMARIZATION]:
        print("#(DBG): Executing {} ...".format(DATA_SUMMARIZATION))
        if args.only_gentests:
            cm_corebench_scripts_dir = os.path.join(
                os.path.dirname(
                    os.path.dirname(
                        os.path.dirname(os.path.dirname(original_conf)))),
                "cm_corebench_scripts")
            c_id = os.path.basename(outdir)
            get_fault_tests.fault_analysis (cm_corebench_scripts_dir, c_id, tg_conf, \
                                                    muteria_output, summarized_data_dir)
        else:
            summarize_data(outdir, summarized_data_dir, mutant_exec_res, \
                                                        mut_ex_prepa_data_dir)
        cp_data[DATA_SUMMARIZATION] = True
        common_fs.dumpJSON(cp_data, checkpoint_file, pretty=True)

    print("\n# DONE!\n")
Example #17
    def _do_instrument_code (self, exe_path_map, \
                                        code_builds_factory, \
                                        enabled_criteria, parallel_count=1):
        # Setup
        if os.path.isdir(self.instrumented_code_storage_dir):
            shutil.rmtree(self.instrumented_code_storage_dir)
        os.mkdir(self.instrumented_code_storage_dir)
        if os.path.isdir(self.used_srcs_dir):
            shutil.rmtree(self.used_srcs_dir)
        os.mkdir(self.used_srcs_dir)
        if os.path.isdir(self.preload_dir):
            shutil.rmtree(self.preload_dir)
        os.mkdir(self.preload_dir)

        rel_path_map = {}
        exes, src_inter_map = code_builds_factory.repository_manager.\
                                                    get_relative_exe_path_map()
        for f in src_inter_map:
            rel_path_map[f] = os.path.join(self.used_srcs_dir, f)
            relloc = os.path.join(self.used_srcs_dir, os.path.dirname(f))
            if not os.path.isdir(relloc):
                os.makedirs(relloc)
        not_none_dest = {k: v for k, v in list(rel_path_map.items())}
        pre_ret, ret, post_ret = code_builds_factory.transform_src_into_dest(\
                        src_fmt=CodeFormats.PYTHON_SOURCE,\
                        dest_fmt=CodeFormats.PYTHON_SOURCE,\
                        src_dest_files_paths_map=not_none_dest)
        # write down the rel_path_map
        ERROR_HANDLER.assert_true(not os.path.isfile(\
                self.instrumentation_details), "must not exist here", __file__)
        common_fs.dumpJSON(rel_path_map, self.instrumentation_details)

        if ret == common_mix.GlobalConstants.COMMAND_FAILURE:
            ERROR_HANDLER.error_exit("Problem with copying python sources", \
                                                                    __file__)

        # Create config and preload
        with open(self.preload_file, "w") as f:
            f.write("import coverage\ncoverage.process_startup()\n")

        concurrencies = ['thread', 'multiprocessing', 'gevent', 'greenlet', \
                                                                'eventlet']
        concurrency_support = 'thread'

        #config = configparser.ConfigParser()
        config = {}
        config['run'] = {
                        'include': \
                            [os.path.join('*',v) for v in rel_path_map.keys()],
                        'data_file': self.raw_data_file,
                        'branch': (\
                            TestCriteria.BRANCH_COVERAGE in enabled_criteria),
                        'parallel': True,
                        'concurrency': concurrency_support,
                        }
        with open(self.config_file, 'w') as f:
            #config.write(f)
            for k, v in list(config.items()):
                f.write('[' + k + ']' + '\n')
                for kk, vv in list(v.items()):
                    if type(vv) == list:
                        f.write(kk + ' = \n')
                        for vvv in vv:
                            f.write('\t' + str(vvv) + '\n')
                    else:
                        f.write(kk + ' = ' + str(vv) + '\n')

    #~ def _do_instrument_code()


#~ class CriteriaToolCoveragePy
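
The hand-rolled INI writer above yields a coverage.py configuration along these lines; the paths are placeholders and list entries are written one per line (the driver indents them with a tab):

# Roughly the configuration file emitted by the loop above (paths are
# placeholders; list entries are written one per line):
example_coveragerc = """\
[run]
include =
    */package/module.py
data_file = /tmp/raw_coverage_data
branch = True
parallel = True
concurrency = thread
"""
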
Example #18
    def _collect_temporary_coverage_data(self, criteria_name_list, \
                                            test_execution_verdict, \
                                            used_environment_vars, \
                                            result_dir_tmp, \
                                            testcase):
        ''' get gcov files from gcda files into result_dir_tmp
        '''
        prog = 'gcov'
        if self.custom_binary_dir is not None:
            prog = os.path.join(self.custom_binary_dir, prog)
            ERROR_HANDLER.assert_true(os.path.isfile(prog), \
                            "The tool {} is missing from the specified dir {}"\
                                        .format(os.path.basename(prog), \
                                            self.custom_binary_dir), __file__)

        cov2flags = {
            TestCriteria.STATEMENT_COVERAGE: [],
            TestCriteria.BRANCH_COVERAGE: ['-b', '-c'],
            TestCriteria.FUNCTION_COVERAGE: ['-f'],
        }

        args_list = []
        for criterion in criteria_name_list:
            args_list += cov2flags[criterion]

        gcda_files = self._get_gcda_list()

        raw_filename_list = [os.path.splitext(f)[0] for f in gcda_files]
        args_list += raw_filename_list

        if len(gcda_files) > 0:
            # TODO: gcov may generate coverage for different files that have
            # the same filename but are located in different dirs. Avoid overriding.
            # Go to the directory where the gcov files will be generated
            cwd = os.getcwd()
            os.chdir(self.gc_files_dir)

            # collect gcda (gcno)
            r, _, err_str = DriversUtils.execute_and_get_retcode_out_err(\
                                        prog=prog, \
                                        args_list=args_list, out_on=False, \
                                        err_on=True, merge_err_to_out=False)

            os.chdir(cwd)

            if r != 0:  # or err_str:
                ERROR_HANDLER.error_exit("Program {} {}.".format(prog,\
                        'collecting coverage is problematic. ')+
                        "The error msg is {}. \nThe command:\n{}".format(\
                                err_str, " ".join([prog]+args_list)), __file__)

            dot_gcov_file_list = self._get_gcov_list()
            # Sources of interest
            _, src_map = self.code_builds_factory.repository_manager.\
                                                    get_relative_exe_path_map()
            base_dot_gcov = [os.path.basename(f) for f in dot_gcov_file_list]
            base_raw = [os.path.basename(f) for f in raw_filename_list]
            interest = [os.path.basename(s) for s, o in src_map.items()]
            interest = [s+'.gcov' for s in interest if os.path.splitext(s)[0] \
                                                                in base_raw]
            missed = set(interest) - set(base_dot_gcov)

            # FIXME: Check for partial failure (compare number of gcno and gcov)
            ERROR_HANDLER.assert_true(len(missed) == 0,
                    "{} did not generate the '.gcov' files {}.".format(\
                        prog, missed)+ "\nIts stderr is {}.\n CMD: {}".format(\
                            err_str, " ".join([prog]+args_list)), __file__)

            # delete gcda
            for gcda_f in gcda_files:
                os.remove(gcda_f)
        else:
            if not self.driver_config.get_allow_missing_coverage():
                ERROR_HANDLER.error_exit(\
                    "Testcase '{}' did not generate gcda, {}".format(\
                        testcase, "when allow missing coverage is disabled"))
            dot_gcov_file_list = []

        common_fs.dumpJSON(dot_gcov_file_list, \
                                os.path.join(result_dir_tmp,\
                                                self.gcov_files_list_filename))
Example #19
    def _setup_repository(self):
        # Make sure the repo dir exists
        ERROR_HANDLER.assert_true(os.path.isdir(self.repository_rootdir), \
                        "given repository dir is not existing: {}". format( \
                            self.repository_rootdir), __file__)

        # make sure the repo dir is a git repo
        # if no, ask the user whether to initialize and initialize or abort
        # if yes or user accepted initialize, get the git object for the repo
        try:
            repo = git_repo(self.repository_rootdir)
            gitobj = repo.git
        except git_exc.InvalidGitRepositoryError:
            make_it_git = common_mix.confirm_execution("{} {} {} {}".format(\
                                    "The given repository directory is not", \
                                    "a git repository, this must be a git", \
                                    "repository to proceed.\n Do you want to",\
                                    "set is as git repository?"))
            if make_it_git:
                repo = git_repo.init(self.repository_rootdir)
                gitobj = repo.git
            else:
                ERROR_HANDLER.error_exit("{} {}".format(\
                            "Must make the repository as git repository,", \
                            "then re-run"), __file__)

        # Check whether the repo is already managed by muteria
        ## if using branch
        if self.delete_created_on_revert_as_initial:
            if self.test_branch_name not in self._get_branches_list(\
                                                self.repository_rootdir):
                ## Not managed, create branch
                self._make_testing_branch(self.repository_rootdir, \
                                                        self.test_branch_name)

            # checkout
            gitobj.checkout(self.test_branch_name)

        # Actual Check whether the repo is already managed by muteria
        # There must be a directory DEFAULT_MUTERIA_REPO_META_FOLDER
        src_in_prev = set()
        if os.path.isdir(self.muteria_metadir):
            ## Managed
            prev_src_list, prev_use_branch = \
                            common_fs.loadJSON(self.muteria_metadir_info_file)
            ERROR_HANDLER.assert_true(prev_use_branch == \
                        self.delete_created_on_revert_as_initial, \
                        "{} {} {} {} {}".format(\
                            "unmatching repo backup type.", \
                            "previously, use branch was", \
                            prev_use_branch, ", now it is",\
                            self.delete_created_on_revert_as_initial), \
                                                                    __file__)
            src_in_prev = set(prev_src_list) & set(self.source_files_list)
            prev_src_list = set(prev_src_list) - set(self.source_files_list)
            remain_prev_src_set = {src for src in prev_src_list \
                                    if os.path.isfile(self.repo_abs_path(src))}
            # make sure that all prev_src are in initial state
            untracked_diff_files = self._get_untracked_and_diffed(repo)
            prev_untracked_diff = remain_prev_src_set & untracked_diff_files
            if len(prev_untracked_diff) > 0:
                bypass = common_mix.confirm_execution(\
                            "{} {} {} {} {}".format(
                                "the following files were previously used as",\
                                "src files by muteria and are now untracked:",\
                                prev_untracked_diff, \
                                "\nDo you want to handle it or bypass it?",\
                                "Choose yes to bypass: "******"{} {}".format(\
                                "Do you want to automatically restore the",\
                                "The untracked previous source and continue?"))
                    if revert_them:
                        for src in prev_untracked_diff:
                            self.revert_repository_file(src, gitobj=gitobj)
                    else:
                        ERROR_HANDLER.error_exit(\
                                "Handle it manually and restart the execution")

            # update the info_file
            if set(prev_src_list) != set(self.source_files_list):
                common_fs.dumpJSON([self.source_files_list, \
                                    self.delete_created_on_revert_as_initial],\
                                                self.muteria_metadir_info_file)
        else:
            ## Not managed
            os.mkdir(self.muteria_metadir)
            common_fs.dumpJSON([self.source_files_list, \
                                    self.delete_created_on_revert_as_initial],\
                                                self.muteria_metadir_info_file)

        # Make sure all source files of interest are tracked
        untracked_diff_files = self._get_untracked_and_diffed(repo)
        untracked_diff_src_in_prev = untracked_diff_files & src_in_prev
        if len(untracked_diff_src_in_prev) > 0:
            if common_mix.confirm_execution(\
                            "{} {} {} {} {}".format("The following source",\
                                        "files of interest are untracked", \
                    "and will be reverted (previous execution unfinished):", \
                                        src_in_prev, \
                                        "do you want to revert them?")):
                for src in src_in_prev:
                    self.revert_repository_file(src, gitobj=gitobj)
            else:
                ERROR_HANDLER.error_exit("{} {}".format(\
                                    "Handle untracked source files manually", \
                                                "then restart the execution"))

        untracked_diff_files = self._get_untracked_and_diffed(repo)
        untracked_diff_src_files = set(self.source_files_list) & \
                                                        untracked_diff_files
        if len(untracked_diff_src_files) > 0:
            if common_mix.confirm_execution(\
                                "{} {} {} {}".format("The following source",\
                                        "files of interest are untracked:", \
                                        untracked_diff_src_files, \
                                        "do you want to track them?")):
                repo.index.add(list(untracked_diff_src_files))
            else:
                ERROR_HANDLER.error_exit("{} {}".format(\
                                    "Handle untracked source files manually", \
                                                "then restart the execution"))
Example #20
    def _runtests(self, testcases, exe_path_map, env_vars, \
                                stop_on_failure=False, per_test_timeout=None, \
                                use_recorded_timeout_times=None, \
                                recalculate_execution_times=False, \
                                with_output_summary=True, hash_outlog=True, \
                                parallel_count=1):
        '''
        Execute the list of test cases with the given executable and 
        say, for each test case, whether it failed.
        Note: Re-implement this if the tool implements faster ways to
        execute multiple test cases.

        :param testcases: list of test cases to execute
        :param exe_path_map: string representing the file system path to 
                        the executable to execute with the tests
        :param env_vars: dict of environment variables to set before
                        executing each test ({<variable>: <value>})
        :param stop_on_failure: decide whether to stop the test execution once
                        a test fails
        :returns: pair of:
                - dict of testcase and their failed verdict.
                 {<test case name>: <True if failed, False if passed, 
                    UNCERTAIN_TEST_VERDICT if uncertain>}
                 If stop_on_failure is True, only return the tests that have 
                 been executed until the failure
                - test execution output log hash data object or None
        '''
        # @Checkpoint: create a checkpoint handler (for time)
        checkpoint_handler = CheckPointHandler(self.get_checkpointer())
        if checkpoint_handler.is_finished():
            logging.warning("{} {} {}".format( \
                            "The function 'runtests' is finished according", \
                            "to checkpoint, but called again. None returned", \
                            "\nPlease Confirm reexecution..."))
            if common_mix.confirm_execution("{} {}".format( \
                                "Function 'runtests' is already", \
                                "finished, do yo want to restart?")):
                checkpoint_handler.restart()
                logging.info("Restarting the finished 'runtests'")
            else:
                ERROR_HANDLER.error_exit(err_string="{} {} {}".format( \
                        "Execution halted. Cannot continue because no value", \
                        " can be returned. Check the results of the", \
                        "finished execution"), call_location=__file__)

        if per_test_timeout is None:
            per_test_timeout = {tc: None for tc in testcases}
            if use_recorded_timeout_times is not None:
                ERROR_HANDLER.assert_true(use_recorded_timeout_times > 0, \
                                        "use_recorded_timeout_times must be "
                                        "positive if not None", __file__)
                per_test_timeout.update({x: (y * use_recorded_timeout_times) \
                                for x, y in self.test_execution_time.items()})
        else:
            ERROR_HANDLER.assert_true(use_recorded_timeout_times is None, \
                                "use_recorded_timeout_times must not be set "
                                "when per_test_timeout is set", __file__)

        # Prepare the exes
        self._prepare_executable(exe_path_map, env_vars, \
                                            collect_output=with_output_summary)
        self._set_env_vars(env_vars)

        test_failed_verdicts = {} 
        test_outlog_hash = {} 
        processbar = tqdm.tqdm(testcases, leave=False, dynamic_ncols=True) 

        # Parallel stuffs
        def test_exec_iteration(testcase):
            processbar.set_description("Running Test {} (x{})".format(\
                                                  testcase, parallel_count))
            start_time = time.time()
            test_failed, execoutlog_hash = \
                        self._oracle_execute_a_test(testcase, exe_path_map, \
                                        env_vars, \
                                        timeout=per_test_timeout[testcase], \
                                    with_output_summary=with_output_summary, \
                                        hash_outlog=hash_outlog)
            
            #if testcase.endswith('.ktest'):  # DBG - fix hang
            #    logging.debug("KTEST {} is done".format(testcase))

            # Record exec time if not existing
            with self.shared_loc:
                if recalculate_execution_times:
                    self.test_execution_time[testcase] = \
                                     max(1, int(time.time() - start_time)) \
                                 * self.config.RECORDED_TEST_TIMEOUT_FACTOR

                test_failed_verdicts[testcase] = test_failed
                test_outlog_hash[testcase] = execoutlog_hash
            return test_failed
        #~ def test_exec_iteration()

        if self.can_run_tests_in_parallel() and parallel_count is not None \
                                                    and parallel_count > 1:
            parallel_count = min(len(testcases), parallel_count)
            joblib.Parallel(n_jobs=parallel_count, require='sharedmem')\
                            (joblib.delayed(test_exec_iteration)(testcase) \
                                                for testcase in processbar)
        else:
            parallel_count = 1 # to be printed in progress
            for testcase in processbar: 
                test_failed = test_exec_iteration(testcase)
                if stop_on_failure and test_failed != \
                                common_mix.GlobalConstants.PASS_TEST_VERDICT:
                    break
        
        if recalculate_execution_times:
            common_fs.dumpJSON(self.test_execution_time, \
                            self.test_execution_time_storage_file, pretty=True)

        # Restore back the exes
        self._restore_env_vars()
        self._restore_default_executable(exe_path_map, env_vars, \
                                            collect_output=with_output_summary)

        if stop_on_failure:
            # Make sure the non-executed tests get the uncertain verdict (None)
            if len(test_failed_verdicts) < len(testcases):
                for testcase in set(testcases) - set(test_failed_verdicts):
                    test_failed_verdicts[testcase] = \
                            common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT
                    test_outlog_hash[testcase] = common_matrices.\
                                        OutputLogData.UNCERTAIN_TEST_OUTLOGDATA

        # @Checkpoint: Finished (for time)
        checkpoint_handler.set_finished(None)

        if not with_output_summary:
            test_outlog_hash = None

        ERROR_HANDLER.assert_true(len(testcases) == len(test_failed_verdicts),\
                  "Mismatch between testcases and test_failed_verdict (BUG)",\
                                                                     __file__)

        return test_failed_verdicts, test_outlog_hash
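
A minimal standalone sketch of the shared-memory parallel pattern used above (joblib + tqdm with a lock guarding the shared result dict); the test names and the fake runner are illustrative, not part of the framework.

import threading
import time

import joblib
import tqdm

verdicts = {}
lock = threading.Lock()
tests = ['t1', 't2', 't3', 't4']
bar = tqdm.tqdm(tests, leave=False, dynamic_ncols=True)

def run_one(test):
    time.sleep(0.1)              # stand-in for the real test execution
    failed = test.endswith('3')  # fake verdict
    with lock:                   # protect the shared dict across workers
        verdicts[test] = failed
    return failed

joblib.Parallel(n_jobs=2, require='sharedmem')(
                joblib.delayed(run_one)(t) for t in bar)
print(verdicts)
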
Exemplo n.º 21
0
    def compute_stats(config, explorer, checkpointer):
        # get the matrix of each test criterion
        coverages = {}
        total_to = {}
        number_of_testcases = None
        for c in config.ENABLED_CRITERIA.get_val():
            if explorer.file_exists(fd_structure.CRITERIA_MATRIX[c]):
                mat_file = explorer.get_existing_file_pathname(\
                                            fd_structure.CRITERIA_MATRIX[c])
                mat = common_matrices.ExecutionMatrix(filename=mat_file)
                row2collist = mat.query_active_columns_of_rows()
                cov = len([k for k,v in row2collist.items() if len(v) > 0])
                tot = len(row2collist)
                coverages[c.get_str()] = 'n.a.' if tot == 0 else \
                                          '{:.2f}'.format(cov * 100.0 / tot)
                total_to[c.get_str()] = tot
                if number_of_testcases is None:
                    number_of_testcases = len(mat.get_nonkey_colname_list())
        
        # JSON
        out_json = {}
        out_json['TOTAL EXECUTION TIME (s)'] = \
                                            checkpointer.get_execution_time()
        out_json['NUMBER OF TESTCASES'] = number_of_testcases
        out_json['CRITERIA'] = {}
        for c in coverages:
            out_json['CRITERIA'][c] = {'coverage': coverages[c], 
                                            '# test objectives': total_to[c]}
        common_fs.dumpJSON(out_json, explorer.get_file_pathname(\
                                        fd_structure.STATS_MAIN_FILE_JSON), \
                                     pretty=True)

        # HTML
        template_file = os.path.join(os.path.dirname(\
                            os.path.abspath(__file__)), 'summary_report.html')
        report_file = explorer.get_file_pathname(\
                                            fd_structure.STATS_MAIN_FILE_HTML)
        
        def format_execution_time(exec_time):
            n_day = int(exec_time // (24 * 3600))
            exec_time = exec_time % (24 * 3600)
            n_hour = int(exec_time // 3600)
            exec_time %= 3600
            n_minutes = int(exec_time // 60)
            exec_time %= 60
            n_seconds = int(round(exec_time))

            res = ""
            for val, unit in [(n_day, 'day'), (n_hour, 'hour'), \
                                (n_minutes, 'minute'), (n_seconds, 'second')]:
                if val > 0:
                    s = ' ' if val == 1 else 's '
                    res += str(val) + ' ' + unit + s
            
            return res
        #~ def format_execution_time()

        total_exec_time = format_execution_time(\
                                            checkpointer.get_execution_time())

        rendered = Template(open(template_file).read()).render( \
                                {
                                    'total_execution_time': total_exec_time,
                                    'number_of_testcases': number_of_testcases,
                                    'coverages':coverages, 
                                    'total_to':total_to,
                                })
        with open(report_file, 'w') as f:
            f.write(rendered)
        
        try:
            webbrowser.get()
            webbrowser.open('file://' + report_file,new=2)
        except Exception as e:
            logging.warning("webbrowser error: "+str(e))
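
A minimal sketch of the report rendering step above, assuming Template is jinja2's; the inline template string and the sample values are illustrative, not the real summary_report.html shipped with the framework.

from jinja2 import Template

tpl = Template(
    "Ran {{ number_of_testcases }} tests in {{ total_execution_time }}\n"
    "{% for c, cov in coverages.items() %}"
    "{{ c }}: {{ cov }}% of {{ total_to[c] }} test objectives\n"
    "{% endfor %}")
print(tpl.render({
    'total_execution_time': '1 hour 2 minutes',
    'number_of_testcases': 42,
    'coverages': {'STATEMENT_COVERAGE': '87.50'},
    'total_to': {'STATEMENT_COVERAGE': 160},
}))
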
Exemplo n.º 22
0
    def _runtests(self, testcases, exe_path_map, env_vars, \
                                stop_on_failure=False, per_test_timeout=None, \
                                use_recorded_timeout_times=None, \
                                recalculate_execution_times=False, \
                                with_outlog_hash=True, parallel_count=1):
        '''
        Execute the list of test cases with the given executable and 
        say, for each test case, whether it failed.
        Note: Re-implement this if the tool implements faster ways to
        execute multiple test cases.

        :param testcases: list of test cases to execute
        :param exe_path_map: dict mapping each repository executable to the 
                        file system path of the executable to run the tests with
        :param env_vars: dict of environment variables to set before
                        executing each test ({<variable>: <value>})
        :param stop_on_failure: decide whether to stop the test execution once
                        a test fails
        :returns: pair of:
                - dict of testcase and their failed verdict.
                 {<test case name>: <True if failed, False if passed, 
                    UNCERTAIN_TEST_VERDICT if uncertain>}
                 If stop_on_failure is True, only return the tests that have 
                 been executed until the failure
                - test execution output log hash data object or None
        '''
        # @Checkpoint: create a checkpoint handler (for time)
        checkpoint_handler = CheckPointHandler(self.get_checkpointer())
        if checkpoint_handler.is_finished():
            logging.warning("{} {} {}".format( \
                            "The function 'runtests' is finished according", \
                            "to checkpoint, but called again. None returned", \
                            "\nPlease Confirm reexecution..."))
            if common_mix.confirm_execution("{} {}".format( \
                                "Function 'runtests' is already", \
                                "finished, do you want to restart?")):
                checkpoint_handler.restart()
                logging.info("Restarting the finished 'runtests'")
            else:
                ERROR_HANDLER.error_exit(err_string="{} {} {}".format( \
                        "Execution halted. Cannot continue because no value", \
                        " can be returned. Check the results of the", \
                        "finished execution"), call_location=__file__)

        if per_test_timeout is None:
            per_test_timeout = {tc: None for tc in testcases}
            if use_recorded_timeout_times is not None:
                ERROR_HANDLER.assert_true(use_recorded_timeout_times > 0, \
                                        "use_recorded_timeout_times must be "
                                        "positive if not None", __file__)
                per_test_timeout.update({x: (y * use_recorded_timeout_times) \
                                for x, y in self.test_execution_time.items()})
        else:
            ERROR_HANDLER.assert_true(use_recorded_timeout_times is None, \
                                "use_recorded_timeout_times must not be set "
                                "when per_test_timeout is set", __file__)

        # Prepare the exes
        self._prepare_executable(exe_path_map, env_vars, \
                                            collect_output=with_outlog_hash)
        self._set_env_vars(env_vars)

        test_failed_verdicts = {} 
        test_outlog_hash = {} 
        for testcase in testcases:
            start_time = time.time()
            test_failed, execoutlog_hash = \
                        self._oracle_execute_a_test(testcase, exe_path_map, \
                                        env_vars, \
                                        timeout=per_test_timeout[testcase], \
                                        with_outlog_hash=with_outlog_hash)
            
            # Record exec time if not existing
            if recalculate_execution_times:
                self.test_execution_time[testcase] = \
                                            1 + int(time.time() - start_time)

            test_failed_verdicts[testcase] = test_failed
            test_outlog_hash[testcase] = execoutlog_hash
            if stop_on_failure and test_failed != \
                                common_mix.GlobalConstants.PASS_TEST_VERDICT:
                break
        
        if recalculate_execution_times:
            common_fs.dumpJSON(self.test_execution_time, \
                            self.test_execution_time_storage_file, pretty=True)

        # Restore back the exes
        self._restore_env_vars()
        self._restore_default_executable(exe_path_map, env_vars, \
                                            collect_output=with_outlog_hash)

        if stop_on_failure:
            # Make sure the non-executed tests get the uncertain verdict (None)
            if len(test_failed_verdicts) < len(testcases):
                for testcase in set(testcases) - set(test_failed_verdicts):
                    test_failed_verdicts[testcase] = \
                            common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT
                    test_outlog_hash[testcase] = common_matrices.\
                                        OutputLogData.UNCERTAIN_TEST_OUTLOGDATA

        # @Checkpoint: Finished (for time)
        checkpoint_handler.set_finished(None)

        if not with_outlog_hash:
            test_outlog_hash = None

        return test_failed_verdicts, test_outlog_hash
Exemplo n.º 23
0
    def _collect_temporary_coverage_data(self, criteria_name_list, \
                                                test_execution_verdict, \
                                                used_environment_vars, \
                                                result_dir_tmp, \
                                                testcase):
        ''' extract coverage data into json file in result_dir_tmp
        '''
        cov_obj = coverage.Coverage(config_file=self.config_file)
        cov_obj.combine()
        tmp_dat_obj = cov_obj.get_data()

        in_dat_files = tmp_dat_obj.measured_files()

        # Get file map
        try:
            self.exes_rel
        except AttributeError:
            obj = common_fs.loadJSON(self.instrumentation_details)
            self.exes_abs = []
            self.exes_rel = []
            for rp, ap in list(obj.items()):
                self.exes_rel.append(rp)
                self.exes_abs.append(ap)
            # sort by the relative path's depth (number of separators)
            self.exes_rel, self.exes_abs = zip(*sorted(\
                                            zip(self.exes_rel, self.exes_abs),\
                                            key=lambda x: x[0].count(os.path.sep)\
                                            ))

            # get executables stmt and branches
            self.executable_lines = {}
            self.executable_arcs = {}
            for fn in self.exes_abs:
                pser = coverage.parser.PythonParser(filename=fn)
                pser.parse_source()
                self.executable_lines[fn] = pser.statements
                self.executable_arcs[fn] = pser.arcs()

        dat_obj = coverage.CoverageData()
        if len(in_dat_files) > 0:
            file_map = self.PathAliases(in_dat_files, self.exes_rel, \
                                    self.used_srcs_dir, \
                                    self._get_latest_top_output_dir(), \
                                    self.code_builds_factory.\
                                        repository_manager.repo_abs_path(''))
            dat_obj.update(tmp_dat_obj, aliases=file_map)

        # Get the coverages
        res = {c: {} for c in criteria_name_list}
        if TestCriteria.STATEMENT_COVERAGE in criteria_name_list:
            for rel_fn, abs_fn in zip(self.exes_rel, self.exes_abs):
                res[TestCriteria.STATEMENT_COVERAGE][rel_fn] = \
                                {ln: 0 for ln in self.executable_lines[abs_fn]}
                ln_list = dat_obj.lines(abs_fn)
                if ln_list is not None:
                    res[TestCriteria.STATEMENT_COVERAGE][rel_fn].\
                                            update({ln:1 for ln in ln_list})
        if TestCriteria.BRANCH_COVERAGE in criteria_name_list:
            for rel_fn, abs_fn in zip(self.exes_rel, self.exes_abs):
                res[TestCriteria.BRANCH_COVERAGE][rel_fn] = \
                            {str(an): 0 for an in self.executable_arcs[abs_fn]}
                an_list = dat_obj.arcs(abs_fn)
                if an_list is not None:
                    res[TestCriteria.BRANCH_COVERAGE][rel_fn].\
                                        update({str(an):1 for an in an_list})

        # save the temporary coverage
        res_str = {v.get_str(): res[v] for v in res}
        common_fs.dumpJSON(res_str, os.path.join(result_dir_tmp,\
                                                    self.cov_data_filename))

        cov_obj.erase()
        # clean any possible raw data file
        for file_ in glob.glob(self.raw_data_file + "*"):
            os.remove(file_)
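
The collector above drives coverage.py programmatically (and also taps the internal coverage.parser.PythonParser to enumerate executable lines and arcs, which is not reproduced here). A minimal standalone sketch of the public calls it relies on:

import coverage

cov = coverage.Coverage(branch=True)   # branch=True so arcs() is populated
cov.start()

def pick(x):
    if x > 0:
        return 'pos'
    return 'non-pos'

pick(3)
cov.stop()
data = cov.get_data()
for fn in data.measured_files():
    print(fn, 'lines:', data.lines(fn), 'arcs:', data.arcs(fn))
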
Exemplo n.º 24
0
    def _do_instrument_code (self, exe_path_map, \
                                        code_builds_factory, \
                                        enabled_criteria, parallel_count=1):
        # Setup
        if os.path.isdir(self.instrumented_code_storage_dir):
            shutil.rmtree(self.instrumented_code_storage_dir)
        os.mkdir(self.instrumented_code_storage_dir)
        if os.path.isdir(self.gc_files_dir):
            try:
                shutil.rmtree(self.gc_files_dir)
            except PermissionError:
                self._dir_chmod777(self.gc_files_dir)
                shutil.rmtree(self.gc_files_dir)
        os.mkdir(self.gc_files_dir, mode=0o777)

        prog = 'gcc'
        if self.custom_binary_dir is not None:
            prog = os.path.join(self.custom_binary_dir, prog)
            ERROR_HANDLER.assert_true(os.path.isfile(prog), \
                            "The tool {} is missing from the specified dir {}"\
                                        .format(os.path.basename(prog), \
                                            self.custom_binary_dir), __file__)

        flags = [
            '-g', '--coverage', '-fprofile-dir=' + self.gc_files_dir, '-O0'
        ]
        additionals = ["-fkeep-inline-functions"]
        #additionals.append('-fprofile-abs-path')
        #additionals.append('-fprofile-prefix-path='+code_builds_factory\
        #                        .repository_manager.get_repository_dir_path())

        # get gcc version
        ret, out, err = DriversUtils.execute_and_get_retcode_out_err(\
                                            prog, args_list=['-dumpversion'])
        ERROR_HANDLER.assert_true(ret == 0, "'gcc -dumpversion' failed")

        # if version > 6.5
        if int(out.split('.')[0]) >= 6:
            if int(out.split('.')[0]) > 6 or int(out.split('.')[1]) > 5:
                additionals += ["-fkeep-static-functions"]

        flags += additionals

        rel_rel_path_map = {}
        rel_abs_path_map = {}
        exes, _ = code_builds_factory.repository_manager.\
                                                    get_relative_exe_path_map()
        ERROR_HANDLER.assert_true(len(exes) == 1, \
                                "Only single exe supported in GCOV", __file__)
        for exe in exes:
            filename = os.path.basename(exe)
            rel_rel_path_map[exe] = filename
            rel_abs_path_map[exe] = os.path.join(\
                                self.instrumented_code_storage_dir, filename)

        self.instrument_callback_obj = self.InstrumentCallbackObject()
        self.instrument_callback_obj.set_post_callback_args(\
                                        (self.gc_files_dir, rel_abs_path_map))
        pre_ret, ret, post_ret = code_builds_factory.transform_src_into_dest(\
                        src_fmt=CodeFormats.C_SOURCE,\
                        dest_fmt=CodeFormats.NATIVE_CODE,\
                        src_dest_files_paths_map=None,\
                        compiler=prog, flags_list=flags, clean_tmp=True, \
                        reconfigure=True, \
                        callback_object=self.instrument_callback_obj)

        # Check
        if ret == common_mix.GlobalConstants.COMMAND_FAILURE:
            ERROR_HANDLER.error_exit("Program {} {}.".format(prog,\
                                        'build failed'), __file__)

        # write down the rel_path_map
        ERROR_HANDLER.assert_true(not os.path.isfile(\
                self.instrumentation_details), "must not exist here", __file__)
        common_fs.dumpJSON(rel_rel_path_map, self.instrumentation_details)
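
A standalone sketch of the gcc version probe and coverage-flag assembly above, using plain subprocess instead of DriversUtils; the gc_files_dir path is illustrative.

import subprocess

gc_files_dir = '/tmp/gcov-data'
out = subprocess.run(['gcc', '-dumpversion'], capture_output=True,
                     text=True, check=True).stdout.strip()
parts = (out.split('.') + ['0'])[:2]   # '-dumpversion' may print '9' or '6.5.0'
major, minor = int(parts[0]), int(parts[1])

flags = ['-g', '--coverage', '-fprofile-dir=' + gc_files_dir, '-O0',
         '-fkeep-inline-functions']
if (major, minor) > (6, 5):            # same version gate as in the driver above
    flags.append('-fkeep-static-functions')
print(flags)
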
Exemplo n.º 25
0
    def _call_generation_run(self, runtool, args):
        # Delete any klee-out-*
        for d in os.listdir(self.tests_working_dir):
            if d.startswith('klee-out-'):
                shutil.rmtree(os.path.join(self.tests_working_dir, d))

        call_shadow_wrapper_file = os.path.join(self.tests_working_dir, \
                                                                "shadow_wrap")

        devtest_toolalias = self.parent_meta_tool.get_devtest_toolalias()
        ERROR_HANDLER.assert_true(devtest_toolalias is not None, \
                        "devtest must be used when using shadow_se", __file__)

        #test_list = list(self.code_builds_factory.repository_manager\
        #                                               .get_dev_tests_list())
        test_list = []
        for meta_test in self.parent_meta_tool.get_testcase_info_object(\
                               candidate_tool_aliases=[devtest_toolalias])\
                                                            .get_tests_list():
            toolalias, test = DriversUtils.reverse_meta_element(meta_test)
            ERROR_HANDLER.assert_true(toolalias == devtest_toolalias, \
                           "BUG in above get_testcase_info_object", __file__)
            test_list.append(test)

        # Get list of klee_change, klee_get_true/false locations.
        klee_change_stmts = []

        get_lines_callback_obj = self.GetLinesCallbackObject()
        get_lines_callback_obj.set_pre_callback_args(self.code_builds_factory\
                                    .repository_manager.revert_src_list_files)
        get_lines_callback_obj.set_post_callback_args(klee_change_stmts)

        pre_ret, post_ret = self.code_builds_factory.repository_manager\
                                    .custom_read_access(get_lines_callback_obj)
        ERROR_HANDLER.assert_true(pre_ret == \
                                common_mix.GlobalConstants.COMMAND_SUCCESS,\
                                                    "pre failed", __file__)
        ERROR_HANDLER.assert_true(post_ret == \
                                common_mix.GlobalConstants.COMMAND_SUCCESS,\
                                                    "post failed", __file__)

        ERROR_HANDLER.assert_true(len(klee_change_stmts) > 0, \
                        "No klee_change statement in the sources", __file__)

        # Filter only tests that cover those locations,
        # if there is stmt coverage matrix
        stmt_cov_mat_file = self.head_explorer.get_file_pathname(\
                            fd_structure.CRITERIA_MATRIX[criteria.TestCriteria\
                                                        .STATEMENT_COVERAGE])
        cov_tests = None
        if os.path.isfile(stmt_cov_mat_file):
            stmt_cov_mat = common_matrices.ExecutionMatrix(\
                                                    filename=stmt_cov_mat_file)
            # due to possible wrapper test splitting we update test_list here
            tmp_test_list = []
            for mt in stmt_cov_mat.get_nonkey_colname_list():
                alias, t = DriversUtils.reverse_meta_element(mt)
                if alias == devtest_toolalias:
                    tmp_test_list.append(t)
            test_list = tmp_test_list

            meta_stmts = list(stmt_cov_mat.get_keys())
            tool_aliases = set()
            for meta_stmt in meta_stmts:
                alias, stmt = DriversUtils.reverse_meta_element(meta_stmt)
                tool_aliases.add(alias)
            klee_change_meta_stmts = []
            for alias in tool_aliases:
                klee_change_meta_stmts += [\
                                    DriversUtils.make_meta_element(e, alias) \
                                                    for e in klee_change_stmts]
            klee_change_meta_stmts = list(set(meta_stmts) & \
                                                set(klee_change_meta_stmts))

            cov_tests = set()
            if len(klee_change_meta_stmts) > 0:
                for _, t in stmt_cov_mat.query_active_columns_of_rows(\
                                row_key_list=klee_change_meta_stmts).items():
                    cov_tests |= set(t)
            else:
                logging.warning('No test covers the patch (SHADOW)!')
            #    ERROR_HANDLER.assert_true(len(klee_change_meta_stmts) > 0, \
            #                            "No test covers the patch", __file__)

        # tests will be generated in the same dir that has the input .bc file
        os.mkdir(self.tests_storage_dir)

        # obtain candidate tests
        cand_testpair_list = []
        for test in test_list:
            meta_test = DriversUtils.make_meta_element(test, devtest_toolalias)
            if cov_tests is not None and meta_test not in cov_tests:
                continue
            cand_testpair_list.append((test, meta_test))

        # Adjust the max-time in args
        ## locate max-time
        per_test_hard_timeout = None
        per_test_timeout = None
        if len(cand_testpair_list) > 0:
            cur_max_time = float(self.get_value_in_arglist(args, 'max-time'))
            if self.driver_config.get_gen_timeout_is_per_test():
                per_test_timeout = cur_max_time
            else:
                per_test_timeout = max(60, \
                                       cur_max_time / len(cand_testpair_list))
            self.set_value_in_arglist(args, 'max-time', str(per_test_timeout))

            # give time to dump remaining states
            per_test_hard_timeout = per_test_timeout + \
                                self.config.TEST_GEN_TIMEOUT_FRAMEWORK_GRACE

        #per_test_hard_timeout = 300 #DBG
        # Set the wrapper
        with open(call_shadow_wrapper_file, 'w') as wf:
            wf.write('#! /bin/bash\n\n')
            wf.write('set -u\n')
            wf.write('set -o pipefail\n\n')
            wf.write('ulimit -s unlimited\n')

            # timeout the shadow execution (some tests create daemons which
            # are not killed by the test timeout). ALSO MAKE SURE TO
            # DEACTIVATE THE TIMEOUT IN THE TEST SCRIPT
            kill_after = 30
            wf.write('time_out_cmd="/usr/bin/timeout --kill-after={}s {}"\n'.\
                                     format(kill_after, per_test_hard_timeout))
            # kill after and time for timeout to act
            per_test_hard_timeout += kill_after + 60

            #wf.write(' '.join(['exec', runtool] + args + ['"${@:1}"']) + '\n')
            wf.write('\nstdindata="{}/klee-last/{}"\n'.format(\
                                                    self.tests_working_dir, \
                                        KTestTestFormat.STDIN_KTEST_DATA_FILE))
            wf.write('tmpstdindata="{}/{}"\n\n'.format(self.tests_working_dir,\
                                        KTestTestFormat.STDIN_KTEST_DATA_FILE))
            wf.write('if [ -t 0 ] # true when no data is piped on stdin\n')
            wf.write('then\n')
            wf.write(' '.join(['\t(', '$time_out_cmd', runtool] + args + \
                                    ['"${@:1}"', ') ; EXIT_CODE=$?', '\n']))
            wf.write('\t/usr/bin/touch $tmpstdindata\n')

            wf.write('else\n')
            wf.write('\t(/bin/cat - > $tmpstdindata ) || EXIT_CODE=1\n')
            wf.write(' '.join(['\t(', '/bin/cat $tmpstdindata | ', \
                            '$time_out_cmd', runtool] + args + \
                                ['"${@:1}"', ') ; EXIT_CODE=$?', '\n']))
            wf.write('fi\n\n')
            wf.write('/bin/mv $tmpstdindata $stdindata || EXIT_CODE=2\n')
            wf.write('\n# Provoke "unbound variable" if KLEE fails\n')
            wf.write('# Preserve the KLEE exit code\n')
            wf.write('exit $EXIT_CODE\n')
        os.chmod(call_shadow_wrapper_file, 0o775)

        # run test
        exes, _ = self.code_builds_factory.repository_manager\
                                                .get_relative_exe_path_map()
        ERROR_HANDLER.assert_true(len(exes) == 1, \
                                            "Must have a single exe", __file__)
        exe_path_map = {e: call_shadow_wrapper_file for e in exes}
        env_vars = {}
        self._dir_chmod777(self.tests_storage_dir)
        for test, meta_test in cand_testpair_list:
            self.parent_meta_tool.execute_testcase(meta_test, exe_path_map, \
                                    env_vars, timeout=per_test_hard_timeout,\
                                                    with_output_summary=False)

            #logging.debug("DBG: Just executed test '{}'".format(meta_test))
            #input(">>>> ") #DBG

            # copy the klee out
            test_out = os.path.join(self.tests_storage_dir, \
                                          self.get_sorage_name_of_test(test))
            os.mkdir(test_out)
            for d in glob.glob(self.tests_working_dir + "/klee-out-*"):
                # make sure we can do anything with it
                self._dir_chmod777(d)
                if not self.keep_first_test:
                    first_test = os.path.join(d, 'test000001.ktest')
                    if os.path.isfile(first_test):
                        shutil.move(first_test, first_test + '.disable')
                shutil.move(d, test_out)
            ERROR_HANDLER.assert_true(len(list(os.listdir(test_out))) > 0, \
                                "Shadow generated no test for testcase: "+test,\
                                                                    __file__)
            if os.path.islink(os.path.join(self.tests_working_dir, \
                                                                'klee-last')):
                os.unlink(os.path.join(self.tests_working_dir, 'klee-last'))

        # store klee_change locs
        common_fs.dumpJSON(klee_change_stmts, self.klee_change_locs_list_file)
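
A standalone sketch of the wrapper-generation step above: write a small bash script that runs a tool under /usr/bin/timeout while preserving its exit code, then make it executable. The paths, tool and timeouts are illustrative.

import os

wrapper = '/tmp/shadow_wrap_demo'
runtool, hard_timeout, kill_after = '/usr/bin/klee', 120, 30
with open(wrapper, 'w') as wf:
    wf.write('#! /bin/bash\n')
    wf.write('set -u\nset -o pipefail\n')
    wf.write('/usr/bin/timeout --kill-after={}s {} {} "${{@:1}}"\n'.format(
                                        kill_after, hard_timeout, runtool))
    wf.write('exit $?\n')
os.chmod(wrapper, 0o775)
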
Exemplo n.º 26
0
    def _do_generate_tests (self, exe_path_map, code_builds_factory, \
                                                meta_criteria_tool_obj=None, \
                                                                max_time=None):
        # Check the passed exe_path_map
        if exe_path_map is not None:
            for r_exe, v_exe in exe_path_map.items():
                ERROR_HANDLER.assert_true(v_exe is None, \
                            "Klee driver does not use passed exe_path_map", \
                                                                    __file__)

        # Setup
        if os.path.isdir(self.tests_working_dir):
            try:
                shutil.rmtree(self.tests_working_dir)
            except PermissionError:
                self._dir_chmod777(self.tests_working_dir)
                shutil.rmtree(self.tests_working_dir)

        os.mkdir(self.tests_working_dir)
        if os.path.isdir(self.tests_storage_dir):
            shutil.rmtree(self.tests_storage_dir)

        prog = self._get_tool_name()
        if self.custom_binary_dir is not None:
            prog = os.path.join(self.custom_binary_dir, prog)
            ERROR_HANDLER.assert_true(os.path.isfile(prog), \
                            "The tool {} is missing from the specified dir {}"\
                                        .format(os.path.basename(prog), \
                                            self.custom_binary_dir), __file__)

        rel_path_map = {}
        exes, _ = code_builds_factory.repository_manager.\
                                                    get_relative_exe_path_map()
        for exe in exes:
            filename = os.path.basename(exe)
            rel_path_map[exe] = os.path.join(self.tests_working_dir, filename)

        # Get bitcode file
        bitcode_file = self._get_input_bitcode_file(code_builds_factory, \
                                                                rel_path_map, \
                                meta_criteria_tool_obj=meta_criteria_tool_obj)
        repo_rel_exe_file = list(rel_path_map)[0]

        # klee params
        bool_param, k_v_params = self._get_default_params()
        if max_time is not None:
            k_v_params['-max-time'] = str(max_time)

        # Consider pre user custom
        uc = self.config.get_tool_user_custom()
        pre_args = []
        if uc is not None:
            pre_bc_cmd = uc.PRE_TARGET_CMD_ORDERED_FLAGS_LIST
            if pre_bc_cmd is not None:
                for tup in pre_bc_cmd:
                    key = tup[0][1:] if tup[0].startswith('--') else tup[0]
                    bool_disabled = False
                    if key in k_v_params:
                        del k_v_params[key]
                    if key in bool_param:
                        del bool_param[key]
                        if len(tup) == 2:
                            ERROR_HANDLER.assert_true(\
                                        type(tup[1]) == bool, \
                                        "bool arg is either enabled or "
                                    "disabled. val must be bool", __file__)
                            if not tup[1]:
                                bool_disabled = True
                            tup = tup[:1]
                        else:
                            ERROR_HANDLER.assert_true(len(tup) == 1, \
                                    "Invalid bool param: "+key, __file__)
                    if not bool_disabled:
                        pre_args.extend(list(tup))

        args = [bp for bp, en in list(bool_param.items()) if en]
        for k, v in list(k_v_params.items()):
            if v is not None:
                args += [k, str(v)]
        args.extend(pre_args)

        args.append(bitcode_file)

        args += self._get_sym_args(args)

        self._call_generation_run(prog, list(args))

        # XXX: remove duplicate tests
        kepttest2duptest_map = {}
        folders = [self.tests_storage_dir]
        if self.gen_tests_no_dup_with_seeds:
            seed_dir = self.get_value_in_arglist(args, self.SEED_DIR_ARG_NAME)
            if seed_dir is not None:
                seed_dir = os.path.normpath(os.path.abspath(seed_dir))
                folders.append(seed_dir)

        dup_list, invalid = KTestTestFormat.ktest_fdupes(*folders, \
                        custom_replay_tool_binary_dir=self.custom_binary_dir)
        if len(invalid) > 0:
            logging.warning(\
                        "{} generated ktests are invalid ({})".format(\
                                                        len(invalid), invalid))
            for kt in invalid:
                if KTestTestFormat.get_dir(kt, folders) == \
                                                        self.tests_storage_dir:
                    os.remove(kt)
        for dup_tuple in dup_list:
            key = os.path.relpath(dup_tuple[0], \
                                KTestTestFormat.get_dir(dup_tuple[0], folders))
            kepttest2duptest_map[key] = [os.path.relpath(dp, \
                                        KTestTestFormat.get_dir(dp, folders)) \
                                                for dp in dup_tuple[1:]]
            for df in dup_tuple[1:]:
                if KTestTestFormat.get_dir(df, folders) == \
                                                        self.tests_storage_dir:
                    os.remove(df)
        common_fs.dumpJSON(kepttest2duptest_map, self.keptktest2dupktests)

        # Copy replay tool into test folder
        klee_replay_pathname = KTestTestFormat.get_test_replay_tool(\
                        custom_replay_tool_binary_dir=self.custom_binary_dir)
        shutil.copy2(klee_replay_pathname, self.tests_storage_dir)

        store_obj = {repo_rel_exe_file: os.path.basename(bitcode_file)}
        common_fs.dumpJSON(store_obj, self.test_details_file)

        # Compute must exist dirs
        test2file = {}
        for kt in self.get_testcase_info_object().get_tests_list():
            test2file[kt] = os.path.join(self.tests_storage_dir, kt)
        ktest2reqdir = Misc.get_must_exist_dirs_of_ktests(test2file, \
                                                    self.custom_binary_dir)
        ktest2reqdir = {kt: v for kt, v in ktest2reqdir.items() if len(v) > 0}
        self.ktest_with_must_exist_dir = ktest2reqdir
        if len(ktest2reqdir) > 0:
            common_fs.dumpJSON(ktest2reqdir, \
                                        self.ktest_with_must_exist_dir_file)
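
A standalone sketch of how the boolean and key/value parameter dictionaries above are flattened into the final KLEE argument list; the option values below are illustrative.

bool_param = {'-write-cov': True, '-only-output-states-covering-new': False}
k_v_params = {'-max-time': '300', '-search': 'bfs', '-output-dir': None}
pre_args = ['-max-memory', '2048']
bitcode_file = 'prog.bc'

args = [bp for bp, enabled in bool_param.items() if enabled]
for k, v in k_v_params.items():
    if v is not None:           # skip unset key/value options
        args += [k, str(v)]
args.extend(pre_args)           # user-supplied pre-target flags come last
args.append(bitcode_file)
print(args)                     # ['-write-cov', '-max-time', '300', ...]
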
Exemplo n.º 27
0
    def check_get_flakiness(self, meta_testcases, repeat_count=2, \
                                              get_flaky_tests_outputs=True):
        """
            Check if tests have flakiness by running multiple times
            :return: The list of flaky tests
        """
        ERROR_HANDLER.assert_true(
            repeat_count > 1, "Cannot check flakiness"
            " with fewer than two repetitions", __file__)

        if os.path.isdir(self.flakiness_workdir):
            shutil.rmtree(self.flakiness_workdir)
        os.mkdir(self.flakiness_workdir)

        outlog_files = [os.path.join(self.flakiness_workdir, \
                            str(of)+'-out.json') for of in range(repeat_count)]
        matrix_files = [os.path.join(self.flakiness_workdir, \
                            str(of)+'-mat.csv') for of in range(repeat_count)]

        def run(rep, test_list, hash_outlog):
            self.runtests(test_list, \
                        fault_test_execution_matrix_file=matrix_files[rep], \
                        fault_test_execution_execoutput_file=\
                                                            outlog_files[rep],\
                        with_output_summary=True, \
                        hash_outlog=hash_outlog)

        #~ def run()

        # Execute with repetition and get output summaries
        for rep in range(repeat_count):
            run(rep, meta_testcases, True)

        # get flaky tests list
        flaky_tests = set()
        fix_outdata = common_matrices.OutputLogData(filename=outlog_files[0])
        fix_outdata = list(fix_outdata.get_zip_objective_and_data())[0][1]
        for i in range(1, repeat_count):
            other_outdata = common_matrices.OutputLogData(\
                                                    filename=outlog_files[i])
            other_outdata = list(\
                            other_outdata.get_zip_objective_and_data())[0][1]
            for test, t_dat_fix in fix_outdata.items():
                if test in flaky_tests:
                    continue
                t_dat_other = other_outdata[test]
                if not common_matrices.OutputLogData.outlogdata_equiv(\
                                                    t_dat_fix, t_dat_other):
                    flaky_tests.add(test)

        ## cleanup
        for f in outlog_files + matrix_files:
            if os.path.isfile(f):
                os.remove(f)

        # get flaky tests outputs
        if get_flaky_tests_outputs and len(flaky_tests) > 0:
            for rep in range(repeat_count):
                run(rep, list(flaky_tests), False)
            flaky_test_list_file = os.path.join(self.flakiness_workdir, \
                                                        "flaky_test_list.json")
            logging.warning("There were some flaky tests (see file {})"\
                                                .format(flaky_test_list_file))
            common_fs.dumpJSON(list(flaky_tests), flaky_test_list_file, \
                                                                pretty=True)
        return list(flaky_tests)
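
A standalone sketch of the comparison performed above: a test is flagged as flaky when its recorded output differs between two repetitions. The dictionaries below stand in for the OutputLogData objects and are illustrative.

run_1 = {'test_a': {'retcode': 0, 'out_hash': 'aaa'},
         'test_b': {'retcode': 0, 'out_hash': 'bbb'}}
run_2 = {'test_a': {'retcode': 0, 'out_hash': 'aaa'},
         'test_b': {'retcode': 1, 'out_hash': 'ccc'}}

flaky_tests = {t for t, dat in run_1.items() if dat != run_2[t]}
print(sorted(flaky_tests))      # ['test_b']
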
Exemplo n.º 28
0
    def _execute_task(self, task):
        """ TODO: 
                1. implement the todos here
        """
        
        # @Checkpointing: see whether the task is untouched, to clear tmp data
        task_untouched = self.cp_data.tasks_obj.task_is_untouched(task)

        # Execute based on the task: ---------------------------------------
        if task == checkpoint_tasks.Tasks.STARTING:
            pass
        elif task == checkpoint_tasks.Tasks.FINISHED:
            pass

        elif task == checkpoint_tasks.Tasks.TESTS_GENERATION_GUIDANCE:
            pass #TODO: Generate focus criteria and tests and store in json file
        elif task == checkpoint_tasks.Tasks.TESTS_GENERATION:
            # @Checkpointing
            if task_untouched:
                self.meta_testcase_tool.clear_working_dir() 
                self.cp_data.tasks_obj.set_task_executing(task)
                self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())

            # Generate the tests
            self.meta_testcase_tool.generate_tests(\
                                test_tool_type_list=self.cp_data.test_types)

            # @Checkpointing
            self.cp_data.tasks_obj.set_task_completed(task)
            self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())
            # Destroy meta test checkpointer
            self.meta_testcase_tool.get_checkpoint_state_object()\
                                                        .destroy_checkpoint()

        elif task == checkpoint_tasks.Tasks.\
                                    TESTS_EXECUTION_SELECTION_PRIORITIZATION:
            out_file_key = outdir_struct.TMP_SELECTED_TESTS_LIST
            out_file = self.head_explorer.get_file_pathname(out_file_key)
                                        
            # @Checkpointing
            if task_untouched:
                self.head_explorer.remove_file_and_get(out_file_key)
                self.cp_data.tasks_obj.set_task_executing(task)
                self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())

            all_tests = self.meta_testcase_tool.\
                                    get_testcase_info_object().get_tests_list()
            candidate_aliases = \
                        self.meta_testcase_tool.get_candidate_tools_aliases(\
                                test_tool_type_list=self.cp_data.test_types)
            selected_tests = []
            for meta_test in all_tests:
                toolalias, _ = DriversUtils.reverse_meta_element(meta_test)
                if toolalias in candidate_aliases:
                    selected_tests.append(meta_test)

            # TODO: use the actual selector. For now just use all tests
            common_fs.dumpJSON(list(selected_tests), out_file)

            # @Checkpointing
            self.cp_data.tasks_obj.set_task_completed(task)
            self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())
            # Destroy meta test checkpointer
            #self.meta_testexec_optimization_tool.get_checkpoint_state_object()\
            #                                            .destroy_checkpoint()
        elif task == checkpoint_tasks.Tasks.PASS_FAIL_TESTS_EXECUTION:
            # Make sure that the Matrices dir exists
            self.head_explorer.get_or_create_and_get_dir(\
                                            outdir_struct.RESULTS_MATRICES_DIR)

            matrix_file_key = outdir_struct.TMP_TEST_PASS_FAIL_MATRIX
            matrix_file = self.head_explorer.get_file_pathname(matrix_file_key)
            execoutput_file_key = \
                                outdir_struct.TMP_PROGRAM_TESTEXECUTION_OUTPUT
            if self.config.GET_PASSFAIL_OUTPUT_SUMMARY:
                execoutput_file = self.head_explorer.get_file_pathname(\
                                                           execoutput_file_key)
            else:
                execoutput_file = None
                                    
            # @Checkpointing
            if task_untouched:
                self.head_explorer.remove_file_and_get(matrix_file_key)
                self.head_explorer.remove_file_and_get(execoutput_file_key)
                self.meta_testcase_tool.get_checkpoint_state_object()\
                                                        .restart_task()
                self.cp_data.tasks_obj.set_task_executing(task)
                self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())

            # Execute tests
            test_list_file = self.head_explorer.get_file_pathname(\
                                        outdir_struct.TMP_SELECTED_TESTS_LIST)

            meta_testcases = common_fs.loadJSON(test_list_file)

            # Set test oracle
            self.test_oracle_manager.set_oracle(passfail=True)
            
            self.meta_testcase_tool.runtests(meta_testcases=meta_testcases, \
                        stop_on_failure=\
                                self.config.STOP_TESTS_EXECUTION_ON_FAILURE, \
                        recalculate_execution_times=True, \
                        fault_test_execution_matrix_file=matrix_file, \
                        fault_test_execution_execoutput_file=execoutput_file, \
                        test_prioritization_module=\
                                        self.meta_testexec_optimization_tool, \
                        finish_destroy_checkpointer=False)
            
            # Unset test oracle
            self.test_oracle_manager.set_oracle(passfail=False)

            # @Checkpointing
            self.cp_data.tasks_obj.set_task_completed(task)
            self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())
            # Destroy meta test checkpointer
            self.meta_testcase_tool.get_checkpoint_state_object()\
                                                        .destroy_checkpoint()

        elif task == checkpoint_tasks.Tasks.CRITERIA_GENERATION_GUIDANCE:
            pass #TODO (Maybe could be used someday. for now, just skip it)
        elif task == checkpoint_tasks.Tasks.CRITERIA_GENERATION:
            # @Checkpointing
            if task_untouched:
                self.meta_criteria_tool.get_checkpoint_state_object()\
                                                        .restart_task()
                self.cp_data.tasks_obj.set_task_executing(task)
                self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())

            self.meta_criteria_tool.instrument_code(criteria_enabled_list=\
                                        self.config.ENABLED_CRITERIA.get_val(), \
                                        finish_destroy_checkpointer=False)

            # @Checkpointing
            self.cp_data.tasks_obj.set_task_completed(task)
            self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())
            # Destroy meta test checkpointer
            self.meta_criteria_tool.get_checkpoint_state_object()\
                                                        .destroy_checkpoint()
        elif task == checkpoint_tasks.Tasks.\
                                CRITERIA_EXECUTION_SELECTION_PRIORITIZATION:
            pass #TODO (Maybe could be used someday. for now, just skip it)
        elif task == checkpoint_tasks.Tasks.CRITERIA_TESTS_EXECUTION:
            # Make sure that the Matrices dir exists
            self.head_explorer.get_or_create_and_get_dir(\
                                            outdir_struct.RESULTS_MATRICES_DIR)

            matrix_files_keys = {}
            matrix_files = {}
            execoutput_files_keys = {}
            execoutput_files = {}
            for criterion in self.config.ENABLED_CRITERIA.get_val():
                matrix_files_keys[criterion] = \
                                outdir_struct.TMP_CRITERIA_MATRIX[criterion]
                matrix_files[criterion] = \
                                self.head_explorer.get_file_pathname(\
                                                matrix_files_keys[criterion])
                execoutput_files_keys[criterion] = \
                        outdir_struct.TMP_CRITERIA_EXECUTION_OUTPUT[criterion]
                if criterion in self.config.CRITERIA_WITH_OUTPUT_SUMMARY:
                    execoutput_files[criterion] = \
                                self.head_explorer.get_file_pathname(\
                                            execoutput_files_keys[criterion])
                else:
                    execoutput_files[criterion] = None

            # @Checkpointing
            if task_untouched:
                for _, matrix_file_key in list(matrix_files_keys.items()):
                    self.head_explorer.remove_file_and_get(matrix_file_key)
                for _, execoutput_file_key in list(\
                                                execoutput_files_keys.items()):
                    self.head_explorer.remove_file_and_get(execoutput_file_key)
                self.meta_criteria_tool.get_checkpoint_state_object()\
                                                        .restart_task()
                self.cp_data.tasks_obj.set_task_executing(task)
                self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())

            criteria_set_sequence = self.config.CRITERIA_SEQUENCE.get_val()
            #if criteria_set_sequence is None:
            #    criteria_set_sequence = criteria_pkg.CRITERIA_SEQUENCE
            for cs_pos, criteria_set in enumerate(criteria_set_sequence):
                criteria_set &= set(matrix_files)
                if len(criteria_set) == 0:
                    continue

                # Was it already checkpointed w.r.t criteria set seq
                if self.cp_data.criteria_set_is_executed(cs_pos, criteria_set):
                    continue
                
                # If we have a new criteria set id
                self.cp_data.switchto_new_criteria_set(cs_pos, criteria_set)

                # get matrices by criteria
                test_list_file = self.head_explorer.get_file_pathname(\
                                        outdir_struct.TMP_SELECTED_TESTS_LIST)
                meta_testcases = common_fs.loadJSON(test_list_file)
                
                criterion_to_matrix = {\
                                    c: matrix_files[c] for c in criteria_set}

                # TODO: check set based on the criteria for which execout is enabled
                criterion_to_execoutput = {\
                                c: execoutput_files[c] for c in criteria_set}

                # Set test oracle
                self.test_oracle_manager.set_oracle(criteria_on=criteria_set)

                # execute
                self.meta_criteria_tool.runtests_criteria_coverage( \
                            testcases=meta_testcases, \
                            criterion_to_matrix=criterion_to_matrix, \
                            criterion_to_executionoutput=\
                                                    criterion_to_execoutput, \
                            cover_criteria_elements_once=self.config.\
                                    COVER_CRITERIA_ELEMENTS_ONCE.get_val(),\
                            prioritization_module_by_criteria=\
                                    self.meta_criteriaexec_optimization_tools,\
                            finish_destroy_checkpointer=True)

                # Update matrix if needed to have output diff or such
                # TODO: add CRITERIA_REQUIRING_OUTDIFF_WITH_PROGRAM to config
                for crit in criteria_set & set(self.config.\
                                CRITERIA_REQUIRING_OUTDIFF_WITH_PROGRAM.\
                                                                    get_val()):
                    pf_matrix_file = self.head_explorer.get_file_pathname(\
                                    outdir_struct.TMP_TEST_PASS_FAIL_MATRIX)
                    if self.config.GET_PASSFAIL_OUTPUT_SUMMARY.get_val():
                        pf_execoutput_file = \
                                        self.head_explorer.get_file_pathname(\
                                outdir_struct.TMP_PROGRAM_TESTEXECUTION_OUTPUT)
                    else:
                        pf_execoutput_file = None
                    DriversUtils.update_matrix_to_cover_when_diference(\
                                                criterion_to_matrix[crit], \
                                            criterion_to_execoutput[crit], \
                                            pf_matrix_file, pf_execoutput_file)

                # Unset test oracle
                self.test_oracle_manager.set_oracle(criteria_on=None)

                # @Checkpointing
                self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())

            # @Checkpointing
            self.cp_data.tasks_obj.set_task_completed(task)
            self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())

        elif task == checkpoint_tasks.Tasks.PASS_FAIL_STATS:
            # Make sure that the Matrices dir exists
            self.head_explorer.get_or_create_and_get_dir(\
                                            outdir_struct.RESULTS_STATS_DIR)

            tmp_matrix_file = self.head_explorer.get_file_pathname(\
                                    outdir_struct.TMP_TEST_PASS_FAIL_MATRIX)
            matrix_file = self.head_explorer.get_file_pathname(\
                                    outdir_struct.TEST_PASS_FAIL_MATRIX)
            tmp_execoutput_file = self.head_explorer.get_file_pathname(\
                                outdir_struct.TMP_PROGRAM_TESTEXECUTION_OUTPUT)
            execoutput_file = self.head_explorer.get_file_pathname(\
                                    outdir_struct.PROGRAM_TESTEXECUTION_OUTPUT)
            # Merge TMP pass fail and potentially existing pass fail
            StatsComputer.merge_lmatrix_into_right(\
                                                tmp_matrix_file, matrix_file)
            StatsComputer.merge_lexecoutput_into_right(\
                                        tmp_execoutput_file, execoutput_file)

            # @Checkpointing
            self.cp_data.tasks_obj.set_task_completed(task)
            self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())

            # Cleanup
            self.head_explorer.remove_file_and_get(\
                                    outdir_struct.TMP_TEST_PASS_FAIL_MATRIX)

        elif task == checkpoint_tasks.Tasks.CRITERIA_STATS:
            # Make sure that the Matrices dir exists
            self.head_explorer.get_or_create_and_get_dir(\
                                            outdir_struct.RESULTS_STATS_DIR)

            # Merge TMP criteria and potentially existing criteria
            for criterion in self.config.ENABLED_CRITERIA.get_val():
                tmp_matrix_file = self.head_explorer.get_file_pathname(\
                                outdir_struct.TMP_CRITERIA_MATRIX[criterion])
                matrix_file = self.head_explorer.get_file_pathname(\
                                outdir_struct.CRITERIA_MATRIX[criterion])
                tmp_execoutput_file = self.head_explorer.get_file_pathname(\
                        outdir_struct.TMP_CRITERIA_EXECUTION_OUTPUT[criterion])
                execoutput_file = self.head_explorer.get_file_pathname(\
                            outdir_struct.CRITERIA_EXECUTION_OUTPUT[criterion])
                StatsComputer.merge_lmatrix_into_right(tmp_matrix_file, \
                                                                matrix_file)
                StatsComputer.merge_lexecoutput_into_right(\
                                        tmp_execoutput_file, execoutput_file)

            # @Checkpointing
            self.cp_data.tasks_obj.set_task_completed(task)
            self.checkpointer.write_checkpoint(self.cp_data.get_json_obj())

            # Cleanup
            for criterion in self.config.ENABLED_CRITERIA.get_val():
                self.head_explorer.remove_file_and_get(\
                                outdir_struct.TMP_CRITERIA_MATRIX[criterion])

        elif task == checkpoint_tasks.Tasks.AGGREGATED_STATS:
            # Compute the final stats (MS, ...)
            StatsComputer.compute_stats(self.config, self.head_explorer)
        #-------------------------------------------------------------------

        if not self.cp_data.tasks_obj.task_is_complete(task):
            # @Checkpoint: set task as done
            self.cp_data.tasks_obj.set_task_completed(task)

            # @Checkpoint: write checkpoint
            self.checkpointer.write_checkpoint(\
                                        json_obj=self.cp_data.get_json_obj())
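
Every task above follows the same checkpoint discipline: mark the task as executing, do the work, mark it completed and persist the checkpoint so a re-run can skip it. A minimal sketch of that pattern, with a plain JSON file standing in for the framework's checkpointer (all names are illustrative).

import json
import os

CHECKPOINT = '/tmp/cp_demo.json'

def load_cp():
    if os.path.isfile(CHECKPOINT):
        with open(CHECKPOINT) as f:
            return json.load(f)
    return {'completed': []}

def run_task(name, body):
    cp = load_cp()
    if name in cp['completed']:        # already done: skip on re-execution
        return
    body()                             # the actual work
    cp['completed'].append(name)       # mark completed, then persist
    with open(CHECKPOINT, 'w') as f:
        json.dump(cp, f)

run_task('TESTS_GENERATION', lambda: print('generating tests...'))
run_task('PASS_FAIL_TESTS_EXECUTION', lambda: print('running tests...'))
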