Example #1
    def cross_folder_fdupes(custom_bin, folders, folder2alias):
        dup_list, invalid = KTestTestFormat.ktest_fdupes(*folders, \
                                custom_replay_tool_binary_dir=custom_bin)
        ERROR_HANDLER.assert_true(len(invalid) == 0, \
                            "there should be no invalid here", __file__)

        kepttest2duptest_map = {}
        test2keptdup = {}
        # get fdupes with metatests
        # TODO: check that each result of relpath is really a test
        for dup_tuple in dup_list:
            k_dir = KTestTestFormat.get_dir(dup_tuple[0], folders)
            key = os.path.relpath(dup_tuple[0], k_dir)
            key = DriversUtils.make_meta_element(key, \
                                                    folder2alias[k_dir])
            kepttest2duptest_map[key] = []
            for dp in dup_tuple[1:]:
                v_dir = KTestTestFormat.get_dir(dp, folders)
                val = os.path.relpath(dp, v_dir)
                val = DriversUtils.make_meta_element(val, \
                                                    folder2alias[v_dir])
                kepttest2duptest_map[key].append(val)
                test2keptdup[val] = key

        return kepttest2duptest_map, test2keptdup
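
A minimal usage sketch for the helper above; the folder paths and aliases below are invented for illustration, and passing None as the custom binary directory is an assumption:

# Hypothetical usage (paths and aliases are illustrative only):
folders = ['/tmp/gen_klee/ktests', '/tmp/gen_semu/ktests']
folder2alias = {folders[0]: 'klee', folders[1]: 'semu'}
kept2dups, dup2kept = cross_folder_fdupes(None, folders, folder2alias)
# kept2dups maps each kept meta test to its duplicates across folders;
# dup2kept maps each discarded duplicate back to the kept meta test.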
Example #2
def system_test_runner(prog, args_list, test_filename, repo_root_dir, \
                            exe_path_map=None, env_vars=None, timeout=None, \
                            collected_output=None, using_wrapper=False, \
                            dbg_log_execution_out=False):
    try:
        tmp_env = os.environ.copy()
        if env_vars is not None:
            #for e, v in env_vars.items():
            #    tmp_env[e] = v
            tmp_env.update(env_vars)
        if test_filename is not None:
            tmp_env[TEST_FILE_NAME_ENV_VAR] = test_filename
        if using_wrapper and timeout is not None:
            tmp_env[TEST_EXECUTION_TIMEOUT_ENV_VAR] = str(timeout)
            timeout = None

        if collected_output is None:
            retcode, out, err = DriversUtils.execute_and_get_retcode_out_err(\
                                prog=prog, args_list=args_list, env=tmp_env,\
                                timeout=timeout, out_on=dbg_log_execution_out,\
                                err_on=dbg_log_execution_out, \
                                merge_err_to_out=True)
        else:
            retcode, out, err = DriversUtils.execute_and_get_retcode_out_err(\
                                prog=prog, args_list=args_list, env=tmp_env,\
                                timeout=timeout, merge_err_to_out=True)
            collected_output.append(retcode)
            collected_output.append(out)
            collected_output.append(
                retcode in DriversUtils.EXEC_TIMED_OUT_RET_CODE)

        if dbg_log_execution_out:
            logging.debug("(DBG - Test Output):\n" + out)
    #except (ValueError, OSError) as os_e:
    except OSError as os_e:
        # ERROR
        logging.warning(
            "\ntest execution error in system_test_runner (bellow)")
        logging.warning(str(os_e))
        logging.warning("COMMAND: " + " ".join([prog] + args_list))
        if collected_output is not None:
            collected_output.append(None)
            collected_output.append(str(os_e))
            collected_output.append(False)
        return GlobalConstants.TEST_EXECUTION_ERROR
    except ValueError as v_e:
        logging.warning(
            "\ntest execution valueerror in system_test_runner (bellow)")
        logging.warning(str(v_e))
        logging.warning("COMMAND: " + " ".join([prog] + args_list))
        return GlobalConstants.TEST_EXECUTION_ERROR

    # Parse the result
    hasfail = False
    hasfail |= (retcode != 0)

    return GlobalConstants.FAIL_TEST_VERDICT if hasfail else \
                                            GlobalConstants.PASS_TEST_VERDICT
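
A hedged sketch of calling the runner above with an output-collection list; the program name, arguments, environment, and paths are invented:

# Hypothetical call (prog, args, env and paths are illustrative only):
collected = []
verdict = system_test_runner('./run_tests.sh', ['--case', '3'], 'case_3',
                             '/path/to/repo', env_vars={'DEBUG': '1'},
                             timeout=60, collected_output=collected)
# On success, collected holds [retcode, output, timed_out_flag] and verdict
# is PASS_TEST_VERDICT or FAIL_TEST_VERDICT (TEST_EXECUTION_ERROR on error).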
Example #3
    def _execute_a_test (self, testcase, exe_path_map, env_vars, \
                    callback_object=None, timeout=None, collect_output=False):
        """ Execute a test given that the executables have been set 
            properly
        """
        prog = 'klee-replay'

        if timeout is None:
            timeout = self.config.ONE_TEST_EXECUTION_TIMEOUT

        ERROR_HANDLER.assert_true(len(exe_path_map) == 1, \
                                    "support a single exe for now", __file__)
        ERROR_HANDLER.assert_true(callback_object is None, \
                                        'TODO: handle callback_obj', __file__)

        repo_exe = list(exe_path_map.keys())[0]
        local_exe = os.path.join(self.klee_used_tmp_build_dir, repo_exe)
        if repo_exe not in self.repo_exe_to_local_to_remote:
            if not os.path.isdir(os.path.dirname(local_exe)):
                os.makedirs(os.path.dirname(local_exe))
            self.repo_exe_to_local_to_remote[repo_exe] = {local_exe: None}

        remote_exe = exe_path_map[repo_exe]
        if remote_exe is None:
            remote_exe = repo_exe

        if remote_exe != self.repo_exe_to_local_to_remote[repo_exe][local_exe]:
            if remote_exe == repo_exe:
                self.code_builds_factory.repository_manager.\
                                    set_repo_to_build_default(\
                                        also_copy_to_map={repo_exe: local_exe})
            else:
                shutil.copy2(remote_exe, local_exe)

        args = [local_exe, os.path.join(self.tests_storage_dir, testcase)]
        tmp_env = os.environ.copy()
        #tmp_env.update(env_vars)
        tmp_env['KLEE_REPLAY_TIMEOUT'] = str(timeout)
        if collect_output:
            retcode, out, err = DriversUtils.execute_and_get_retcode_out_err(\
                                    prog=prog, args_list=args, env=tmp_env, \
                                                        merge_err_to_out=True)
            output_err = (retcode, out)
        else:
            retcode, out, err = DriversUtils.execute_and_get_retcode_out_err(\
                                    prog=prog, args_list=args, env=tmp_env, \
                                                    out_on=False, err_on=False)
            output_err = None

        if retcode in (DriversUtils.EXEC_TIMED_OUT_RET_CODE, \
                                    DriversUtils.EXEC_SEGFAULT_OUT_RET_CODE):
            verdict = common_mix.GlobalConstants.FAIL_TEST_VERDICT
        else:
            verdict = common_mix.GlobalConstants.PASS_TEST_VERDICT

        return verdict, output_err
Example #4
def make_build_func(repo_root_dir, exe_rel_paths, compiler, flags_list, clean,\
                                                                reconfigure):
    """ Helper for GNU make
    """
    def print_err(out, msg):
        out = out.splitlines()
        print(out, msg)

    #~ def print_err()

    cwd = os.getcwd()
    os.chdir(repo_root_dir)

    #try:
    tmp_env = os.environ.copy()
    if compiler is not None:
        tmp_env["CC"] = compiler
    if flags_list is not None:
        tmp_env["CFLAGS"] = " ".join(flags_list)

    if reconfigure:
        args_list = ['clean']
        retcode, out, _ = DriversUtils.execute_and_get_retcode_out_err(\
                                prog='make', args_list=args_list, \
                                env=tmp_env, merge_err_to_out=True)
        if retcode != 0:
            print_err(out, "reconfigure failed")
            os.chdir(cwd)
            return GlobalConstants.COMMAND_FAILURE
    if clean:
        args_list = ['clean']
        retcode, out, _ = DriversUtils.execute_and_get_retcode_out_err(\
                                prog='make', args_list=args_list, \
                                env=tmp_env, merge_err_to_out=True)
        if retcode != 0:
            print_err(out, "clean failed")
            os.chdir(cwd)
            return GlobalConstants.COMMAND_FAILURE

    retcode, out, _ = DriversUtils.execute_and_get_retcode_out_err(\
                        prog='make', env=tmp_env, merge_err_to_out=True)
    if retcode != 0:
        print_err(out, "make")
        os.chdir(cwd)
        return GlobalConstants.COMMAND_FAILURE
    #except:
    #    os.chdir(cwd)
    #    assert False, "Build Unexpected Error in "+__file__
    #return GlobalConstants.COMMAND_FAILURE

    os.chdir(cwd)
    return GlobalConstants.COMMAND_SUCCESS


#~ def make_build_func()
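
A minimal invocation sketch for the GNU make helper above; the repository path, compiler, and flags are invented:

# Hypothetical call (repo path, compiler and flags are illustrative only):
res = make_build_func('/path/to/repo', None, 'gcc', ['-O0', '-g'],
                      clean=True, reconfigure=False)
assert res in (GlobalConstants.COMMAND_SUCCESS,
               GlobalConstants.COMMAND_FAILURE)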
Example #5
 def post_test_dup(stored_map, tests_to_avoid, origin,
                   kepttest2duptest_map):
     for kept, duplist in kepttest2duptest_map.items():
         alias, _ = DriversUtils.reverse_meta_element(kept)
         if alias == origin:
             stored_map[kept] = []
             for dup in duplist:
                 dalias, dtest = DriversUtils.reverse_meta_element(dup)
                 if dalias == origin:
                     continue
                 stored_map[kept].append(dup)
                 tests_to_avoid.append(dup)
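
A small input sketch, assuming meta test names are built with DriversUtils.make_meta_element as in the other examples; the aliases and test names are invented:

# Hypothetical data (aliases and test names are illustrative only):
kept = DriversUtils.make_meta_element('test_00', 'klee')
dups = [DriversUtils.make_meta_element('test_07', 'klee'),
        DriversUtils.make_meta_element('test_01', 'semu')]
stored_map, tests_to_avoid = {}, []
post_test_dup(stored_map, tests_to_avoid, 'klee', {kept: dups})
# Only the 'semu' duplicate is recorded; same-origin duplicates are skipped.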
Example #6
def fix_matrices(results_data_dir):
    import os
    from muteria.drivers import DriversUtils
    sm_mat = os.path.join(results_data_dir, 'matrices', 'STRONG_MUTATION.csv')
    sm_out = os.path.join(results_data_dir, 'testexecution_outputs',
                          'STRONG_MUTATION_output.json')
    p_mat = os.path.join(results_data_dir, 'matrices', 'PASSFAIL.csv')
    p_out = os.path.join(results_data_dir, 'testexecution_outputs',
                         'program_output.json')
    DriversUtils.update_matrix_to_cover_when_difference(
        sm_mat, sm_out, p_mat, p_out)
    print("Fix success for {}!".format(results_data_dir))
Example #7
def fault_analysis(cm_corebench_scripts_dir, c_id, conf_py, in_muteria_outdir,
                   out_top_dir):
    if not os.path.isdir(out_top_dir):
        os.mkdir(out_top_dir)

    in_res_data_dir = os.path.join(in_muteria_outdir, 'latest', 'RESULTS_DATA')
    testtools_workdir = os.path.join(in_muteria_outdir, 'latest',
                                     'testscases_workdir')
    pass_fail_matrix = os.path.join(in_res_data_dir, "matrices",
                                    "PASSFAIL.csv")

    pf_mat = common_matrices.ExecutionMatrix(filename=pass_fail_matrix)
    test_list = list(pf_mat.get_nonkey_colname_list())
    #semu_tool_dirs = [d for d in os.listdir(testtools_workdir) if d.startswith('semu_cmp-')]

    # get fault tests
    get_commit_fault_tests(cm_corebench_scripts_dir, c_id, conf_py,
                           in_res_data_dir, out_top_dir)

    # get test to timestamp
    test_timestamp_file = os.path.join(out_top_dir, "test_to_timestamp.json")
    test2timestamp = {}
    ## get tests by tools
    tools2tests = {}
    for test in test_list:
        alias, _ = DriversUtils.reverse_meta_element(test)
        # XXX: only SEMU
        if not alias.startswith('semu_cmp-'):
            continue
        if alias not in tools2tests:
            tools2tests[alias] = set()
        tools2tests[alias].add(test)
    ## untar tests dir
    for alias, tests in tools2tests.items():
        d = os.path.join(testtools_workdir, alias)
        assert os.path.isdir(
            d), "test tool dir " + d + " missing for alias " + alias
        test_tar = os.path.join(d, 'tests_files.tar.gz')
        es = common_fs.TarGz.decompressDir(test_tar)
        assert es is None, "decompress error: " + es
        tests_files = os.path.join(d, 'tests_files')
        assert os.path.isdir(tests_files), "dir missing after decompress"
        for test in tests:
            _, simple_test = DriversUtils.reverse_meta_element(test)
            gt = TestcasesToolSemu._get_generation_time_of_test(
                simple_test, tests_files)
            test2timestamp[test] = gt
        shutil.rmtree(tests_files)

    common_fs.dumpJSON(test2timestamp, test_timestamp_file, pretty=True)
Example #8
    def reset(self, toolalias, test_objective_list, test_list, **kwargs):
        """ Reset the optimizer
        """
        self.test_objective_ordered_list = copy.deepcopy(test_objective_list)
        self.pointer = 0
        self.test_objective_to_test_execution_optimizer = {
            to: TestExecutionOptimizer(self.config, self.explorer) \
            for to in self.test_objective_ordered_list
        }

        # get the test list per test objectives, based on the matrix
        opt_by_criterion = self._get_optimizing_criterion()
        matrix_file = self.explorer.get_existing_file_pathname(\
                                explorer.TMP_CRITERIA_MATRIX[opt_by_criterion])
        test_list_per_test_objective = \
                            self._get_test_objective_tests_from_matrix(\
                                                    matrix_file=matrix_file)

        # Update test list
        for to, teo in self.test_objective_to_test_execution_optimizer.items():
            alias_to = DriversUtils.make_meta_element(to, toolalias)
            ERROR_HANDLER.assert_true(\
                                    alias_to in test_list_per_test_objective, \
                                    "Bug: test objective missing("+str(to)+')')
            teo.reset(None, test_list_per_test_objective[alias_to], \
                                                            disable_reset=True)
Example #9
    def reset(self, toolalias, test_objective_list, test_list, **kwargs):
        """ Reset the optimizer
        """
        # Make sure that all objectives in test_objective_list are in dictobj
        # and get test_of_testobjective
        test_of_to = {}
        for to in test_objective_list:
            meta_to = DriversUtils.make_meta_element(to, toolalias)
            ERROR_HANDLER.assert_true(meta_to in self.dictobj, \
                                "meta test objective {} not in self.dictobj"\
                                                    .format(meta_to), __file__)
            test_of_to[to] = self.dictobj[meta_to]

            # If None as testlist, use all tests
            if test_of_to[to] is None:
                test_of_to[to] = test_list
            else:
                test_of_to[to] = list(set(test_list) & set(test_of_to[to]))

        self.test_objective_ordered_list = copy.deepcopy(test_objective_list)
        self.pointer = 0
        self.test_objective_to_test_execution_optimizer = {
            to: TestExecutionOptimizer(self.config, self.explorer) \
            for to in self.test_objective_ordered_list
        }
        for to, teo in self.test_objective_to_test_execution_optimizer.items():
            teo.reset(None, test_of_to[to], disable_reset=True)
Example #10
    def _extract_coverage_data_of_a_test(self, enabled_criteria, \
                                    test_execution_verdict, result_dir_tmp):
        ''' read json files and extract data
            return: the dict of criteria with covering count
        '''
        in_file = os.path.join(result_dir_tmp, self.cov_data_filename)
        cov_dat_obj = common_fs.loadJSON(in_file)
        cov_dat_obj = {TestCriteria[v]: cov_dat_obj[v] for v in cov_dat_obj}

        ERROR_HANDLER.assert_true(set(cov_dat_obj) == set(enabled_criteria), \
                                    "mismatching criteria enabled", __file__)

        res = {c: {} for c in enabled_criteria}

        for c in cov_dat_obj:
            for filename, filedat in list(cov_dat_obj[c].items()):
                if filedat is not None:
                    for id_, cov in list(filedat.items()):
                        ident = DriversUtils.make_meta_element(\
                                                            str(id_), filename)
                        res[c][ident] = cov

        # delete cov file
        os.remove(in_file)

        return res
Example #11
 def installed(cls, custom_binary_dir=None):
     """ Check that the tool is installed
         :return: bool representing whether the tool is installed or not
                 (executable accessible on the path)
                 - True: the tool is installed and works
                 - False: the tool is not installed or does not work
     """
     for prog in (cls.tool, ):
         if custom_binary_dir is not None:
             prog = os.path.join(custom_binary_dir, prog)
         if not DriversUtils.check_tool(prog=prog, args_list=['--version'],\
                                                 expected_exit_codes=[1]):
             return False
     if not DriversUtils.check_tool(prog=cls.stdbuf, args_list=['--version'],\
                                                 expected_exit_codes=[0]):
         return False
     return True
Example #12
    def _do_instrument_code (self, outputdir, exe_path_map, \
                                        code_builds_factory, \
                                        enabled_criteria, parallel_count=1):
        # Setup
        if os.path.isdir(self.instrumented_code_storage_dir):
            shutil.rmtree(self.instrumented_code_storage_dir)
        os.mkdir(self.instrumented_code_storage_dir)
        if os.path.isdir(self.gc_files_dir):
            shutil.rmtree(self.gc_files_dir)
        os.mkdir(self.gc_files_dir)

        prog = 'gcc'

        flags = ['--coverage', '-fprofile-dir='+self.gc_files_dir, '-O0']
        additionals = ["-fkeep-inline-functions"]
        
        # get gcc version
        ret, out, err = DriversUtils.execute_and_get_retcode_out_err(\
                                                        prog, ['-dumpversion'])
        ERROR_HANDLER.assert_true(ret == 0, "'gcc -dumpversion' failed")
        
        # if version > 6.5
        if int(out.split('.')[0]) >= 6:
            if int(out.split('.')[0]) > 6 or int(out.split('.')[1]) > 5:
                additionals += ["-fkeep-static-functions"]
        
        flags += additionals
        
        rel_path_map = {}
        exes, _ = code_builds_factory.repository_manager.\
                                                    get_relative_exe_path_map()
        for exe in exes:
            filename = os.path.basename(exe)
            rel_path_map[exe] = os.path.join(\
                                self.instrumented_code_storage_dir, filename)

        self.instrument_callback_obj = self.InstrumentCallbackObject()
        self.instrument_callback_obj.set_post_callback_args(\
                                            (self.gc_files_dir, rel_path_map))
        pre_ret, ret, post_ret = code_builds_factory.transform_src_into_dest(\
                        src_fmt=CodeFormats.C_SOURCE,\
                        dest_fmt=CodeFormats.NATIVE_CODE,\
                        src_dest_files_paths_map=None,\
                        compiler=prog, flags_list=flags, clean_tmp=True, \
                        reconfigure=True, \
                        callback_object=self.instrument_callback_obj)
        
        # Check
        if ret == common_mix.GlobalConstants.COMMAND_FAILURE:
            ERROR_HANDLER.error_exit("Program {} {}.".format(prog,\
                                        'build failed'), __file__)

        # write down the rel_path_map
        ERROR_HANDLER.assert_true(not os.path.isfile(\
                self.instrumentation_details), "must not exist here", __file__)
        common_fs.dumpJSON(rel_path_map, self.instrumentation_details)
Example #13
 def installed(cls, custom_binary_dir=None):
     """ Check that the tool is installed
         :return: bool representing whether the tool is installed or not
                 (executable accessible on the path)
                 - True: the tool is installed and works
                 - False: the tool is not installed or does not work
     """
     for prog in ('klee', 'klee-replay'):
         if not DriversUtils.check_tool(prog=prog, args_list=['--version'],\
                                                 expected_exit_codes=[1]):
             return False
     return True
Example #14
    def _do_instrument_code (self, exe_path_map, \
                                        code_builds_factory, \
                                        enabled_criteria, parallel_count=1):
        # Setup
        if os.path.isdir(self.instrumented_code_storage_dir):
            shutil.rmtree(self.instrumented_code_storage_dir)
        os.mkdir(self.instrumented_code_storage_dir)
        if os.path.isdir(self.mutant_data):
            shutil.rmtree(self.mutant_data)
        os.mkdir(self.mutant_data)

        prog = 'gpmutation'
        if self.custom_binary_dir is not None:
            prog = os.path.join(self.custom_binary_dir, prog)
            ERROR_HANDLER.assert_true(os.path.isfile(prog), \
                            "The tool {} is missing from the specified dir {}"\
                                        .format(os.path.basename(prog), \
                                            self.custom_binary_dir), __file__)

        exes, _ = code_builds_factory.repository_manager.\
                                                    get_relative_exe_path_map()

        ERROR_HANDLER.assert_true(len(exes) == 1, \
                                        "Support only a singe exe", __file__)

        gpmutation_subj = os.path.basename(exes[0])
        args=[gpmutation_subj, self.separate_muts_dir]

        # Execute GPMutation
        ret, out, err = DriversUtils.execute_and_get_retcode_out_err(\
                                                        prog, args_list=args)
        if (ret != 0):
            logging.error(out)
            logging.error(err)
            logging.error("\n>> CMD: " + " ".join([prog]+args) + '\n')
            ERROR_HANDLER.error_exit("gpmutation failed!", __file__)
        
        # write down the rel_path_map
        ERROR_HANDLER.assert_true(not os.path.isfile(\
                self.instrumentation_details), "must not exist here", __file__)
        
        with open (self.instrumentation_details, 'w') as f:
            for exe in exes:
                f.write(exe+'\n')

        # Archive separated if on
        if self.archive_separated:
            err_msg = common_fs.TarGz.compressDir(self.separate_muts_dir, \
                                                    remove_in_directory=True)
            ERROR_HANDLER.assert_true(err_msg is None,\
                                "Compression failed: "+str(err_msg), __file__)
Example #15
 def _dir_chmod777(self, dirpath):
     try:
         for root_, dirs_, files_ in os.walk(dirpath):
             for sub_d in dirs_:
                 if os.path.isdir(os.path.join(root_, sub_d)):
                     os.chmod(os.path.join(root_, sub_d), 0o777)
             for f_ in files_:
                 if os.path.isfile(os.path.join(root_, f_)):
                     os.chmod(os.path.join(root_, f_), 0o777)
     except PermissionError:
         ret,_,_ = DriversUtils.execute_and_get_retcode_out_err('sudo', \
                             args_list=['chmod 777 -R {}'.format(dirpath)])
         ERROR_HANDLER.assert_true(ret == 0, \
                     "'sudo chmod 777 -R "+dirpath+"' failed (returned "+\
                                                     str(ret)+")", __file__)
Example #16
 def _compute_testcases_info(self, candidate_tool_aliases=None):
     meta_testcase_info_obj = TestcasesInfoObject()
     if candidate_tool_aliases is None:
         candidate_tool_aliases = self.testcases_configured_tools.keys()
     for ttoolalias in candidate_tool_aliases:
         ttool = \
             self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
         tool_testcase_info = ttool.get_testcase_info_object()
         old2new_tests = {}
         for t_test in tool_testcase_info.get_tests_list():
             meta_t_key = DriversUtils.make_meta_element(t_test, ttoolalias)
             old2new_tests[t_test] = meta_t_key
         meta_testcase_info_obj.update_using(ttoolalias, old2new_tests, \
                                                         tool_testcase_info)
     return meta_testcase_info_obj
Example #17
    def _collect_temporary_coverage_data(self, criteria_name_list, \
                                            test_execution_verdict, \
                                            used_environment_vars, \
                                                    result_dir_tmp):
        ''' get gcov files from gcda files into result_dir_tmp
        '''
        prog = 'gcov'

        cov2flags = {
                    TestCriteria.STATEMENT_COVERAGE: [],
                    TestCriteria.BRANCH_COVERAGE: ['-b', '-c'],
                    TestCriteria.FUNCTION_COVERAGE: ['-f'],
                }

        args_list = []
        for criterion in criteria_name_list:
            args_list += cov2flags[criterion]

        gcda_files = self._get_gcda_list()

        raw_filename_list = [os.path.splitext(f)[0] for f in gcda_files]
        args_list += raw_filename_list
        
        if len(gcda_files) > 0:
            # TODO: When gcov generates coverage for different files with
            # the same filename but located in different dirs, avoid overriding.
            # Go where the gcov will be looked for
            cwd = os.getcwd()
            os.chdir(self.gc_files_dir)

            # collect gcda (gcno)
            r, _, _ = DriversUtils.execute_and_get_retcode_out_err(prog=prog, \
                                        args_list=args_list, out_on=False, \
                                                                err_on=False)

            os.chdir(cwd)
            
            if r != 0:
                ERROR_HANDLER.error_exit("Program {} {}.".format(prog,\
                        'error collecting coverage is problematic'), __file__)
            
            # delete gcda
            for gcda_f in gcda_files:
                os.remove(gcda_f)
            
            common_fs.dumpJSON(self._get_gcov_list(), \
                                os.path.join(result_dir_tmp,\
                                                self.gcov_files_list_filename))
Example #18
 def get_too2relmuts(relmuts_to_reltests, toollist=None):
     res = {tool: set()
            for tool in toollist} if toollist is not None else {}
     for relmut, t_list in relmuts_to_reltests.items():
         for meta_t in t_list:
             toolalias, test = DriversUtils.reverse_meta_element(meta_t)
             if toollist is None:
                 if toolalias not in res:
                     res[toolalias] = set()
             else:
                 assert toolalias in toollist, "PB: toolalias ({}) not in toollist ({})".format(
                     toolalias, toollist)
             res[toolalias].add(relmut)
     for toolalias in res:
         res[toolalias] = list(res[toolalias])
     return res
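
A toy invocation, assuming the usual meta-test naming; the mutant ids, tool aliases, and test names are invented:

# Hypothetical input (mutant ids, aliases and tests are illustrative only):
relmuts_to_reltests = {
    'mut_1': [DriversUtils.make_meta_element('t1', 'klee')],
    'mut_2': [DriversUtils.make_meta_element('t2', 'klee'),
              DriversUtils.make_meta_element('t3', 'shadow')],
}
print(get_too2relmuts(relmuts_to_reltests))
# e.g. {'klee': ['mut_1', 'mut_2'], 'shadow': ['mut_2']}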
Example #19
 def after_command(self):
     if self.op_retval == common_mix.GlobalConstants.COMMAND_FAILURE:
         return common_mix.GlobalConstants.COMMAND_FAILURE
     m_regex = re.compile('({}|{}|{})'.format('klee_change', \
                                     'klee_get_true', 'klee_get_false'))
     matched_lines = set()
     for src in self.source_files_to_objects:
         with open(os.path.join(self.repository_rootdir, src), \
                                                 encoding='UTF-8') as f:
             for lnum, line in enumerate(f.readlines()):
                 if m_regex.search(line) is not None:
                     matched_lines.add(DriversUtils.make_meta_element(\
                                                     str(lnum+1), src))
     ret_lines = self.post_callback_args
     ret_lines.extend(matched_lines)
     return common_mix.GlobalConstants.COMMAND_SUCCESS
Example #20
        def tool_parallel_test_exec(ttoolalias):
            # Actual execution
            found_a_failure = False
            # Whether the execution was unsuccessful
            test_error = False
            ttool = \
                self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            test_failed_verdicts, test_execoutput = ttool.runtests( \
                                testcases_by_tool[ttoolalias], \
                                exe_path_map, env_vars, \
                                stop_on_failure, \
                                per_test_timeout=per_test_timeout,
                                use_recorded_timeout_times=\
                                    use_recorded_timeout_times, \
                                recalculate_execution_times=\
                                    recalculate_execution_times, \
                                with_output_summary=\
                                            with_output_summary, \
                                hash_outlog=hash_outlog, \
                                parallel_count=\
                                    parallel_test_count_by_tool[ttoolalias])
            with shared_loc:
                for testcase in test_failed_verdicts:
                    meta_testcase = DriversUtils.make_meta_element(\
                                                        testcase, ttoolalias)
                    meta_test_failedverdicts_outlog[0][meta_testcase] = \
                                                test_failed_verdicts[testcase]
                    meta_test_failedverdicts_outlog[1][meta_testcase] = \
                                                    test_execoutput[testcase]
                    if not found_a_failure \
                                and test_failed_verdicts[testcase] == \
                                common_mix.GlobalConstants.FAIL_TEST_VERDICT:
                        found_a_failure = True
                    if not test_error \
                                and test_failed_verdicts[testcase] == \
                            common_mix.GlobalConstants.TEST_EXECUTION_ERROR:
                        test_error = True

                # @Checkpoint: Checkpointing
                checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=ttoolalias, \
                                opt_payload=meta_test_failedverdicts_outlog)
            return found_a_failure, test_error
Example #21
    def execute_testcase (self, meta_testcase, exe_path_map, env_vars, \
                                        timeout=None, \
                                        use_recorded_timeout_times=None, \
                                        recalculate_execution_times=False, \
                                        with_output_summary=True, \
                                        hash_outlog=None):
        '''
        Execute a test case with the given executable and 
        say whether it failed

        :param meta_testcase: string name of the test cases to execute
        :param exe_path_map: string representing the file system path to 
                        the executable to execute with the test
        :param env_vars: dict of environment variables to set before
                        executing the test ({<variable>: <value>})
        :type hash_outlog: bool
        :param hash_outlog: decide whether to hash the outlog or not
        :returns: pair of:
                - boolean failed verdict of the test 
                        (True if failed, False otherwise)
                - test execution output log hash data object or None
        '''

        if exe_path_map is None:
            exe_path_map = self._get_default_exe_path_map()

        if hash_outlog is None:
            hash_outlog = self.hash_outlog

        # Find which test tool's the testcase is, then execute
        ttoolalias, testcase = DriversUtils.reverse_meta_element(meta_testcase)
        ERROR_HANDLER.assert_true( \
                            ttoolalias in self.testcases_configured_tools, \
                            "Test tool {} not registered".format(ttoolalias), \
                                                                    __file__)
        ttool = self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
        return ttool.execute_testcase(testcase, exe_path_map, env_vars, \
                            timeout=timeout, \
                            use_recorded_timeout_times=\
                                                use_recorded_timeout_times, \
                            recalculate_execution_times=\
                                                recalculate_execution_times, \
                            with_output_summary=with_output_summary, \
                            hash_outlog=hash_outlog)
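
A hedged call sketch for the method above on a meta test-case tool instance; the instance name, test name, alias, and timeout are invented:

# Hypothetical call (instance and values are illustrative only):
meta_test = DriversUtils.make_meta_element('test_42', 'devtests')
failed, outlog = meta_testcase_tool.execute_testcase(
    meta_test, exe_path_map=None, env_vars={}, timeout=30)
# failed is the boolean failed verdict; outlog is the output summary or None.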
Example #22
def python_unittest_runner(test_name, repo_root_dir, exe_path_map, env_vars, \
                                            timeout, collected_output=None):
    # TODO: use exe_path_map 

    def parse_test(s):
        return s.split('...')[0].replace(':','/').replace(' ','')

    cwd = os.getcwd()
    os.chdir(repo_root_dir)

    try:
        args_list = ['-m', 'unittest', test_name, '-v']

        if collected_output is None:
            retcode, stdout, _ = DriversUtils.execute_and_get_retcode_out_err(\
                                prog=sys.executable, args_list=args_list, \
                                        timeout=timeout, merge_err_to_out=True)
            stdout = stdout.splitlines()
        else:
            # collected_output is a list ([retcode, out_err_log])
            # TODO: use wrapper? or parse out for particular codes... 
            assert False, "TO BE Implemented"
    except:
        # ERROR
        os.chdir(cwd)
        return GlobalConstants.TEST_EXECUTION_ERROR
    
    # Parse the result
    subtests_verdicts = {}
    hasfail = False
    hasfail |= (retcode != 0)
    for s in stdout:
        if s.endswith('... FAIL'):
            hasfail = True
            subtests_verdicts[parse_test(s)] = True
        elif s.endswith('... ok'):
            subtests_verdicts[parse_test(s)] = False
    #print(subtests_verdicts)
    os.chdir(cwd)
    return GlobalConstants.FAIL_TEST_VERDICT if hasfail else \
                                            GlobalConstants.PASS_TEST_VERDICT
#~ def python_unittest_runner()
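
A minimal sketch of running one Python unittest through the helper above; the test name and repository path are invented:

# Hypothetical usage (test name and repo path are illustrative only):
verdict = python_unittest_runner('tests.test_math.TestAdd.test_small',
                                 '/path/to/repo', exe_path_map=None,
                                 env_vars=None, timeout=120)
assert verdict in (GlobalConstants.PASS_TEST_VERDICT,
                   GlobalConstants.FAIL_TEST_VERDICT,
                   GlobalConstants.TEST_EXECUTION_ERROR)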
Example #23
    def _compute_criterion_info(self, criterion, candidate_tool_aliases=None):
        if criterion not in CriteriaToInfoObject:
            return None

        meta_criterion_info_obj = CriteriaToInfoObject[criterion]()
        if candidate_tool_aliases is None:
            candidate_tool_aliases = []
            for _, config in list(self.tools_config_by_criterion_dict.items()):
                candidate_tool_aliases.append(config.get_tool_config_alias())
        for ctoolalias in candidate_tool_aliases:
            ctool = self.criteria_configured_tools[ctoolalias]\
                                                            [self.TOOL_OBJ_KEY]
            tool_element_info = ctool.get_criterion_info_object(criterion)
            old2new_tests = {}
            for c_elem in tool_element_info.get_elements_list():
                meta_c_key = DriversUtils.make_meta_element(c_elem, ctoolalias)
                old2new_tests[c_elem] = meta_c_key
            meta_criterion_info_obj.update_using(ctoolalias, old2new_tests, \
                                                            tool_element_info)
        return meta_criterion_info_obj
Example #24
def make_build(repo_root_dir, exe_rel_paths, compiler, flags_list, clean,\
                                                                reconfigure):
    cwd = os.getcwd()
    os.chdir(repo_root_dir)
    if flags_list is None:
        flags_list = ["-DUSING_MUTERIA"]
    else:
        flags_list.append("-DUSING_MUTERIA")
    # try:
    tmp_env = os.environ.copy()
    if compiler is not None:
        tmp_env["CC"] = compiler
    if flags_list is not None:
        tmp_env["CFLAGS"] = " ".join(flags_list)
    args_list = ['build_project.sh']
    if reconfigure:
        args_list.append('1')
    else:
        args_list.append('0')

    if clean:
        args_list.append('1')
    else:
        args_list.append('0')

    retcode, out, _ = DriversUtils.execute_and_get_retcode_out_err(
        prog='bash', args_list=args_list, env=tmp_env, merge_err_to_out=True)

    def print_err(out, msg):
        #out = out.splitlines()
        #print(out, msg)
        logging.error(str(out) + msg)

    if retcode != 0:
        print_err(out, "make")
        os.chdir(cwd)
        return GlobalConstants.COMMAND_FAILURE
    os.chdir(cwd)
    return GlobalConstants.COMMAND_SUCCESS
Example #25
    def _get_input_bitcode_file(self, code_builds_factory, rel_path_map, \
                                                meta_criteria_tool_obj=None):
        meta_mu_src = self.driver_config.get_meta_mutant_source()

        # XXX Case of manual annotation
        if meta_mu_src == MetaMuSource.ANNOTATION:
            with open(self.cand_muts_file, 'w') as f:
                # Single mutant (id 1, corresponding to old version)
                f.write(str(1) + '\n')
            return super(TestcasesToolSemu, self)._get_input_bitcode_file(\
                                        code_builds_factory, rel_path_map, \
                                meta_criteria_tool_obj=meta_criteria_tool_obj)
        if type(meta_mu_src) == str:
            # XXX: The actual path to the meta is specified
            return meta_mu_src

        # XXX: Case of other mutation tools like Mart
        # get the meta criterion file from MART or any compatible tool.
        mutant_gen_tool_name = meta_mu_src.get_field_value()
        mut_tool_alias_to_obj = \
                            meta_criteria_tool_obj.get_criteria_tools_by_name(\
                                                        mutant_gen_tool_name)

        if len(mut_tool_alias_to_obj) == 0:
            logging.warning(\
                'SEMu requires {} to generate mutants but none used'.format(\
                                                        mutant_gen_tool_name))

        ERROR_HANDLER.assert_true(len(mut_tool_alias_to_obj) == 1, \
                                "SEMu supports tests generation from"
                                "a single .bc file for now (todo).", __file__)

        t_alias2metamu_bc = {}
        t_alias2mutantInfos = {}
        for alias, obj in mut_tool_alias_to_obj.items():
            dest_bc = rel_path_map[list(rel_path_map)[0]] + '.bc'
            shutil.copy2(obj.get_test_gen_metamutant_bc(), dest_bc)
            t_alias2metamu_bc[alias] = dest_bc
            t_alias2mutantInfos[alias] = obj.get_criterion_info_object(None)

        # XXX: get mutants ids by functions
        self.mutants_by_funcs = {}
        single_alias = list(t_alias2mutantInfos)[0]
        single_tool_obj = t_alias2mutantInfos[single_alias]

        cand_muts = list(single_tool_obj.get_elements_list())

        for mut in single_tool_obj.get_elements_list():
            func = single_tool_obj.get_element_data(mut)[\
                                                        'mutant_function_name']
            #meta_mut = DriversUtils.make_meta_element(mut, single_alias)
            if func not in self.mutants_by_funcs:
                self.mutants_by_funcs[func] = set()
            self.mutants_by_funcs[func].add(mut)  #meta_mut)

        # XXX: get candidate mutants list
        if self.driver_config.get_target_only_live_mutants() \
                                        and os.path.isfile(self.sm_mat_file):
            sm_mat = common_matrices.ExecutionMatrix(\
                                                filename=self.sm_mat_file)
            mut2killing_tests = sm_mat.query_active_columns_of_rows()
            alive_muts = [m for m, k_t in mut2killing_tests.items() \
                                                        if len(k_t) == 0]
            cand_muts = []
            for meta_m in alive_muts:
                t_alias, m = DriversUtils.reverse_meta_element(meta_m)
                if t_alias in t_alias2metamu_bc:  # There is a single one
                    cand_muts.append(m)

        with open(self.cand_muts_file, 'w') as f:
            for m in cand_muts:
                f.write(str(m) + '\n')

        return t_alias2metamu_bc[list(t_alias2metamu_bc)[0]]
Example #26
    def runtests(self, meta_testcases=None, exe_path_map=None, env_vars=None, \
                        stop_on_failure=False, \
                        per_test_timeout=None, \
                        use_recorded_timeout_times=None, \
                        recalculate_execution_times=False, \
                        fault_test_execution_matrix_file=None, \
                        fault_test_execution_execoutput_file=None, \
                        with_output_summary=True, \
                        hash_outlog=None, \
                        test_prioritization_module=None, \
                        parallel_test_count=1, \
                        parallel_test_scheduler=None, \
                        restart_checkpointer=False,
                        finish_destroy_checkpointer=True):
        '''
        Execute the list of test cases with the given executable and 
        say, for each test case, whether it failed

        :param meta_testcases: list of test cases to execute
        :param exe_path_map: string representing the file system path to 
                        the executable to execute with the tests
        :param env_vars: dict of environment variables to set before
                        executing each test ({<variable>: <value>})
        :param stop_on_failure: decide whether to stop the test 
                        execution once a test fails
        :param fault_test_execution_matrix_file: Optional matrix file 
                        to store the tests' pass fail execution data
        :param fault_test_execution_execoutput_file: Optional output log file 
                        to store the tests' execution actual output (hashed)
        :param with_output_summary: decide whether to return outlog hash 
        :type hash_outlog: bool
        :param hash_outlog: decide whether to hash the outlog or not
        :param test_prioritization_module: Specify the test prioritization
                        module. 
                        (TODO: Implement support)
        :param parallel_test_count: Specify the number of parallel test
                        executions. Must be an integer >= 1 or None.
                        When None, the max possible value is used.
        :param parallel_test_scheduler: Specify the function that will
                        handle parallel test scheduling by tool, using
                        the test execution optimizer. 
                        (TODO: Implement support)

        :type restart_checkpointer: bool
        :param restart_checkpointer: Decide whether to discard checkpoint
                        and restart anew.

        :type finish_destroy_checkpointer: bool
        :param finish_destroy_checkpointer: Decide whether to automatically 
                        destroy the checkpointer when done or not
                        Useful if the caller has a checkpointer to update.

        :returns: dict of testcase and their failed verdict.
                 {<test case name>: <True if failed, False if passed,
                    UNCERTAIN_TEST_VERDICT if uncertain>}
                 If stop_on_failure is True, only return the tests that 
                 have been executed until the failure
        '''

        ERROR_HANDLER.assert_true(meta_testcases is not None, \
                                            "Must specify testcases", __file__)

        # FIXME: Make sure that support is implemented for
        # parallelism and test prioritization. Remove the code below
        # once supported:
        ERROR_HANDLER.assert_true(test_prioritization_module is None, \
                        "Must implement test prioritization support here", \
                                                                    __file__)
        ERROR_HANDLER.assert_true(parallel_test_scheduler is None, \
                    "Must implement parallel tests execution support here", \
                                                                    __file__)
        #~FIXMEnd

        # Check arguments Validity
        if exe_path_map is None:
            exe_path_map = self._get_default_exe_path_map()

        if hash_outlog is None:
            hash_outlog = self.hash_outlog

        ERROR_HANDLER.assert_true(parallel_test_count is None \
                                        or parallel_test_count >= 1, \
                                "invalid parallel tests count ({})".format(\
                                                parallel_test_count), __file__)

        # @Checkpoint: create a checkpoint handler
        cp_func_name = "runtests"
        cp_task_id = 1
        checkpoint_handler = \
                CheckPointHandler(self.get_checkpoint_state_object())
        if restart_checkpointer:
            checkpoint_handler.restart()
        if checkpoint_handler.is_finished():
            logging.warning("%s %s" %("The function 'runtests' is finished", \
                "according to checkpoint, but called again. None returned"))
            if common_mix.confirm_execution("%s %s" % ( \
                                        "Function 'runtests' is already", \
                                        "finished, do you want to restart?")):
                checkpoint_handler.restart()
                logging.info("Restarting the finished 'runtests'")
            else:
                ERROR_HANDLER.error_exit(err_string="%s %s %s" % (\
                        "Execution halted. Cannot continue because no value", \
                        " can be returned. Check the results of the", \
                        "finished execution"), call_location=__file__)

        # @Checkpoint: Get the saved payload (data kept for each tool)
        # pair list of testfailed verdict and execution output
        meta_test_failedverdicts_outlog = \
                                    checkpoint_handler.get_optional_payload()
        if meta_test_failedverdicts_outlog is None:
            meta_test_failedverdicts_outlog = [{}, {}]

        # Make sure the tests are unique
        ERROR_HANDLER.assert_true(len(meta_testcases) == \
                                                len(set(meta_testcases)), \
                                        "not all tests are unique", __file__)

        # For fdupes
        if len(self.tests_duplicates_map) > 0:
            meta_testcases_backup = meta_testcases
            meta_testcases = set(meta_testcases)
            dups_remove_meta_testcases = meta_testcases & \
                                                set(self.tests_duplicates_map)
            dup_toadd_test = {self.tests_duplicates_map[v] for v in \
                                dups_remove_meta_testcases} - meta_testcases
            meta_testcases = (meta_testcases - dups_remove_meta_testcases) \
                                                            | dup_toadd_test

        testcases_by_tool = {}
        for meta_testcase in meta_testcases:
            ttoolalias, testcase = \
                            DriversUtils.reverse_meta_element(meta_testcase)
            if ttoolalias not in testcases_by_tool:
                testcases_by_tool[ttoolalias] = []
            testcases_by_tool[ttoolalias].append(testcase)

        candidate_aliases = []
        for tpos, ttoolalias in enumerate(testcases_by_tool.keys()):
            # @Checkpoint: Check whether already executed
            if not checkpoint_handler.is_to_execute(func_name=cp_func_name, \
                                                taskid=cp_task_id, \
                                                tool=ttoolalias):
                continue
            candidate_aliases.append(ttoolalias)

        # parallelism strategies
        PARA_FULL_DOUBLE = 0
        PARA_ALT_TOOLS_AND_TESTS = 1
        PARA_TOOLS_ONLY = 2
        PARA_TOOLS_TESTS_AS_TOOLS = 3

        parallel_strategy = PARA_TOOLS_ONLY

        # minimum number of tests (across tools) for parallelism
        ptest_thresh = 5
        # minimum number of tests (of the given tool) for tool parallelism
        sub_ptest_thresh = 3

        shared_loc = multiprocessing.RLock()

        parallel_test_count_by_tool = {ta: 1 for ta in candidate_aliases}

        # tool with parallel test exec
        # TODO: find way to pass parallel count here
        if parallel_test_count is None:
            #parallel_test_count = min(10, multiprocessing.cpu_count())
            parallel_test_count = min(20, 2 * multiprocessing.cpu_count())

        cand_alias_joblib = []
        cand_alias_for = []

        para_tools = []
        para_tools = [tt for tt in candidate_aliases if \
                            (len(testcases_by_tool[tt]) >= sub_ptest_thresh \
                              and self.testcases_configured_tools[tt]\
                               [self.TOOL_OBJ_KEY].can_run_tests_in_parallel())
                        ]

        actual_parallel_cond = len(candidate_aliases) > 1 \
                                     and len(meta_testcases) >= ptest_thresh \
                                        and parallel_test_count is not None \
                                        and parallel_test_count > 1

        if parallel_strategy == PARA_ALT_TOOLS_AND_TESTS:
            # the para_tools will run without parallelism, give them all threads
            for tt in para_tools:
                parallel_test_count_by_tool[tt] = parallel_test_count
            seq_tools = list(set(candidate_aliases) - set(para_tools))
            if len(seq_tools) > 1 and actual_parallel_cond:
                cand_alias_joblib = seq_tools
                cand_alias_for = para_tools
            else:
                cand_alias_for = candidate_aliases
        elif parallel_strategy == PARA_TOOLS_ONLY:
            if actual_parallel_cond:
                cand_alias_joblib = candidate_aliases
            else:
                cand_alias_for = candidate_aliases
        elif parallel_strategy == PARA_FULL_DOUBLE:
            # use parallel
            sub_parallel_count = 0 if parallel_test_count is None else \
                        parallel_test_count - len(parallel_test_count_by_tool)
            if sub_parallel_count > 0:
                para_tools.sort(reverse=True, \
                                        key=lambda x: len(testcases_by_tool[x]))
                para_tools_n_tests = sum(\
                            [len(testcases_by_tool[tt]) for tt in para_tools])

                used = 0
                for tt in para_tools:
                    quota = int(len(testcases_by_tool[tt]) * \
                                    sub_parallel_count / para_tools_n_tests)
                    parallel_test_count_by_tool[tt] += quota
                    used += quota
                for tt in para_tools:
                    if used == sub_parallel_count:
                        break
                    parallel_test_count_by_tool[tt] += 1

            if actual_parallel_cond:
                cand_alias_joblib = candidate_aliases
            else:
                cand_alias_for = candidate_aliases
        elif parallel_strategy == PARA_TOOLS_TESTS_AS_TOOLS:
            # split the tests of one tool and
            # make the same tool run multiple times
            ERROR_HANDLER.error_exit("To Be implemented: same tool many times")
        else:
            ERROR_HANDLER.error_exit("Invalid parallel startegy")

        def tool_parallel_test_exec(ttoolalias):
            # Actual execution
            found_a_failure = False
            # Whether the execution was unsuccessful
            test_error = False
            ttool = \
                self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            test_failed_verdicts, test_execoutput = ttool.runtests( \
                                testcases_by_tool[ttoolalias], \
                                exe_path_map, env_vars, \
                                stop_on_failure, \
                                per_test_timeout=per_test_timeout,
                                use_recorded_timeout_times=\
                                    use_recorded_timeout_times, \
                                recalculate_execution_times=\
                                    recalculate_execution_times, \
                                with_output_summary=\
                                            with_output_summary, \
                                hash_outlog=hash_outlog, \
                                parallel_count=\
                                    parallel_test_count_by_tool[ttoolalias])
            with shared_loc:
                for testcase in test_failed_verdicts:
                    meta_testcase = DriversUtils.make_meta_element(\
                                                        testcase, ttoolalias)
                    meta_test_failedverdicts_outlog[0][meta_testcase] = \
                                                test_failed_verdicts[testcase]
                    meta_test_failedverdicts_outlog[1][meta_testcase] = \
                                                    test_execoutput[testcase]
                    if not found_a_failure \
                                and test_failed_verdicts[testcase] == \
                                common_mix.GlobalConstants.FAIL_TEST_VERDICT:
                        found_a_failure = True
                    if not test_error \
                                and test_failed_verdicts[testcase] == \
                            common_mix.GlobalConstants.TEST_EXECUTION_ERROR:
                        test_error = True

                # @Checkpoint: Checkpointing
                checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=ttoolalias, \
                                opt_payload=meta_test_failedverdicts_outlog)
            return found_a_failure, test_error

        #~ def tool_parallel_test_exec()

        if len(cand_alias_joblib) > 0:
            parallel_count_ = min(len(cand_alias_joblib), parallel_test_count)
            joblib.Parallel(n_jobs=parallel_count_, require='sharedmem')\
                    (joblib.delayed(tool_parallel_test_exec)(ttoolalias) \
                        for ttoolalias in cand_alias_joblib)
        if len(cand_alias_for) > 0:
            for tpos, ttoolalias in enumerate(cand_alias_for):
                found_a_failure, test_error = \
                                        tool_parallel_test_exec(ttoolalias)
                if stop_on_failure and found_a_failure:
                    # @Checkpoint: Checkpointing for remaining tools
                    for rem_tool in list(testcases_by_tool.keys())[tpos + 1:]:
                        checkpoint_handler.do_checkpoint(\
                                func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=rem_tool, \
                                opt_payload=meta_test_failedverdicts_outlog)
                    break

        if stop_on_failure:
            # Make sure the non executed test has the uncertain value (None)
            if len(meta_test_failedverdicts_outlog[0]) < len(meta_testcases):
                for meta_testcase in set(meta_testcases) - \
                                    set(meta_test_failedverdicts_outlog[0]):
                    meta_test_failedverdicts_outlog[0][meta_testcase] = \
                            common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT
                    meta_test_failedverdicts_outlog[1][meta_testcase] = \
                            common_matrices.OutputLogData.\
                                                    UNCERTAIN_TEST_OUTLOGDATA

        ERROR_HANDLER.assert_true(len(meta_test_failedverdicts_outlog[0]) == \
                                                        len(meta_testcases), \
                    "mismatch between number of tests and reported verdicts:"
                    + " Tests without verdict are {};".format(\
                               set(meta_testcases) - \
                                    set(meta_test_failedverdicts_outlog[0])) \
                    + " Test not in testlist are {}.".format(\
                               set(meta_test_failedverdicts_outlog[0]) - \
                                    set(meta_testcases)), \
                                                                     __file__)

        # For fdupes
        if len(self.tests_duplicates_map) > 0:
            meta_testcases = meta_testcases_backup
            for i in (0, 1):
                for mtest in dups_remove_meta_testcases:
                    # add to results
                    meta_test_failedverdicts_outlog[i][mtest] = copy.deepcopy(\
                                        meta_test_failedverdicts_outlog[i]\
                                            [self.tests_duplicates_map[mtest]])
                for mtest in dup_toadd_test:
                    # remove from results
                    del meta_test_failedverdicts_outlog[i][mtest]

        if fault_test_execution_matrix_file is not None:
            # Load or Create the matrix
            fault_test_execution_matrix = common_matrices.ExecutionMatrix( \
                                filename=fault_test_execution_matrix_file, \
                                            non_key_col_list=meta_testcases)
            ERROR_HANDLER.assert_true(fault_test_execution_matrix.is_empty(), \
                                "matrix must be empty. Filename is:"
                                " "+fault_test_execution_matrix_file, __file__)
            failverdict2val = {
                common_mix.GlobalConstants.FAIL_TEST_VERDICT: \
                        fault_test_execution_matrix.getActiveCellDefaultVal(),
                common_mix.GlobalConstants.PASS_TEST_VERDICT: \
                            fault_test_execution_matrix.getInactiveCellVal(),
                common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT: \
                    fault_test_execution_matrix.getUncertainCellDefaultVal(),
            }
            cells_dict = {}
            for meta_testcase in meta_test_failedverdicts_outlog[0]:
                cells_dict[meta_testcase] = failverdict2val[\
                            meta_test_failedverdicts_outlog[0][meta_testcase]]

            fault_test_execution_matrix.add_row_by_key(self.FAULT_MATRIX_KEY, \
                                                cells_dict, serialize=True)

        if fault_test_execution_execoutput_file is not None:
            # Load or Create the data object
            fault_test_execution_execoutput = common_matrices.OutputLogData( \
                                filename=fault_test_execution_execoutput_file)
            ERROR_HANDLER.assert_true(\
                            fault_test_execution_execoutput.is_empty(), \
                                        "outlog data must be empty", __file__)
            fault_test_execution_execoutput.add_data(\
                                    {self.PROGRAM_EXECOUTPUT_KEY: \
                                         meta_test_failedverdicts_outlog[1]}, \
                                                                serialize=True)

        # @Checkpoint: Finished
        detailed_exectime = {}
        for ttoolalias in testcases_by_tool.keys():
            tt = self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            detailed_exectime[ttoolalias] = (\
                        tt.get_checkpointer().get_execution_time(),\
                        tt.get_checkpointer().get_detailed_execution_time())

        checkpoint_handler.set_finished( \
                                    detailed_exectime_obj=detailed_exectime)

        if finish_destroy_checkpointer:
            checkpoint_handler.destroy()

        return meta_test_failedverdicts_outlog
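
The stop_on_failure handling above guarantees that every meta test ends up with a verdict even when execution stops early. Below is a minimal sketch of that bookkeeping; the constant names are illustrative stand-ins, not muteria's real GlobalConstants.

# Minimal sketch of the verdict bookkeeping above (constants are stand-ins):
# tests skipped after an early stop are filled with an "uncertain" verdict so
# the result dictionary always covers the whole meta test list.
PASS, FAIL, UNCERTAIN = "PASS", "FAIL", "UNCERTAIN"

def fill_missing_verdicts(meta_testcases, verdicts):
    """Return a copy of verdicts where tests that never ran get UNCERTAIN."""
    filled = dict(verdicts)
    for testcase in set(meta_testcases) - set(verdicts):
        filled[testcase] = UNCERTAIN
    return filled

# Usage: two tests ran before stop_on_failure kicked in; the third is
# reported as uncertain.
all_tests = ["t1", "t2", "t3"]
executed = {"t1": PASS, "t2": FAIL}
assert fill_missing_verdicts(all_tests, executed)["t3"] == UNCERTAIN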
Exemplo n.º 27
0
    def _do_instrument_code (self, outputdir, exe_path_map, \
                                        code_builds_factory, \
                                        enabled_criteria, parallel_count=1):
        # Setup
        if os.path.isdir(self.instrumented_code_storage_dir):
            shutil.rmtree(self.instrumented_code_storage_dir)
        os.mkdir(self.instrumented_code_storage_dir)
        if os.path.isdir(self.mutant_data):
            shutil.rmtree(self.mutant_data)
        os.mkdir(self.mutant_data)

        prog = 'mart'

        # get llvm compiler path
        ret, out, err = DriversUtils.execute_and_get_retcode_out_err(\
                                                        prog, ['--version'])
        llvm_compiler_path = None
        for line in out.splitlines():
            line = line.strip()
            if line.startswith('LLVM tools dir:'):
                llvm_compiler_path = line.split()[3]  #[1:-1]
                break

        ERROR_HANDLER.assert_true(llvm_compiler_path is not None, \
                                'Problem getting llvm path for mart', __file__)

        # Build into LLVM
        back_llvm_compiler = 'clang'
        rel_path_map = {}
        exes, _ = code_builds_factory.repository_manager.\
                                                    get_relative_exe_path_map()
        for exe in exes:
            filename = os.path.basename(exe)
            rel_path_map[exe] = os.path.join(self.mutant_data, filename)
        pre_ret, ret, post_ret = code_builds_factory.transform_src_into_dest(\
                        src_fmt=CodeFormats.C_SOURCE,\
                        dest_fmt=CodeFormats.LLVM_BITCODE,\
                        src_dest_files_paths_map=rel_path_map,\
                        compiler=back_llvm_compiler, flags_list=['-g'], \
                        clean_tmp=True, reconfigure=True, \
                        llvm_compiler_path=llvm_compiler_path)
        if ret == common_mix.GlobalConstants.TEST_EXECUTION_ERROR:
            ERROR_HANDLER.error_exit("Program {}.".format(\
                                'LLVM (clang) built problematic'), __file__)

        # Update exe_map to reflect bitcode extension
        rel2bitcode = {}
        for r_file, b_file in list(rel_path_map.items()):
            bc = b_file + '.bc'
            ERROR_HANDLER.assert_true(os.path.isfile(bc), \
                                    "Bitcode file not existing: "+bc, __file__)
            rel2bitcode[r_file] = bc

        ERROR_HANDLER.assert_true(len(rel_path_map) == 1, \
                            "Support single bitcode module for now", __file__)

        bitcode_file = rel2bitcode[list(rel2bitcode.keys())[0]]

        # mart params
        bool_param, k_v_params = self._get_default_params()
        if TestCriteria.STRONG_MUTATION in enabled_criteria:
            bool_param['-write-mutants'] = True

        args = [bp for bp, en in list(bool_param.items()) if en]
        for k, v in list(k_v_params.items()):
            if v is not None:
                args += [k, v]
        args.append(bitcode_file)

        # Execute Mart
        cwd = os.getcwd()
        os.chdir(self.mutant_data)
        ret, out, err = DriversUtils.execute_and_get_retcode_out_err(\
                                                                    prog, args)
        os.chdir(cwd)

        if (ret != 0):
            logging.error(out)
            logging.error(err)
            logging.error("\n>> CMD: " + " ".join([prog] + args) + '\n')
            ERROR_HANDLER.error_exit("mart failed'", __file__)

        # write down the rel_path_map
        ERROR_HANDLER.assert_true(not os.path.isfile(\
                self.instrumentation_details), "must not exist here", __file__)
        store_obj = {c.get_str(): {} for c in enabled_criteria}
        for k, v in list(rel_path_map.items()):
            exe_file = os.path.basename(v)
            if TestCriteria.WEAK_MUTATION in enabled_criteria:
                crit_str = TestCriteria.WEAK_MUTATION.get_str()
                store_obj[crit_str][k] = exe_file + '.WM'
            if TestCriteria.MUTANT_COVERAGE in enabled_criteria:
                crit_str = TestCriteria.MUTANT_COVERAGE.get_str()
                store_obj[crit_str][k] = exe_file + '.COV'
            if TestCriteria.STRONG_MUTATION in enabled_criteria:
                crit_str = TestCriteria.STRONG_MUTATION.get_str()
                store_obj[crit_str][k] = exe_file + '.MetaMu'
        common_fs.dumpJSON(store_obj, self.instrumentation_details)
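
The instrumentation step above relies on two small mechanics: locating the LLVM tools directory in the output of `mart --version`, and flattening boolean and key/value parameters into the argument list. The sketch below illustrates both; the sample version text and parameter names are made up, not mart's actual output or options.

# Illustrative sketch of the helper steps used above; the sample --version
# text and the parameter names are assumptions, not mart's real interface.
def parse_llvm_tools_dir(version_output):
    for line in version_output.splitlines():
        line = line.strip()
        if line.startswith('LLVM tools dir:'):
            # 'LLVM tools dir: <path>' -> token at index 3 is the path
            return line.split()[3]
    return None

def build_tool_args(bool_param, k_v_params, bitcode_file):
    args = [bp for bp, enabled in bool_param.items() if enabled]
    for k, v in k_v_params.items():
        if v is not None:
            args += [k, str(v)]
    args.append(bitcode_file)
    return args

sample_out = "mart x.y\nLLVM tools dir: /usr/lib/llvm/bin\n"
assert parse_llvm_tools_dir(sample_out) == "/usr/lib/llvm/bin"
print(build_tool_args({'-write-mutants': True}, {'-some-flag': None},
                      'prog.bc'))  # ['-write-mutants', 'prog.bc']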
Exemplo n.º 28
0
    def _runtest_separate_criterion_program (self, criterion, testcases, \
                                    matrix,
                                    executionoutput, \
                                    criteria_element_list, \
                                    cover_criteria_elements_once=False, \
                                    prioritization_module=None, \
                                    test_parallel_count=1,
                                    serialize_period=5,
                                    checkpoint_handler=None, \
                                    cp_calling_func_name=None, \
                                    cp_calling_done_task_id=None, \
                                    cp_calling_tool=None):
        '''
        Note: Here the temporary matrix is used as checkpoint 
                (with frequency the 'serialize_period' parameter). 
            The checkpointer is mainly used for the execution time
            (TODO: support parallelism: per test outdata)
        '''
        # FIXME: Support parallelism, then remove the code
        # below:
        ERROR_HANDLER.assert_true(test_parallel_count <= 1, \
                    "FIXME: Must first implement support for parallel mutation")
        #~ FIXME end

        # @Checkpoint: validate
        if checkpoint_handler is not None:
            ERROR_HANDLER.assert_true(cp_calling_func_name is not None)
            ERROR_HANDLER.assert_true(cp_calling_done_task_id is not None)
            ERROR_HANDLER.assert_true(cp_calling_tool is not None)
            cp_data = checkpoint_handler.get_optional_payload()
            if cp_data is None:
                cp_data = [{}, {}]
        else:
            cp_data = [{}, {}]

        failverdict_to_val_map = {
                    common_mix.GlobalConstants.FAIL_TEST_VERDICT: \
                                            matrix.getActiveCellDefaultVal(),
                    common_mix.GlobalConstants.PASS_TEST_VERDICT: \
                                            matrix.getInactiveCellVal(),
                    common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT: \
                                            matrix.getUncertainCellDefaultVal()
        }

        assert serialize_period >= 1, \
                            "Serialize period must be a positive integer (>= 1)"

        # matrix based checkpoint
        completed_elems = set(cp_data[0])

        if criteria_element_list is None:
            criteria_element_list = self.get_criterion_info_object(criterion).\
                                                            get_elements_list()

        ERROR_HANDLER.assert_true(prioritization_module is not None,
                                  "prioritization module must be passed")

        timeout_times = \
                    self.config.SEPARATED_TEST_EXECUTION_EXTRA_TIMEOUT_TIMES

        # in case the test list is empty, do nothing
        if len(testcases) > 0:
            # main loop for elements execution
            num_elems = len(criteria_element_list)
            pos = -1 + len(completed_elems)
            ## prepare the optimizer
            prioritization_module.reset(self.config.get_tool_config_alias(), \
                                            criteria_element_list, testcases)
            while prioritization_module.has_next_test_objective():
                #pos += 1 # Done below in the case where it was not completed
                element = prioritization_module.get_next_test_objective()

                # @Checkpointing: check if already executed
                if element in completed_elems:
                    continue
                else:
                    pos += 1

                logging.debug("# Executing {} element {} ({}/{}) ...".format( \
                                    criterion.get_str(), \
                                    DriversUtils.make_meta_element(element, \
                                        self.config.get_tool_config_alias()), \
                                    pos, num_elems))

                # execute element with the given testcases
                element_executable_path = \
                                self._get_criterion_element_executable_path(\
                                                            criterion, element)
                execution_environment_vars = \
                                self._get_criterion_element_environment_vars(\
                                                            criterion, element)
                # run the optimizer with all tests targeting the test objective
                may_cov_tests = prioritization_module\
                                        .get_test_execution_optimizer(element)\
                                        .select_tests(100, is_proportion=True)

                cannot_cov_tests = set(testcases) - set(may_cov_tests)

                fail_verdicts, exec_outs_by_tests = \
                                    self.meta_test_generation_obj.runtests(\
                                        meta_testcases=may_cov_tests, \
                                        exe_path_map=element_executable_path, \
                                        env_vars=execution_environment_vars, \
                                        stop_on_failure=\
                                                cover_criteria_elements_once, \
                                        use_recorded_timeout_times=\
                                                            timeout_times, \
                                        with_output_summary=(executionoutput \
                                                                is not None), \
                                        parallel_test_count=None, \
                                        restart_checkpointer=True)
                prioritization_module.feedback(element, fail_verdicts)

                fail_verdicts.update({\
                            v: common_mix.GlobalConstants.PASS_TEST_VERDICT \
                                                for v in cannot_cov_tests})
                # put in row format for matrix
                matrix_row_key = element
                matrix_row_values = \
                                {tc:failverdict_to_val_map[fail_verdicts[tc]] \
                                                    for tc in fail_verdicts}
                serialize_on = (pos % serialize_period == 0)
                cp_data[0][matrix_row_key] = matrix_row_values

                if executionoutput is not None:
                    cp_data[1][element] = exec_outs_by_tests

                # @Checkpointing: for time
                if serialize_on and checkpoint_handler is not None:
                    checkpoint_handler.do_checkpoint( \
                                            func_name=cp_calling_func_name, \
                                            taskid=cp_calling_done_task_id, \
                                            tool=cp_calling_tool, \
                                            opt_payload=cp_data)

        # Write the execution data into the matrix
        for matrix_row_key, matrix_row_values in list(cp_data[0].items()):
            matrix.add_row_by_key(matrix_row_key, matrix_row_values, \
                                                            serialize=False)

        # TODO: Make the following two instructions atomic
        # final serialization (in case #Muts is not a multiple of serialize_period)
        matrix.serialize()

        # Write the execution output data
        if executionoutput is not None:
            if len(cp_data[1]) > 0:
                executionoutput.add_data(cp_data[1], serialize=True)
            else:
                executionoutput.serialize()
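
The docstring above describes using the accumulated matrix payload as a checkpoint, written every serialize_period elements. The sketch below shows that pattern in isolation; the handler class is a trivial stand-in, not muteria's checkpoint handler API.

# Stand-in sketch of the serialize_period checkpointing pattern used above;
# DummyCheckpointHandler is illustrative, not muteria's CheckPointHandler.
class DummyCheckpointHandler:
    def __init__(self):
        self.saved_payloads = []
    def do_checkpoint(self, opt_payload):
        # a real handler would persist the payload to disk here
        self.saved_payloads.append(dict(opt_payload))

def run_elements(elements, run_one, serialize_period=5, handler=None):
    payload = {}
    for pos, element in enumerate(elements):
        payload[element] = run_one(element)
        if handler is not None and pos % serialize_period == 0:
            handler.do_checkpoint(opt_payload=payload)
    return payload

handler = DummyCheckpointHandler()
run_elements(['mutant-1', 'mutant-2', 'mutant-3'], lambda e: 'PASS', \
                                        serialize_period=2, handler=handler)
assert len(handler.saved_payloads) == 2  # checkpoints at positions 0 and 2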
Exemplo n.º 29
0
    def _call_generation_run(self, runtool, args):
        # Delete any klee-out-*
        for d in os.listdir(self.tests_working_dir):
            if d.startswith('klee-out-'):
                shutil.rmtree(os.path.join(self.tests_working_dir, d))

        call_shadow_wrapper_file = os.path.join(self.tests_working_dir, \
                                                                "shadow_wrap")

        devtest_toolalias = self.parent_meta_tool.get_devtest_toolalias()
        ERROR_HANDLER.assert_true(devtest_toolalias is not None, \
                        "devtest must be used when using shadow_se", __file__)

        #test_list = list(self.code_builds_factory.repository_manager\
        #                                               .get_dev_tests_list())
        test_list = []
        for meta_test in self.parent_meta_tool.get_testcase_info_object(\
                               candidate_tool_aliases=[devtest_toolalias])\
                                                            .get_tests_list():
            toolalias, test = DriversUtils.reverse_meta_element(meta_test)
            ERROR_HANDLER.assert_true(toolalias == devtest_toolalias, \
                           "BUG in above get_testcase_info_object", __file__)
            test_list.append(test)

        # Get list of klee_change, klee_get_true/false locations.
        klee_change_stmts = []

        get_lines_callback_obj = self.GetLinesCallbackObject()
        get_lines_callback_obj.set_pre_callback_args(self.code_builds_factory\
                                    .repository_manager.revert_src_list_files)
        get_lines_callback_obj.set_post_callback_args(klee_change_stmts)

        pre_ret, post_ret = self.code_builds_factory.repository_manager\
                                    .custom_read_access(get_lines_callback_obj)
        ERROR_HANDLER.assert_true(pre_ret == \
                                common_mix.GlobalConstants.COMMAND_SUCCESS,\
                                                    "pre failed", __file__)
        ERROR_HANDLER.assert_true(post_ret == \
                                common_mix.GlobalConstants.COMMAND_SUCCESS,\
                                                    "post failed", __file__)

        ERROR_HANDLER.assert_true(len(klee_change_stmts) > 0, \
                        "No klee_change statement in the sources", __file__)

        # Filter only tests that cover those locations,
        # if there is stmt coverage matrix
        stmt_cov_mat_file = self.head_explorer.get_file_pathname(\
                            fd_structure.CRITERIA_MATRIX[criteria.TestCriteria\
                                                        .STATEMENT_COVERAGE])
        cov_tests = None
        if os.path.isfile(stmt_cov_mat_file):
            stmt_cov_mat = common_matrices.ExecutionMatrix(\
                                                    filename=stmt_cov_mat_file)
            # due to possible wrapper test splitting we update test_list here
            tmp_test_list = []
            for mt in stmt_cov_mat.get_nonkey_colname_list():
                alias, t = DriversUtils.reverse_meta_element(mt)
                if alias == devtest_toolalias:
                    tmp_test_list.append(t)
            test_list = tmp_test_list

            meta_stmts = list(stmt_cov_mat.get_keys())
            tool_aliases = set()
            for meta_stmt in meta_stmts:
                alias, stmt = DriversUtils.reverse_meta_element(meta_stmt)
                tool_aliases.add(alias)
            klee_change_meta_stmts = []
            for alias in tool_aliases:
                klee_change_meta_stmts += [\
                                    DriversUtils.make_meta_element(e, alias) \
                                                    for e in klee_change_stmts]
            klee_change_meta_stmts = list(set(meta_stmts) & \
                                                set(klee_change_meta_stmts))

            cov_tests = set()
            if len(klee_change_meta_stmts) > 0:
                for _, t in stmt_cov_mat.query_active_columns_of_rows(\
                                row_key_list=klee_change_meta_stmts).items():
                    cov_tests |= set(t)
            else:
                logging.warning('No test covers the patch (SHADOW)!')
            #    ERROR_HANDLER.assert_true(len(klee_change_meta_stmts) > 0, \
            #                            "No test covers the patch", __file__)

        # tests will be generated in the same dir that has the input .bc file
        os.mkdir(self.tests_storage_dir)

        # obtain candidate tests
        cand_testpair_list = []
        for test in test_list:
            meta_test = DriversUtils.make_meta_element(test, devtest_toolalias)
            if cov_tests is not None and meta_test not in cov_tests:
                continue
            cand_testpair_list.append((test, meta_test))

        # Adjust the max-time in args
        ## locate max-time
        per_test_hard_timeout = None
        per_test_timeout = None
        if len(cand_testpair_list) > 0:
            cur_max_time = float(self.get_value_in_arglist(args, 'max-time'))
            if self.driver_config.get_gen_timeout_is_per_test():
                per_test_timeout = cur_max_time
            else:
                per_test_timeout = max(60, \
                                       cur_max_time / len(cand_testpair_list))
            self.set_value_in_arglist(args, 'max-time', str(per_test_timeout))

            # give time to dump remaining states
            per_test_hard_timeout = per_test_timeout + \
                                self.config.TEST_GEN_TIMEOUT_FRAMEWORK_GRACE

        #per_test_hard_timeout = 300 #DBG
        # Set the wrapper
        with open(call_shadow_wrapper_file, 'w') as wf:
            wf.write('#! /bin/bash\n\n')
            wf.write('set -u\n')
            wf.write('set -o pipefail\n\n')
            wf.write('ulimit -s unlimited\n')

            # timeout the shadow execution (some tests create daemons which
            # are not killed by the test timeout). ALSO MAKE SURE TO
            # DEACTIVATE THE TIMEOUT IN THE TEST SCRIPT
            kill_after = 30
            wf.write('time_out_cmd="/usr/bin/timeout --kill-after={}s {}"\n'.\
                                     format(kill_after, per_test_hard_timeout))
            # add the kill-after delay plus time for timeout to act
            per_test_hard_timeout += kill_after + 60

            #wf.write(' '.join(['exec', runtool] + args + ['"${@:1}"']) + '\n')
            wf.write('\nstdindata="{}/klee-last/{}"\n'.format(\
                                                    self.tests_working_dir, \
                                        KTestTestFormat.STDIN_KTEST_DATA_FILE))
            wf.write('tmpstdindata="{}/{}"\n\n'.format(self.tests_working_dir,\
                                        KTestTestFormat.STDIN_KTEST_DATA_FILE))
            wf.write('if [ -t 0 ] # check if stdin is a terminal (no piped data)\n')
            wf.write('then\n')
            wf.write(' '.join(['\t(', '$time_out_cmd', runtool] + args + \
                                    ['"${@:1}"', ') ; EXIT_CODE=$?', '\n']))
            wf.write('\t/usr/bin/touch $tmpstdindata\n')

            wf.write('else\n')
            wf.write('\t(/bin/cat - > $tmpstdindata ) || EXIT_CODE=1\n')
            wf.write(' '.join(['\t(', '/bin/cat $tmpstdindata | ', \
                            '$time_out_cmd', runtool] + args + \
                                ['"${@:1}"', ') ; EXIT_CODE=$?', '\n']))
            wf.write('fi\n\n')
            wf.write('/bin/mv $tmpstdindata $stdindata || EXIT_CODE=2\n')
            wf.write('\n# Provoke "unbound variable" if KLEE fails\n')
            wf.write('# Preserve the KLEE exit code\n')
            wf.write('exit $EXIT_CODE\n')
        os.chmod(call_shadow_wrapper_file, 0o775)

        # run test
        exes, _ = self.code_builds_factory.repository_manager\
                                                .get_relative_exe_path_map()
        ERROR_HANDLER.assert_true(len(exes) == 1, \
                                            "Must have a single exe", __file__)
        exe_path_map = {e: call_shadow_wrapper_file for e in exes}
        env_vars = {}
        self._dir_chmod777(self.tests_storage_dir)
        for test, meta_test in cand_testpair_list:
            self.parent_meta_tool.execute_testcase(meta_test, exe_path_map, \
                                    env_vars, timeout=per_test_hard_timeout,\
                                                    with_output_summary=False)

            #logging.debug("DBG: Just executed test '{}'".format(meta_test))
            #input(">>>> ") #DBG

            # copy the klee out
            test_out = os.path.join(self.tests_storage_dir, \
                                          self.get_sorage_name_of_test(test))
            os.mkdir(test_out)
            for d in glob.glob(self.tests_working_dir + "/klee-out-*"):
                # make sure we can do anything with it
                self._dir_chmod777(d)
                if not self.keep_first_test:
                    first_test = os.path.join(d, 'test000001.ktest')
                    if os.path.isfile(first_test):
                        shutil.move(first_test, first_test + '.disable')
                shutil.move(d, test_out)
            ERROR_HANDLER.assert_true(len(list(os.listdir(test_out))) > 0, \
                                "Shadow generated no test for tescase: "+test,\
                                                                    __file__)
            if os.path.islink(os.path.join(self.tests_working_dir, \
                                                                'klee-last')):
                os.unlink(os.path.join(self.tests_working_dir, 'klee-last'))

        # store klee_change locs
        common_fs.dumpJSON(klee_change_stmts, self.klee_change_locs_list_file)
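
The wrapper setup above splits the test-generation budget across candidate tests and then pads the hard (wrapper-level) timeout so KLEE can still dump its remaining states. A small sketch of that arithmetic follows; the grace and kill-after values are illustrative defaults, not the framework's configuration.

# Sketch of the per-test timeout budgeting done above (grace/kill-after
# values are illustrative defaults, not the framework's configuration).
def compute_timeouts(max_time, num_tests, per_test_budget=False, \
                                        framework_grace=60, kill_after=30):
    if per_test_budget:
        per_test_timeout = float(max_time)
    else:
        # split the global budget, but never go under 60s per test
        per_test_timeout = max(60.0, float(max_time) / num_tests)
    hard_timeout = per_test_timeout + framework_grace
    # leave room for /usr/bin/timeout's --kill-after delay to take effect
    hard_timeout += kill_after + 60
    return per_test_timeout, hard_timeout

soft, hard = compute_timeouts(max_time=600, num_tests=20)
assert (soft, hard) == (60.0, 210.0)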
Exemplo n.º 30
0
    def _do_generate_tests (self, exe_path_map, outputdir, \
                                        code_builds_factory, max_time=None):
        # Setup
        if os.path.isdir(self.tests_working_dir):
            shutil.rmtree(self.tests_working_dir)
        os.mkdir(self.tests_working_dir)
        if os.path.isdir(self.tests_storage_dir):
            shutil.rmtree(self.tests_storage_dir)

        prog = 'klee'
        default_sym_args = ['-sym-arg', '5']
        back_llvm_compiler = None  #'clang'

        rel_path_map = {}
        exes, _ = code_builds_factory.repository_manager.\
                                                    get_relative_exe_path_map()
        for exe in exes:
            filename = os.path.basename(exe)
            rel_path_map[exe] = os.path.join(self.tests_working_dir, filename)
        pre_ret, ret, post_ret = code_builds_factory.transform_src_into_dest(\
                        src_fmt=CodeFormats.C_SOURCE,\
                        dest_fmt=CodeFormats.LLVM_BITCODE,\
                        src_dest_files_paths_map=rel_path_map,\
                        compiler=back_llvm_compiler, \
                        clean_tmp=True, reconfigure=True)
        if ret == common_mix.GlobalConstants.TEST_EXECUTION_ERROR:
            ERROR_HANDLER.error_exit("Program {}.".format(\
                                'LLVM built problematic'), __file__)

        # Update exe_map to reflect bitcode extension
        rel2bitcode = {}
        for r_file, b_file in list(rel_path_map.items()):
            bc = b_file + '.bc'
            ERROR_HANDLER.assert_true(os.path.isfile(bc), \
                                    "Bitcode file not existing: "+bc, __file__)
            rel2bitcode[r_file] = bc

        ERROR_HANDLER.assert_true(len(rel_path_map) == 1, \
                            "Support single bitcode module for now", __file__)

        bitcode_file = rel2bitcode[list(rel2bitcode.keys())[0]]

        # klee params
        bool_param, k_v_params = self._get_default_params()
        if max_time is not None:
            k_v_params['-max-time'] = str(max_time)

        args = [bp for bp, en in list(bool_param.items()) if en]
        for k, v in list(k_v_params.items()):
            if v is not None:
                args += [k, str(v)]
        args.append(bitcode_file)

        # sym args
        klee_sym_args = default_sym_args
        uc = self.config.get_tool_user_custom()
        if uc is not None:
            post_bc_cmd = uc.POST_TARGET_CMD_ORDERED_FLAGS_LIST
            if post_bc_cmd is not None:
                klee_sym_args = []
                for tup in post_bc_cmd:
                    klee_sym_args += list(tup)
        args += klee_sym_args

        # Execute Klee
        ret, out, err = DriversUtils.execute_and_get_retcode_out_err(\
                                                                    prog, args)

        if (ret != 0):
            logging.error(out)
            logging.error(err)
            logging.error("\n>> CMD: " + " ".join([prog] + args) + '\n')
            ERROR_HANDLER.error_exit("klee failed'", __file__)

        store_obj = {r: os.path.basename(b) for r, b in rel2bitcode.items()}
        common_fs.dumpJSON(store_obj, self.test_details_file)

    #~ def _do_generate_tests()


#~ class CustomTestcases
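
For completeness, a minimal sketch of how the KLEE command line above is assembled: key/value parameters come first, then the bitcode file, then the symbolic-argument flags, where user-supplied flag tuples (if any) replace the defaults. The user_flags structure below is a stand-in for the tool user-custom POST_TARGET_CMD_ORDERED_FLAGS_LIST used in the code.

# Sketch of the KLEE argument assembly above; user_flags stands in for the
# user-custom POST_TARGET_CMD_ORDERED_FLAGS_LIST from the tool configuration.
def build_klee_args(bitcode_file, max_time=None, user_flags=None):
    args = []
    if max_time is not None:
        args += ['-max-time', str(max_time)]
    args.append(bitcode_file)
    # symbolic-argument flags: user-provided tuples override the default
    sym_args = ['-sym-arg', '5']
    if user_flags is not None:
        sym_args = [tok for tup in user_flags for tok in tup]
    return args + sym_args

print(build_klee_args('prog.bc', max_time=120))
# ['-max-time', '120', 'prog.bc', '-sym-arg', '5']
print(build_klee_args('prog.bc', user_flags=[('-sym-args', '0', '2', '3')]))
# ['prog.bc', '-sym-args', '0', '2', '3']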