Example #1
File: main.py Project: Marvinmw/muteria
 def merge_lmatrix_into_right(lmatrix_file, rmatrix_file):
     if not os.path.isfile(rmatrix_file):
         shutil.copy2(lmatrix_file, rmatrix_file)
     else:
         lmatrix = common_matrices.ExecutionMatrix(filename=lmatrix_file)
         rmatrix = common_matrices.ExecutionMatrix(filename=rmatrix_file)
         rmatrix.update_with_other_matrix(lmatrix)
         rmatrix.serialize()
Example #2
 def merge_lmatrix_into_right(lmatrix_file, rmatrix_file):
     if not os.path.isfile(rmatrix_file):
         shutil.copy2(lmatrix_file, rmatrix_file)
     else:
         lmatrix = common_matrices.ExecutionMatrix(filename=lmatrix_file)
         rmatrix = common_matrices.ExecutionMatrix(filename=rmatrix_file)
         rmatrix.update_with_other_matrix(lmatrix, override_existing=True, \
                                 ask_confirmation_with_exist_missing=True)
         rmatrix.serialize()
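A minimal usage sketch of the merge helper above (the run directories and file names are hypothetical): the first call simply copies the matrix, and each later call merges into the existing file and re-serializes it.

import os

run_dirs = ["run_1", "run_2", "run_3"]   # hypothetical per-run result dirs
merged_file = "merged_PASSFAIL.csv"      # hypothetical aggregate matrix file

for run_dir in run_dirs:
    run_matrix = os.path.join(run_dir, "PASSFAIL.csv")
    # Copies on the first call, merges on the subsequent ones
    merge_lmatrix_into_right(run_matrix, merged_file)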
Example #3
def load(resdir, fault_revealing=True):
    # load fault revealing tests
    fail_tests = None
    if fault_revealing:
        f_file = os.path.join(resdir, "fail_test_checking",
                              "fault_reveling_tests.txt")
        fail_tests = []
        with open(f_file) as f:
            for line in f:
                fail_tests.append(line.strip())

    # load all tests
    post_pf_file = os.path.join(resdir, "post", "RESULTS_DATA", "matrices",
                                "PASSFAIL.csv")
    post_pf_mat = common_matrices.ExecutionMatrix(post_pf_file)
    all_tests_post = post_pf_mat.get_nonkey_colname_list()
    pre_pf_file = os.path.join(resdir, "pre", "RESULTS_DATA", "matrices",
                               "PASSFAIL.csv")
    pre_pf_mat = common_matrices.ExecutionMatrix(pre_pf_file)
    all_tests_pre = pre_pf_mat.get_nonkey_colname_list()

    # Consider tests that are not flaky in both pre and post
    all_tests = list(set(all_tests_pre) & set(all_tests_post))

    # load execution outputs
    pre_orig_outlog_file = os.path.join(resdir, "pre", "RESULTS_DATA",
                                        "testexecution_outputs",
                                        "program_output.json")
    pre_muts_outlog_file = os.path.join(resdir, "pre", "RESULTS_DATA",
                                        "testexecution_outputs",
                                        "STRONG_MUTATION_output.json")
    post_orig_outlog_file = os.path.join(resdir, "post", "RESULTS_DATA",
                                         "testexecution_outputs",
                                         "program_output.json")
    post_muts_outlog_file = os.path.join(resdir, "post", "RESULTS_DATA",
                                         "testexecution_outputs",
                                         "STRONG_MUTATION_output.json")

    pre_orig_outlog = common_matrices.OutputLogData(pre_orig_outlog_file)
    pre_muts_outlog = common_matrices.OutputLogData(pre_muts_outlog_file)
    post_orig_outlog = common_matrices.OutputLogData(post_orig_outlog_file)
    post_muts_outlog = common_matrices.OutputLogData(post_muts_outlog_file)

    # Compute relevant mutants
    relevant_mutants_to_relevant_tests = get_relevant_mutants_to_relevant_tests(
        pre_orig_outlog, pre_muts_outlog, post_orig_outlog, post_muts_outlog)

    # load matrices and compute mutant killtest mapping
    post_sm_file = os.path.join(resdir, "post", "RESULTS_DATA", "matrices",
                                "STRONG_MUTATION.csv")
    post_sm_mat = common_matrices.ExecutionMatrix(post_sm_file)
    mutants_to_killingtests = post_sm_mat.query_active_columns_of_rows()
    tests_to_killed_mutants = post_sm_mat.query_active_rows_of_columns()

    # return data
    return all_tests, fail_tests, relevant_mutants_to_relevant_tests, mutants_to_killingtests, tests_to_killed_mutants
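A possible way to call this loader, assuming the results directory follows the layout used in the paths above ("resdir" is hypothetical):

all_tests, fail_tests, relevant_map, kill_map, test_to_killed = \
                                        load("resdir", fault_revealing=True)
print("{} common tests, {} fault revealing tests".format(
                                        len(all_tests), len(fail_tests)))
print("{} relevant mutants".format(len(relevant_map)))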
def getHardToPropagateMutants (strong_mutant_kill_matrix_file, \
                                weak_mutant_kill_matrix_file, \
                                threshold=0.10, \
                                selected_tests=None):
    """ Return the pair of propagation ratio and list of hard to propagate mutant 
        (strongly killed by less than threshold proportion of test
        that weakly kill them, 0 < threshold < 1 ).
    """

    # check threshold
    ERROR_HANDLER.assert_true(threshold > 0 and threshold < 1, \
                            "Invalid threshold, must be in interval (0,1)", \
                                                                __file__)

    # load matrix
    sm_matrix = \
            common_matrices.ExecutionMatrix(strong_mutant_kill_matrix_file)
    wm_matrix = \
            common_matrices.ExecutionMatrix(weak_mutant_kill_matrix_file)

    # get mutants_to_killing_tests
    sm_mutants_to_killing_tests = sm_matrix.query_active_columns_of_rows()
    wm_mutants_to_killing_tests = wm_matrix.query_active_columns_of_rows()

    all_tests = sm_matrix.get_nonkey_colname_list()

    ERROR_HANDLER.assert_true(set(sm_mutants_to_killing_tests) == \
                        set(wm_mutants_to_killing_tests), \
                            "strong and weak mutant killing matrices "
                            "have different mutants", __file__)
    ERROR_HANDLER.assert_true(set(all_tests) == \
                        set(wm_matrix.get_nonkey_colname_list()), \
                            "strong and weak mutant killing matrices "
                            "have different tests", __file__)

    if selected_tests is not None:
        _filter_out_tests(all_tests, selected_tests, \
                                                sm_mutants_to_killing_tests)
        _filter_out_tests(all_tests, selected_tests, \
                                                wm_mutants_to_killing_tests)
        all_tests = selected_tests
    
    dualkillratio = {}
    for mut, sm_killtests in sm_mutants_to_killing_tests.items():
        wm_killtests = wm_mutants_to_killing_tests[mut]
        if len(wm_killtests) == 0:
            dualkillratio[mut] = 1
        else:
            dualkillratio[mut] = len(sm_killtests) * 1.0 / len(wm_killtests)

    return dualkillratio, [mut for mut, h in dualkillratio.items() if h <= threshold]
def getHardToKillMutants (mutant_kill_matrix_file, threshold=0.025, \
                                                        selected_tests=None):
    """ Return the pair of kill ratio and list of hard to kill mutant (kill by less than threshold
        proportion of test, 0 < threshold < 1 ).
    """

    # check threshold
    ERROR_HANDLER.assert_true(threshold > 0 and threshold < 1, \
                            "Invalid threshold, must be in interval (0,1)", \
                                                                __file__)

    # load matrix
    matrix = common_matrices.ExecutionMatrix(mutant_kill_matrix_file)

    # get mutants_to_killing_tests
    mutants_to_killing_tests = matrix.query_active_columns_of_rows()

    all_tests = matrix.get_nonkey_colname_list()
    if selected_tests is not None:
        _filter_out_tests(all_tests, selected_tests, mutants_to_killing_tests)
        all_tests = selected_tests
    
    killratio = {}
    for mut, killtests in mutants_to_killing_tests.items():
        if len(killtests) == 0:
            killratio[mut] = 1
        else:
            killratio[mut] = len(killtests) * 1.0 / len(all_tests)

    return killratio, [mut for mut, h in killratio.items() if h <= threshold]
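A usage sketch (the matrix file name and threshold are illustrative): compute the kill ratios and report how many mutants fall below the threshold.

killratio, hard_to_kill = getHardToKillMutants("STRONG_MUTATION.csv",
                                               threshold=0.025)
print("{} of {} mutants are hard to kill".format(len(hard_to_kill),
                                                 len(killratio)))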
def getFaultRevealingMutants (strong_mutant_kill_matrix_file, \
                                expected_program_output_file, \
                                program_output_file, \
                                threshold=1.0, \
                                selected_tests=None):
    """
    This function computes the set of fault revealing mutants.

    The inputs are:
    - strong mutant kill matrix file,
    - expected program output file, used to determine which tests fail
    - obtained program output file, used to determine which tests fail
    - threshold, in case a relaxed notion of fault revelation is wanted
    - selected tests, in case only part of the tests should be used

    :return: A pair is returned: the first element is the set of fault
            revealing mutants, and the second element is a dict mapping each
            mutant to its fault revelation ratio
            ('# tests that kill and find the fault' / '# tests that kill').
            For equivalent mutants, the division is invalid,
            so the value is set to -1.
    """
    prog_out = common_matrices.OutputLogData(filename=program_output_file)
    exp_prog_out = common_matrices.OutputLogData(\
                                            filename=expected_program_output_file)
    _, prog_out_uniq = list(prog_out.get_zip_objective_and_data())[0]
    _, exp_prog_out_uniq = list(exp_prog_out.get_zip_objective_and_data())[0]
    
    if set(prog_out_uniq) != set(exp_prog_out_uniq):
        logging.warning("Test mismatch between program output and expected!")
    intersect = set(prog_out_uniq) & set(exp_prog_out_uniq)
    fault_tests = set()
    for elem in intersect:
        ol_equiv = common_matrices.OutputLogData.outlogdata_equiv(\
                                    prog_out_uniq[elem], exp_prog_out_uniq[elem])
        if not ol_equiv:
            ERROR_HANDLER.assert_true (elem not in fault_tests, \
                                                        "duplicate test", __file__)
            fault_tests.add(elem)
            
    # get mutant to killing test dict
    kill_matrix = common_matrices.ExecutionMatrix(\
                                          filename=strong_mutant_kill_matrix_file)
    mut_to_killtests = kill_matrix.query_active_columns_of_rows()
    
    # remove unselected tests
    if selected_tests is not None:
        selected_tests = set(selected_tests)
        for mut, tests in mut_to_killtests.items():
            mut_to_killtests[mut] = set(tests) & selected_tests
    
    mutant_to_fr = {}
    for mut, tests in mut_to_killtests.items():
        tests = set(tests)
        kill_fr = len(tests & fault_tests)
        kill_all = len(tests)
        if kill_all == 0:
            # Equivalent (never killed): the ratio is undefined, use -1
            mutant_to_fr[mut] = -1.0
        else:
            mutant_to_fr[mut] = kill_fr * 1.0 / kill_all

    # Return reconstructed from the docstring (the example is cut off here):
    # the set of fault revealing mutants and the fault revelation ratio dict.
    fault_revealing_mutants = {m for m, fr in mutant_to_fr.items() \
                                                        if fr >= threshold}
    return fault_revealing_mutants, mutant_to_fr
Example #7
def fault_analysis(cm_corebench_scripts_dir, c_id, conf_py, in_muteria_outdir,
                   out_top_dir):
    if not os.path.isdir(out_top_dir):
        os.mkdir(out_top_dir)

    in_res_data_dir = os.path.join(in_muteria_outdir, 'latest', 'RESULTS_DATA')
    testtools_workdir = os.path.join(in_muteria_outdir, 'latest',
                                     'testscases_workdir')
    pass_fail_matrix = os.path.join(in_res_data_dir, "matrices",
                                    "PASSFAIL.csv")

    pf_mat = common_matrices.ExecutionMatrix(filename=pass_fail_matrix)
    test_list = list(pf_mat.get_nonkey_colname_list())
    #semu_tool_dirs = [d for d in os.listdir(testtools_workdir) if d.startswith('semu_cmp-')]

    # get fault tests
    get_commit_fault_tests(cm_corebench_scripts_dir, c_id, conf_py,
                           in_res_data_dir, out_top_dir)

    # get test to timestamp
    test_timestamp_file = os.path.join(out_top_dir, "test_to_timestamp.json")
    test2timestamp = {}
    ## get tests by tools
    tools2tests = {}
    for test in test_list:
        alias, _ = DriversUtils.reverse_meta_element(test)
        # XXX: only SEMU
        if not alias.startswith('semu_cmp-'):
            continue
        if alias not in tools2tests:
            tools2tests[alias] = set()
        tools2tests[alias].add(test)
    ## untar tests dir
    for alias, tests in tools2tests.items():
        d = os.path.join(testtools_workdir, alias)
        assert os.path.isdir(
            d), "test tool dir " + d + " missing for alias " + alias
        test_tar = os.path.join(d, 'tests_files.tar.gz')
        es = common_fs.TarGz.decompressDir(test_tar)
        assert es is None, "decompress error: " + es
        tests_files = os.path.join(d, 'tests_files')
        assert os.path.isdir(tests_files), "dir missing after decompress"
        for test in tests:
            _, simple_test = DriversUtils.reverse_meta_element(test)
            gt = TestcasesToolSemu._get_generation_time_of_test(
                simple_test, tests_files)
            test2timestamp[test] = gt
        shutil.rmtree(tests_files)

    common_fs.dumpJSON(test2timestamp, test_timestamp_file, pretty=True)
Example #8
File: main.py Project: Marvinmw/muteria
    def compute_stats(config, explorer):
        # get the matrix of each test criterion
        coverages = {}
        total_to = {}
        for c in config.ENABLED_CRITERIA.get_val():
            if explorer.file_exists(fd_structure.CRITERIA_MATRIX[c]):
                mat_file = explorer.get_existing_file_pathname(\
                                            fd_structure.CRITERIA_MATRIX[c])
                mat = common_matrices.ExecutionMatrix(filename=mat_file)
                row2collist = mat.query_active_columns_of_rows()
                cov = len([k for k, v in row2collist.items() if len(v) > 0])
                tot = len(row2collist)
                coverages[c.get_str()] = '{:.2f}'.format(cov * 100.0 / tot)
                total_to[c.get_str()] = tot

        # JSON
        out_json = {}
        for c in coverages:
            out_json[c] = {
                'coverage': coverages[c],
                '# test objectives': total_to[c]
            }
        common_fs.dumpJSON(out_json, explorer.get_file_pathname(\
                                            fd_structure.STATS_MAIN_FILE_JSON))

        # HTML
        template_file = os.path.join(os.path.dirname(\
                            os.path.abspath(__file__)), 'summary_report.html')
        report_file = explorer.get_file_pathname(\
                                            fd_structure.STATS_MAIN_FILE_HTML)
        rendered = Template(open(template_file).read()).render( \
                                {'coverages':coverages, 'total_to':total_to})
        with open(report_file, 'w') as f:
            f.write(rendered)

        try:
            webbrowser.get()
            webbrowser.open('file://' + report_file, new=2)
        except Exception as e:
            logging.warning("webbrowser error: " + str(e))

    #~ def compute_stats()


#~ class DataHandling
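The same coverage computation can be done on a single matrix file without the config/explorer machinery; a minimal sketch (the CSV path is hypothetical):

mat = common_matrices.ExecutionMatrix(filename="STATEMENT_COVERAGE.csv")
row2collist = mat.query_active_columns_of_rows()
covered = len([k for k, v in row2collist.items() if len(v) > 0])
total = len(row2collist)
print("coverage: {:.2f}% of {} test objectives".format(
                                        covered * 100.0 / total, total))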
Example #9
def load(matrice_file):
    # load matrices and compute mutant killtest mapping
    sm_mat = common_matrices.ExecutionMatrix(matrice_file)

    print("[{}] Getting mutants_to_killingtests ...".format(
        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    mutants_to_killingtests = sm_mat.query_active_columns_of_rows()

    print("[{}] Getting tests_to_killed_mutants ...".format(
        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    tests_to_killed_mutants = sm_mat.query_active_rows_of_columns()

    all_tests = sm_mat.get_nonkey_colname_list()

    print("[{}] Loaded".format(
        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))

    # return data
    return all_tests, mutants_to_killingtests, tests_to_killed_mutants
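A sketch of how the returned mappings can be used, for example to compute a mutation score (the matrix path is hypothetical):

all_tests, mutants_to_killingtests, tests_to_killed_mutants = \
                                                load("STRONG_MUTATION.csv")
killed = [m for m, kt in mutants_to_killingtests.items() if len(kt) > 0]
print("mutation score: {:.2f}%".format(
                    len(killed) * 100.0 / len(mutants_to_killingtests)))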
def getSubsumingMutants (mutant_kill_matrix_file, clustered=True, \
                                                        selected_tests=None):
    """ Get the subsuming mutants from the matrix file

        :returns: a tuple of (1) list of equivalent mutants
                    (2) the list of tuple of subsuming mutant (Each tuple
                    contain the mutants that are subsuming each others) or
                    list of all subsuming mutants.
    """
    # load matrix
    matrix = common_matrices.ExecutionMatrix(mutant_kill_matrix_file)

    # get mutants_to_killing_tests
    mutants_to_killing_tests = matrix.query_active_columns_of_rows()

    if selected_tests is not None:
        _filter_out_tests(matrix.get_nonkey_colname_list(), selected_tests, \
                                                mutants_to_killing_tests)
    return algorithms.getSubsumingMutants(mutants_to_killing_tests, \
                                                        clustered=clustered)
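A usage sketch based on the documented return value (the file name is illustrative):

equivalent, subsuming_clusters = getSubsumingMutants("STRONG_MUTATION.csv",
                                                     clustered=True)
print("{} equivalent mutants, {} subsuming clusters".format(
                            len(equivalent), len(subsuming_clusters)))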
Example #11
    def compute_stats(config, explorer, checkpointer):
        # get the matrix of each test criterion
        coverages = {}
        total_to = {}
        number_of_testcases = None
        for c in config.ENABLED_CRITERIA.get_val():
            if explorer.file_exists(fd_structure.CRITERIA_MATRIX[c]):
                mat_file = explorer.get_existing_file_pathname(\
                                            fd_structure.CRITERIA_MATRIX[c])
                mat = common_matrices.ExecutionMatrix(filename=mat_file)
                row2collist = mat.query_active_columns_of_rows()
                cov = len([k for k,v in row2collist.items() if len(v) > 0])
                tot = len(row2collist)
                coverages[c.get_str()] = 'n.a.' if tot == 0 else \
                                          '{:.2f}'.format(cov * 100.0 / tot)
                total_to[c.get_str()] = tot
                if number_of_testcases is None:
                    number_of_testcases = len(mat.get_nonkey_colname_list())
        
        # JSON
        out_json = {}
        out_json['TOTAL EXECUTION TIME (s)'] = \
                                            checkpointer.get_execution_time()
        out_json['NUMBER OF TESTCASES'] = number_of_testcases
        out_json['CRITERIA'] = {}
        for c in coverages:
            out_json['CRITERIA'][c] = {'coverage': coverages[c], 
                                            '# test objectives': total_to[c]}
        common_fs.dumpJSON(out_json, explorer.get_file_pathname(\
                                        fd_structure.STATS_MAIN_FILE_JSON), \
                                     pretty=True)

        # HTML
        template_file = os.path.join(os.path.dirname(\
                            os.path.abspath(__file__)), 'summary_report.html')
        report_file = explorer.get_file_pathname(\
                                            fd_structure.STATS_MAIN_FILE_HTML)
        
        def format_execution_time(exec_time):
            n_day = int(exec_time // (24 * 3600))
            exec_time = exec_time % (24 * 3600)
            n_hour = int(exec_time // 3600)
            exec_time %= 3600
            n_minutes = int(exec_time // 60)
            exec_time %= 60
            n_seconds = int(round(exec_time))

            res = ""
            for val, unit in [(n_day, 'day'), (n_hour, 'hour'), \
                                (n_minutes, 'minute'), (n_seconds, 'second')]:
                if val > 0:
                    s = ' ' if val == 1 else 's '
                    res += str(val) + ' ' + unit + s
            
            return res
        #~ def format_execution_time()

        total_exec_time = format_execution_time(\
                                            checkpointer.get_execution_time())

        rendered = Template(open(template_file).read()).render( \
                                {
                                    'total_execution_time': total_exec_time,
                                    'number_of_testcases': number_of_testcases,
                                    'coverages':coverages, 
                                    'total_to':total_to,
                                })
        with open(report_file, 'w') as f:
            f.write(rendered)
        
        try:
            webbrowser.get()
            webbrowser.open('file://' + report_file,new=2)
        except Exception as e:
            logging.warning("webbrowser error: "+str(e))
Example #12
def get_subsuming_elements(matrix_file):
    mat = common_matrices.ExecutionMatrix(filename=matrix_file)
    elem_to_tests = mat.query_active_columns_of_rows()
    equiv, subs_clusters = stats_algo.getSubsumingMutants(\
                                        elem_to_tests, clustered=True)
    return equiv, subs_clusters
Example #13
    def update_matrix_to_cover_when_difference(cls, \
                                target_matrix_file, target_outdata_file, \
                                comparing_vector_file, comparing_outdata_file):
        ERROR_HANDLER.assert_true(target_matrix_file is not None \
                                and comparing_vector_file is not None, \
                                "target or comparing matrix is None", __file__)

        target_matrix = common_matrices.ExecutionMatrix(\
                                                filename=target_matrix_file)
        comparing_vector = common_matrices.ExecutionMatrix(\
                                                filename=comparing_vector_file)

        ERROR_HANDLER.assert_true(len(\
                            set(target_matrix.get_nonkey_colname_list()) - \
                            set(comparing_vector.get_nonkey_colname_list()) \
                                     ) == 0, "Mismatch of columns", __file__)

        # Get uncertain
        target_uncertain_cols_dict = \
                                target_matrix.query_uncertain_columns_of_rows()
        vector_uncertain_cols = \
                            comparing_vector.query_uncertain_columns_of_rows()
        vector_uncertain_cols = \
                    set(vector_uncertain_cols[list(vector_uncertain_cols)[0]])

        # Check if outdata and proceed accordingly
        if target_outdata_file is not None and \
                                            comparing_outdata_file is not None:
            # outdata are set: use the outdata values to decide the differences
            target_outdata = common_matrices.OutputLogData(\
                                                filename=target_outdata_file)
            vector_outdata = common_matrices.OutputLogData(\
                                            filename=comparing_outdata_file)

            _, vector_outdata_uniq = list(\
                                vector_outdata.get_zip_objective_and_data())[0]

            ## Compare using output
            timedout_key = common_matrices.OutputLogData.TIMEDOUT
            key_to_diffs = {}
            for key, key_data in target_outdata.get_zip_objective_and_data():
                intersect = set(vector_outdata_uniq) & set(key_data)
                ERROR_HANDLER.assert_true(set(key_data) == intersect, \
                            "The elements in target must all be in vector",\
                                                                    __file__)
                key_to_diffs[key] = set()
                for elem in intersect:
                    ol_equiv = common_matrices.OutputLogData.outlogdata_equiv(\
                                    key_data[elem], vector_outdata_uniq[elem])
                    if not ol_equiv:
                        key_to_diffs[key].add(elem)
        else:
            # outdata is not set: use the difference of the matrices
            ## obtain the active cols for each rows
            target_active_cols_dict = \
                                target_matrix.query_active_columns_of_rows()

            vector_active_cols = \
                                comparing_vector.query_active_columns_of_rows()
            vector_active_cols = \
                        set(vector_active_cols[list(vector_active_cols)[0]])

            ## for each row of target matrix, get diff
            target_matrix_allcol = set(target_matrix.get_nonkey_colname_list())
            key_to_diffs = {}
            for row_key, row_active in target_active_cols_dict.items():
                key_to_diffs[row_key] = (set(row_active) - vector_active_cols)\
                                    | (vector_active_cols - set(row_active))
                key_to_diffs[row_key] &= target_matrix_allcol

        # Clear matrix to inactive
        target_matrix.clear_cells_to_value(target_matrix.getInactiveCellVal())

        # Update matrix based on diff
        for key, diffs in key_to_diffs.items():
            # gather the uncertain and set to uncertain
            uncertain = vector_uncertain_cols | \
                                        set(target_uncertain_cols_dict[key])
            set_uncertain = uncertain & diffs
            diffs -= uncertain

            values = {}
            for col in set_uncertain:
                values[col] = target_matrix.getUncertainCellDefaultVal()
            for col in diffs:
                values[col] = target_matrix.getActiveCellDefaultVal()

            if len(values) > 0:
                target_matrix.update_cells(key, values)

        target_matrix.serialize()
Example #14
File: semu.py Project: thierry-tct/muteria
    def _get_input_bitcode_file(self, code_builds_factory, rel_path_map, \
                                                meta_criteria_tool_obj=None):
        meta_mu_src = self.driver_config.get_meta_mutant_source()

        # XXX Case of manual annotation
        if meta_mu_src == MetaMuSource.ANNOTATION:
            with open(self.cand_muts_file, 'w') as f:
                # Single mutant (id 1, corresponding to old version)
                f.write(str(1) + '\n')
            return super(TestcasesToolSemu, self)._get_input_bitcode_file(\
                                        code_builds_factory, rel_path_map, \
                                meta_criteria_tool_obj=meta_criteria_tool_obj)
        if type(meta_mu_src) == str:
            # XXX: The actual path to the meta is specified
            return meta_mu_src

        # XXX: Case of other mutation tools like Mart
        # get the meta criterion file from MART or any compatible tool.
        mutant_gen_tool_name = meta_mu_src.get_field_value()
        mut_tool_alias_to_obj = \
                            meta_criteria_tool_obj.get_criteria_tools_by_name(\
                                                        mutant_gen_tool_name)

        if len(mut_tool_alias_to_obj) == 0:
            logging.warning(\
                'SEMu requires {} to generate mutants but none used'.format(\
                                                        mutant_gen_tool_name))

        ERROR_HANDLER.assert_true(len(mut_tool_alias_to_obj) == 1, \
                                "SEMu supports tests generation from"
                                "a single .bc file for now (todo).", __file__)

        t_alias2metamu_bc = {}
        t_alias2mutantInfos = {}
        for alias, obj in mut_tool_alias_to_obj.items():
            dest_bc = rel_path_map[list(rel_path_map)[0]] + '.bc'
            shutil.copy2(obj.get_test_gen_metamutant_bc(), dest_bc)
            t_alias2metamu_bc[alias] = dest_bc
            t_alias2mutantInfos[alias] = obj.get_criterion_info_object(None)

        # XXX: get mutants ids by functions
        self.mutants_by_funcs = {}
        single_alias = list(t_alias2mutantInfos)[0]
        single_tool_obj = t_alias2mutantInfos[single_alias]

        cand_muts = list(single_tool_obj.get_elements_list())

        for mut in single_tool_obj.get_elements_list():
            func = single_tool_obj.get_element_data(mut)[\
                                                        'mutant_function_name']
            #meta_mut = DriversUtils.make_meta_element(mut, single_alias)
            if func not in self.mutants_by_funcs:
                self.mutants_by_funcs[func] = set()
            self.mutants_by_funcs[func].add(mut)  #meta_mut)

        # XXX: get candidate mutants list
        if self.driver_config.get_target_only_live_mutants() \
                                        and os.path.isfile(self.sm_mat_file):
            sm_mat = common_matrices.ExecutionMatrix(\
                                                filename=self.sm_mat_file)
            mut2killing_tests = sm_mat.query_active_columns_of_rows()
            alive_muts = [m for m, k_t in mut2killing_tests.items() \
                                                        if len(k_t) == 0]
            cand_muts = []
            for meta_m in alive_muts:
                t_alias, m = DriversUtils.reverse_meta_element(meta_m)
                if t_alias in t_alias2metamu_bc:  # There is a single one
                    cand_muts.append(m)

        with open(self.cand_muts_file, 'w') as f:
            for m in cand_muts:
                f.write(str(m) + '\n')

        return t_alias2metamu_bc[list(t_alias2metamu_bc)[0]]
Example #15
def _get_fault_tests(cm_corebench_scripts_dir,
                     c_id,
                     conf_py,
                     in_res_data_dir,
                     outdir,
                     get_difference=True):
    if not os.path.isdir(outdir):
        os.mkdir(outdir)

    exe_dir = os.path.join(cm_corebench_scripts_dir, "bug_fixing_exes", c_id)
    exe_file = os.listdir(os.path.join(exe_dir, "old"))
    if len(exe_file) != 1:
        error_exit("not one file for old")
    exe_file = exe_file[0]

    fail_test_execution = os.path.join(outdir, "fail_test_checking")
    if os.path.isdir(fail_test_execution):
        shutil.rmtree(fail_test_execution)
    if not os.path.isdir(fail_test_execution):
        os.makedirs(fail_test_execution)

    bug_finding_tests_list = os.path.join(fail_test_execution,
                                          "fault_reveling_tests.txt")

    # temporary
    test_list_file = os.path.join(fail_test_execution, "test_list.tmp")

    pass_fail_matrix = os.path.join(in_res_data_dir, "matrices",
                                    "PASSFAIL.csv")

    pf_mat = common_matrices.ExecutionMatrix(filename=pass_fail_matrix)
    with open(test_list_file, "w") as f:
        for test in pf_mat.get_nonkey_colname_list():
            f.write(test + "\n")

    nohash = ['--nohashoutlog']
    #nohash = []

    print("# info: running bug-fix old ...")
    version = "old"
    custom_exe = os.path.join(exe_dir, version, exe_file)
    stdin = "{}\n{}\n{}\n{}\n".format(
        "tests", os.path.join(fail_test_execution, version),
        '{"src/' + exe_file + '": "' + custom_exe + '"}', test_list_file)
    if os.system(" ".join([
            "printf", "'" + stdin + "'", "|", "muteria", "--config", conf_py,
            "--lang", "c", "customexec"
    ] + nohash)) != 0:
        assert False, "bug-fix old failed"

    print("# info: running bug-fix new ...")
    version = "new"
    custom_exe = os.path.join(exe_dir, version, exe_file)
    stdin = "{}\n{}\n{}\n{}\n".format(
        "tests", os.path.join(fail_test_execution, version),
        '{"src/' + exe_file + '": "' + custom_exe + '"}', test_list_file)
    if os.system(" ".join([
            "printf", "'" + stdin + "'", "|", "muteria", "--config", conf_py,
            "--lang", "c", "customexec"
    ] + nohash)) != 0:
        assert False, "bug-fix new failed"

    _extract_list(fail_test_execution, bug_finding_tests_list)

    if get_difference:
        # get differences in bug introducing
        ## get diff exe file
        diff_exe_dir = os.path.join(os.path.dirname(in_res_data_dir),
                                    'code_build_workdir')
        map_file_ = os.path.join(diff_exe_dir, 'files_map')
        map_ = common_fs.loadJSON(map_file_)
        diff_exe_file = None
        for f, dest in map_.items():
            if os.path.basename(f) == exe_file:
                assert diff_exe_file is None, "multiple exefile found in defference"
                diff_exe_file = os.path.join(diff_exe_dir, dest)
                assert os.path.isfile(
                    diff_exe_file), "missing diff_exe_file: " + diff_exe_file
        assert diff_exe_file is not None, "diff exe file not found"
        ## Execution
        diff_test_execution = os.path.join(outdir, "diff_test_checking")
        diff_finding_tests_list = os.path.join(diff_test_execution,
                                               "diff_reveling_tests.txt")
        if os.path.isdir(diff_test_execution):
            shutil.rmtree(diff_test_execution)
        if not os.path.isdir(diff_test_execution):
            os.makedirs(diff_test_execution)

        print("# info: running bug-intro old ...")
        version = "old"
        stdin = "{}\n{}\n{}\n{}\n".format(
            "tests", os.path.join(diff_test_execution, version),
            '{"src/' + exe_file + '": "' + diff_exe_file + '"}',
            test_list_file)
        if os.system(" ".join([
                "printf", "'" + stdin +
                "'", "|", "KLEE_CHANGE_RUNTIME_SET_OLD_VERSION=on", "muteria",
                "--config", conf_py, "--lang", "c", "customexec"
        ] + nohash)) != 0:
            assert False, "bug-intro old failed"

        print("# info: running bug-intro new ...")
        version = "new"
        stdin = "{}\n{}\n{}\n{}\n".format(
            "tests", os.path.join(diff_test_execution, version),
            '{"src/' + exe_file + '": "' + diff_exe_file + '"}',
            test_list_file)
        if os.system(" ".join([
                "printf", "'" + stdin + "'", "|", "muteria", "--config",
                conf_py, "--lang", "c", "customexec"
        ] + nohash)) != 0:
            assert False, "bug-intro new failed"

        _extract_list(diff_test_execution, diff_finding_tests_list)

    os.remove(test_list_file)
Example #16
    def _call_generation_run(self, runtool, args):
        # Delete any klee-out-*
        for d in os.listdir(self.tests_working_dir):
            if d.startswith('klee-out-'):
                shutil.rmtree(os.path.join(self.tests_working_dir, d))

        call_shadow_wrapper_file = os.path.join(self.tests_working_dir, \
                                                                "shadow_wrap")

        devtest_toolalias = self.parent_meta_tool.get_devtest_toolalias()
        ERROR_HANDLER.assert_true(devtest_toolalias is not None, \
                        "devtest must be used when using shadow_se", __file__)

        #test_list = list(self.code_builds_factory.repository_manager\
        #                                               .get_dev_tests_list())
        test_list = []
        for meta_test in self.parent_meta_tool.get_testcase_info_object(\
                               candidate_tool_aliases=[devtest_toolalias])\
                                                            .get_tests_list():
            toolalias, test = DriversUtils.reverse_meta_element(meta_test)
            ERROR_HANDLER.assert_true(toolalias == devtest_toolalias, \
                           "BUG in above get_testcase_info_object", __file__)
            test_list.append(test)

        # Get list of klee_change, klee_get_true/false locations.
        klee_change_stmts = []

        get_lines_callback_obj = self.GetLinesCallbackObject()
        get_lines_callback_obj.set_pre_callback_args(self.code_builds_factory\
                                    .repository_manager.revert_src_list_files)
        get_lines_callback_obj.set_post_callback_args(klee_change_stmts)

        pre_ret, post_ret = self.code_builds_factory.repository_manager\
                                    .custom_read_access(get_lines_callback_obj)
        ERROR_HANDLER.assert_true(pre_ret == \
                                common_mix.GlobalConstants.COMMAND_SUCCESS,\
                                                    "pre failed", __file__)
        ERROR_HANDLER.assert_true(post_ret == \
                                common_mix.GlobalConstants.COMMAND_SUCCESS,\
                                                    "post failed", __file__)

        ERROR_HANDLER.assert_true(len(klee_change_stmts) > 0, \
                        "No klee_change statement in the sources", __file__)

        # Filter only tests that cover those locations,
        # if there is stmt coverage matrix
        stmt_cov_mat_file = self.head_explorer.get_file_pathname(\
                            fd_structure.CRITERIA_MATRIX[criteria.TestCriteria\
                                                        .STATEMENT_COVERAGE])
        cov_tests = None
        if os.path.isfile(stmt_cov_mat_file):
            stmt_cov_mat = common_matrices.ExecutionMatrix(\
                                                    filename=stmt_cov_mat_file)
            # due to possible wrapper test splitting we update test_list here
            tmp_test_list = []
            for mt in stmt_cov_mat.get_nonkey_colname_list():
                alias, t = DriversUtils.reverse_meta_element(mt)
                if alias == devtest_toolalias:
                    tmp_test_list.append(t)
            test_list = tmp_test_list

            meta_stmts = list(stmt_cov_mat.get_keys())
            tool_aliases = set()
            for meta_stmt in meta_stmts:
                alias, stmt = DriversUtils.reverse_meta_element(meta_stmt)
                tool_aliases.add(alias)
            klee_change_meta_stmts = []
            for alias in tool_aliases:
                klee_change_meta_stmts += [\
                                    DriversUtils.make_meta_element(e, alias) \
                                                    for e in klee_change_stmts]
            klee_change_meta_stmts = list(set(meta_stmts) & \
                                                set(klee_change_meta_stmts))

            cov_tests = set()
            if len(klee_change_meta_stmts) > 0:
                for _, t in stmt_cov_mat.query_active_columns_of_rows(\
                                row_key_list=klee_change_meta_stmts).items():
                    cov_tests |= set(t)
            else:
                logging.warning('No test covers the patch (SHADOW)!')
            #    ERROR_HANDLER.assert_true(len(klee_change_meta_stmts) > 0, \
            #                            "No test covers the patch", __file__)

        # tests will be generated in the same dir that has the input .bc file
        os.mkdir(self.tests_storage_dir)

        # obtain candidate tests
        cand_testpair_list = []
        for test in test_list:
            meta_test = DriversUtils.make_meta_element(test, devtest_toolalias)
            if cov_tests is not None and meta_test not in cov_tests:
                continue
            cand_testpair_list.append((test, meta_test))

        # Adjust the max-time in args
        ## locate max-time
        per_test_hard_timeout = None
        per_test_timeout = None
        if len(cand_testpair_list) > 0:
            cur_max_time = float(self.get_value_in_arglist(args, 'max-time'))
            if self.driver_config.get_gen_timeout_is_per_test():
                per_test_timeout = cur_max_time
            else:
                per_test_timeout = max(60, \
                                       cur_max_time / len(cand_testpair_list))
            self.set_value_in_arglist(args, 'max-time', str(per_test_timeout))

            # give time to dump remaining states
            per_test_hard_timeout = per_test_timeout + \
                                self.config.TEST_GEN_TIMEOUT_FRAMEWORK_GRACE

        #per_test_hard_timeout = 300 #DBG
        # Set the wrapper
        with open(call_shadow_wrapper_file, 'w') as wf:
            wf.write('#! /bin/bash\n\n')
            wf.write('set -u\n')
            wf.write('set -o pipefail\n\n')
            wf.write('ulimit -s unlimited\n')

            # Time out the shadow execution (some tests create daemons which
            # are not killed by the test timeout). ALSO MAKE SURE TO
            # DEACTIVATE THE TIMEOUT IN THE TEST SCRIPT
            kill_after = 30
            wf.write('time_out_cmd="/usr/bin/timeout --kill-after={}s {}"\n'.\
                                     format(kill_after, per_test_hard_timeout))
            # kill after and time for timeout to act
            per_test_hard_timeout += kill_after + 60

            #wf.write(' '.join(['exec', runtool] + args + ['"${@:1}"']) + '\n')
            wf.write('\nstdindata="{}/klee-last/{}"\n'.format(\
                                                    self.tests_working_dir, \
                                        KTestTestFormat.STDIN_KTEST_DATA_FILE))
            wf.write('tmpstdindata="{}/{}"\n\n'.format(self.tests_working_dir,\
                                        KTestTestFormat.STDIN_KTEST_DATA_FILE))
            wf.write('if [ -t 0 ] # check whether no data is piped on stdin\n')
            wf.write('then\n')
            wf.write(' '.join(['\t(', '$time_out_cmd', runtool] + args + \
                                    ['"${@:1}"', ') ; EXIT_CODE=$?', '\n']))
            wf.write('\t/usr/bin/touch $tmpstdindata\n')

            wf.write('else\n')
            wf.write('\t(/bin/cat - > $tmpstdindata ) || EXIT_CODE=1\n')
            wf.write(' '.join(['\t(', '/bin/cat $tmpstdindata | ', \
                            '$time_out_cmd', runtool] + args + \
                                ['"${@:1}"', ') ; EXIT_CODE=$?', '\n']))
            wf.write('fi\n\n')
            wf.write('/bin/mv $tmpstdindata $stdindata || EXIT_CODE=2\n')
            wf.write('\n# Provoke "unbound variable" if KLEE fails\n')
            wf.write('# Preserve the KLEE exit code\n')
            wf.write('exit $EXIT_CODE\n')
        os.chmod(call_shadow_wrapper_file, 0o775)

        # run test
        exes, _ = self.code_builds_factory.repository_manager\
                                                .get_relative_exe_path_map()
        ERROR_HANDLER.assert_true(len(exes) == 1, \
                                            "Must have a single exe", __file__)
        exe_path_map = {e: call_shadow_wrapper_file for e in exes}
        env_vars = {}
        self._dir_chmod777(self.tests_storage_dir)
        for test, meta_test in cand_testpair_list:
            self.parent_meta_tool.execute_testcase(meta_test, exe_path_map, \
                                    env_vars, timeout=per_test_hard_timeout,\
                                                    with_output_summary=False)

            #logging.debug("DBG: Just executed test '{}'".format(meta_test))
            #input(">>>> ") #DBG

            # copy the klee out
            test_out = os.path.join(self.tests_storage_dir, \
                                          self.get_sorage_name_of_test(test))
            os.mkdir(test_out)
            for d in glob.glob(self.tests_working_dir + "/klee-out-*"):
                # make sure we can do anything with it
                self._dir_chmod777(d)
                if not self.keep_first_test:
                    first_test = os.path.join(d, 'test000001.ktest')
                    if os.path.isfile(first_test):
                        shutil.move(first_test, first_test + '.disable')
                shutil.move(d, test_out)
            ERROR_HANDLER.assert_true(len(list(os.listdir(test_out))) > 0, \
                                "Shadow generated no test for tescase: "+test,\
                                                                    __file__)
            if os.path.islink(os.path.join(self.tests_working_dir, \
                                                                'klee-last')):
                os.unlink(os.path.join(self.tests_working_dir, 'klee-last'))

        # store klee_change locs
        common_fs.dumpJSON(klee_change_stmts, self.klee_change_locs_list_file)
Example #17
    def runtests(self, meta_testcases=None, exe_path_map=None, env_vars=None, \
                        stop_on_failure=False, \
                        per_test_timeout=None, \
                        use_recorded_timeout_times=None, \
                        recalculate_execution_times=False, \
                        fault_test_execution_matrix_file=None, \
                        fault_test_execution_execoutput_file=None, \
                        with_outlog_hash=True, \
                        test_prioritization_module=None, \
                        parallel_test_count=1, \
                        parallel_test_scheduler=None, \
                        restart_checkpointer=False,
                        finish_destroy_checkpointer=True):
        '''
        Execute the list of test cases with the given executable and 
        say, for each test case, whether it failed

        :param meta_testcases: list of test cases to execute
        :param exe_path_map: dict mapping the repository executable paths to
                        the file system paths of the executables to run
                        with the tests
        :param env_vars: dict of environment variables to set before
                        executing each test ({<variable>: <value>})
        :param stop_on_failure: decide whether to stop the test 
                        execution once a test fails
        :param fault_test_execution_matrix_file: Optional matrix file 
                        to store the tests' pass fail execution data
        :param fault_test_execution_execoutput_file: Optional output log file 
                        to store the tests' execution actual output (hashed)
        :param with_outlog_hash: decide whether to return outlog hash 
        :param test_prioritization_module: Specify the test prioritization
                        module. 
                        (TODO: Implement support)
        :param parallel_test_count: Specify the number of parallel test
                        executions. Must be an integer >= 1.
        :param parallel_test_scheduler: Specify the function that will
                        handle parallel test scheduling by tool, using
                        the test execution optimizer. 
                        (TODO: Implement support)

        :type restart_checkpointer: bool
        :param restart_checkpointer: Decide whether to discard the checkpoint
                        and restart anew.

        :type finish_destroy_checkpointer: bool
        :param finish_destroy_checkpointer: Decide whether to automatically 
                        destroy the checkpointer when done or not.
                        Useful if the caller has a checkpointer to update.

        :returns: dict of testcase and their failed verdict.
                 {<test case name>: <True if failed, False if passed,
                    UNCERTAIN_TEST_VERDICT if uncertain>}
                 If stop_on_failure is True, only return the tests that 
                 have been executed until the failure
        '''

        # FIXME: Make sure that support is implemented for
        # parallelism and test prioritization. Remove the code below
        # once supported:
        ERROR_HANDLER.assert_true(test_prioritization_module is None, \
                        "Must implement test prioritization support here", \
                                                                    __file__)
        ERROR_HANDLER.assert_true(parallel_test_count <= 1, \
                    "Must implement parallel tests execution support here", \
                                                                    __file__)
        ERROR_HANDLER.assert_true(parallel_test_scheduler is None, \
                    "Must implement parallel tests execution support here", \
                                                                    __file__)
        #~FIXMEnd

        # Check arguments Validity
        if exe_path_map is None:
            exe_path_map = self._get_default_exe_path_map()

        ERROR_HANDLER.assert_true(parallel_test_count > 0, \
                    "invalid parallel test execution count: {}. {}".format( \
                                    parallel_test_count, "must be >= 1"))

        # @Checkpoint: create a checkpoint handler
        cp_func_name = "runtests"
        cp_task_id = 1
        checkpoint_handler = \
                CheckPointHandler(self.get_checkpoint_state_object())
        if restart_checkpointer:
            checkpoint_handler.restart()
        if checkpoint_handler.is_finished():
            logging.warning("%s %s" %("The function 'runtests' is finished", \
                "according to checkpoint, but called again. None returned"))
            if common_mix.confirm_execution("%s %s" % ( \
                                        "Function 'runtests' is already", \
                                        "finished, do you want to restart?")):
                checkpoint_handler.restart()
                logging.info("Restarting the finished 'runtests'")
            else:
                ERROR_HANDLER.error_exit(err_string="%s %s %s" % (\
                        "Execution halted. Cannot continue because no value", \
                        " can be returned. Check the results of the", \
                        "finished execution"), call_location=__file__)

        # @Checkpoint: Get the saved payload (data kept for each tool):
        # a pair of dicts, test failed verdicts and execution outputs
        meta_test_failedverdicts_outlog = \
                                    checkpoint_handler.get_optional_payload()
        if meta_test_failedverdicts_outlog is None:
            meta_test_failedverdicts_outlog = [{}, {}]

        # Make sure the tests are unique
        ERROR_HANDLER.assert_true(len(meta_testcases) == \
                                                len(set(meta_testcases)), \
                                        "not all tests are unique", __file__)

        testcases_by_tool = {}
        for meta_testcase in meta_testcases:
            ttoolalias, testcase = \
                            DriversUtils.reverse_meta_element(meta_testcase)
            if ttoolalias not in testcases_by_tool:
                testcases_by_tool[ttoolalias] = []
            testcases_by_tool[ttoolalias].append(testcase)

        found_a_failure = False
        for tpos, ttoolalias in enumerate(testcases_by_tool.keys()):
            # @Checkpoint: Check whether already executed
            if not checkpoint_handler.is_to_execute(func_name=cp_func_name, \
                                                taskid=cp_task_id, \
                                                tool=ttoolalias):
                continue

            # Actual execution
            ttool = \
                self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            test_failed_verdicts, test_execoutput = ttool.runtests( \
                                            testcases_by_tool[ttoolalias], \
                                            exe_path_map, env_vars, \
                                            stop_on_failure, \
                                            per_test_timeout=per_test_timeout,
                                            use_recorded_timeout_times=\
                                                use_recorded_timeout_times, \
                                            recalculate_execution_times=\
                                                recalculate_execution_times, \
                                            with_outlog_hash=with_outlog_hash)
            for testcase in test_failed_verdicts:
                meta_testcase =  \
                        DriversUtils.make_meta_element(testcase, ttoolalias)
                meta_test_failedverdicts_outlog[0][meta_testcase] = \
                                                test_failed_verdicts[testcase]
                meta_test_failedverdicts_outlog[1][meta_testcase] = \
                                                    test_execoutput[testcase]
                if test_failed_verdicts[testcase] == \
                                common_mix.GlobalConstants.FAIL_TEST_VERDICT:
                    found_a_failure = True

            # @Checkpoint: Checkpointing
            checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=ttoolalias, \
                                opt_payload=meta_test_failedverdicts_outlog)

            if stop_on_failure and found_a_failure:
                # @Checkpoint: Checkpointing for remaining tools
                for rem_tool in list(testcases_by_tool.keys())[tpos + 1:]:
                    checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=rem_tool, \
                                opt_payload=meta_test_failedverdicts_outlog)
                break

        if stop_on_failure:
            # Make sure the non-executed tests have the uncertain value (None)
            if len(meta_test_failedverdicts_outlog[0]) < len(meta_testcases):
                for meta_testcase in set(meta_testcases) - \
                                    set(meta_test_failedverdicts_outlog[0]):
                    meta_test_failedverdicts_outlog[0][meta_testcase] = \
                            common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT
                    meta_test_failedverdicts_outlog[1][meta_testcase] = \
                            common_matrices.OutputLogData.\
                                                    UNCERTAIN_TEST_OUTLOGDATA

        ERROR_HANDLER.assert_true(len(meta_test_failedverdicts_outlog[0]) == \
                                                        len(meta_testcases), \
                            "Not all tests have a verdict reported", __file__)

        if fault_test_execution_matrix_file is not None:
            # Load or Create the matrix
            fault_test_execution_matrix = common_matrices.ExecutionMatrix( \
                                filename=fault_test_execution_matrix_file, \
                                            non_key_col_list=meta_testcases)
            ERROR_HANDLER.assert_true(fault_test_execution_matrix.is_empty(), \
                                            "matrix must be empty", __file__)
            failverdict2val = {
                common_mix.GlobalConstants.FAIL_TEST_VERDICT: \
                        fault_test_execution_matrix.getActiveCellDefaultVal(),
                common_mix.GlobalConstants.PASS_TEST_VERDICT: \
                            fault_test_execution_matrix.getInactiveCellVal(),
                common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT: \
                    fault_test_execution_matrix.getUncertainCellDefaultVal(),
            }
            cells_dict = {}
            for meta_testcase in meta_test_failedverdicts_outlog[0]:
                cells_dict[meta_testcase] = failverdict2val[\
                            meta_test_failedverdicts_outlog[0][meta_testcase]]

            fault_test_execution_matrix.add_row_by_key(self.FAULT_MATRIX_KEY, \
                                                cells_dict, serialize=True)

        if fault_test_execution_execoutput_file is None:
            meta_test_failedverdicts_outlog[1] = None
        else:
            # Load or Create the data object
            fault_test_execution_execoutput = common_matrices.OutputLogData( \
                                filename=fault_test_execution_execoutput_file)
            ERROR_HANDLER.assert_true(\
                            fault_test_execution_execoutput.is_empty(), \
                                        "outlog data must be empty", __file__)
            fault_test_execution_execoutput.add_data(\
                                    {self.PROGRAM_EXECOUTPUT_KEY: \
                                         meta_test_failedverdicts_outlog[1]}, \
                                                                serialize=True)

        # @Checkpoint: Finished
        detailed_exectime = {}
        for ttoolalias in testcases_by_tool.keys():
            tt = self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            detailed_exectime[ttoolalias] = (\
                        tt.get_checkpointer().get_execution_time(),\
                        tt.get_checkpointer().get_detailed_execution_time())

        checkpoint_handler.set_finished( \
                                    detailed_exectime_obj=detailed_exectime)

        if finish_destroy_checkpointer:
            checkpoint_handler.destroy()

        return meta_test_failedverdicts_outlog
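After a run that passed fault_test_execution_matrix_file, the stored pass/fail row can be read back with the same matrix API used throughout these examples; a minimal sketch (the file name is hypothetical):

fault_matrix = common_matrices.ExecutionMatrix(
                                        filename="fault_PASSFAIL.csv")
# One row (the fault key) mapped to the tests whose cells are active (failed)
row_to_failing_tests = fault_matrix.query_active_columns_of_rows()
for key, failing_tests in row_to_failing_tests.items():
    print("{}: {} failing tests".format(key, len(failing_tests)))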
Example #18
    def runtests_criteria_coverage (self, testcases, criterion_to_matrix, \
                                    criterion_to_executionoutput=None,
                                    criteria_element_list_by_criteria=None, \
                                    re_instrument_code=False, \
                                    cover_criteria_elements_once=False,
                                    prioritization_module_by_criteria=None,
                                    parallel_count=1, \
                                    parallel_criteria_test_scheduler=None,\
                                    restart_checkpointer=False, \
                                    finish_destroy_checkpointer=True):
        ''' 
        Executes the instrumented executable code with testcases and
        returns the different code coverage matrices.

        :param testcases: list of testcases to execute

        :param criterion_to_matrix: dict of <criterion, Matrix file 
                        where to store coverage>. 
        :param criterion_to_executionoutput: dict of <criterion, execoutput 
                        file where to store coverage>. 
        
        :param criteria_element_list_by_criteria: dictionary representing the
                        list of criteria elements (stmts, branches, mutants)
                        to consider in the test execution matrices.
                        Key is the criterion and the value is the list of elements

        :param re_instrument_code: Decide whether to re-instrument the code
                        before running the tests (useful when instrumentation
                        was not explicitly invoked beforehand). Defaults to False.

        :param cover_criteria_elements_once: Specify whether covering a
                        criterion element once is enough, i.e. we stop
                        analysing a criterion element once a test covers it.
                        The remaining tests' covering verdicts will be UNKNOWN.

        :param prioritization_module_by_criteria: dict of prioritization module
                        by criteria. None means no prioritization used.

        :type parallel_count: int
        :param parallel_count: number of parallel executions (must be >= 1).

        :type parallel_criteria_test_scheduler:
        :param parallel_criteria_test_scheduler: scheduler that organizes
                        parallelism across criteria tools.
                        (TODO: Implement support)

        :type restart_checkpointer: bool
        :param restart_checkpointer: Decide whether to discard any existing
                        checkpoint and restart anew.

        :type finish_destroy_checkpointer: bool
        :param finish_destroy_checkpointer: Decide whether to automatically
                        destroy the checkpointer when done.
        '''

        # FIXME: Make sure that the support is implemented for
        # parallelism and test prioritization. Remove the code below
        # once supported:
        ERROR_HANDLER.assert_true(parallel_count <= 1, \
                    "Must implement parallel execution support here", \
                                                                    __file__)
        ERROR_HANDLER.assert_true(parallel_criteria_test_scheduler is None, \
            "Must implement parallel codes tests execution support here", \
                                                                    __file__)
        #~FIXMEnd

        # Check arguments Validity
        ERROR_HANDLER.assert_true(parallel_count > 0, \
                    "invalid parallel  execution count: {}. {}".format( \
                                    parallel_count, "must be >= 1"))

        # @Checkpoint: create a checkpoint handler
        cp_func_name = "runtests_criteria_coverage"
        cp_task_id = 1
        checkpoint_handler = CheckPointHandler( \
                                            self.get_checkpoint_state_object())
        if restart_checkpointer:
            checkpoint_handler.restart()
        if checkpoint_handler.is_finished():
            return

        ERROR_HANDLER.assert_true(len(criterion_to_matrix) > 0, \
                                        "no criterion is enabled", __file__)

        ERROR_HANDLER.assert_true(len(set(criterion_to_matrix) - \
                            set(self.tools_config_by_criterion_dict)) == 0, \
                    "Passed matrices include criteria with no configured tool", \
                                                                    __file__)

        if criterion_to_executionoutput is not None:
            ERROR_HANDLER.assert_true(set(criterion_to_matrix) == \
                                        set(criterion_to_executionoutput), \
                            "criteria mismatch between matrix and output", \
                                                                    __file__)

        tool2criteria = self._get_tool2criteria(criterion_to_matrix.keys())

        matrices_dir_tmp = os.path.join(self.criteria_working_dir, \
                                                            "codecov_dir.tmp")
        if os.path.isdir(matrices_dir_tmp):
            if restart_checkpointer:
                shutil.rmtree(matrices_dir_tmp)
                os.mkdir(matrices_dir_tmp)
        else:
            os.mkdir(matrices_dir_tmp)

        if criteria_element_list_by_criteria is None:
            criteria_element_list_by_criteria = \
                                        {c: None for c in criterion_to_matrix}

        # get criteria elements by tools
        criteria_elem_list_by_tool = {}
        for criterion in criteria_element_list_by_criteria:
            if criteria_element_list_by_criteria[criterion] is None:
                for t_conf in self.tools_config_by_criterion_dict[criterion]:
                    toolalias = t_conf.get_tool_config_alias()
                    if toolalias not in criteria_elem_list_by_tool:
                        criteria_elem_list_by_tool[toolalias] = {}
                    criteria_elem_list_by_tool[toolalias][criterion] = None
                continue

            ERROR_HANDLER.assert_true(\
                    len(criteria_element_list_by_criteria[criterion]) != 0, \
                    "Empty criteria element list for criterion "\
                                            +criterion.get_str(), __file__)
            for crit_elem in criteria_element_list_by_criteria[criterion]:
                toolalias, elem = DriversUtils.reverse_meta_element(crit_elem)
                if toolalias not in criteria_elem_list_by_tool:
                    criteria_elem_list_by_tool[toolalias] = {}
                if criterion not in criteria_elem_list_by_tool[toolalias]:
                    criteria_elem_list_by_tool[toolalias][criterion] = []
                criteria_elem_list_by_tool[toolalias][criterion].append(elem)

            ERROR_HANDLER.assert_true(len(set(criteria_elem_list_by_tool) - \
                                set(self.criteria_configured_tools)) == 0, \
                                "some tools in data not registered", __file__)

        crit2tool2matrixfile = {cv: {} for cv in criterion_to_matrix}
        crit2tool2outhashfile = {cv: {} for cv in criterion_to_executionoutput}
        for ctoolalias in tool2criteria:
            _criteria2matrix = {}
            _criteria2outhash = {}
            for criterion in tool2criteria[ctoolalias]:
                _criteria2matrix[criterion] = os.path.join(matrices_dir_tmp, \
                                                criterion.get_field_value()
                                                                + '-'
                                                                + ctoolalias
                                                                + '.csv')
                if criterion_to_executionoutput is None or \
                            criterion_to_executionoutput[criterion] is None:
                    _criteria2outhash[criterion] = None
                else:
                    _criteria2outhash[criterion] = \
                                            os.path.join(matrices_dir_tmp, \
                                                criterion.get_field_value()
                                                        + '-'
                                                        + ctoolalias
                                                        + '.outloghash.json')
                crit2tool2matrixfile[criterion][ctoolalias] = \
                                                    _criteria2matrix[criterion]
                crit2tool2outhashfile[criterion][ctoolalias] = \
                                                _criteria2outhash[criterion]

            # @Checkpoint: Check whether already executed
            if checkpoint_handler.is_to_execute( \
                                        func_name=cp_func_name, \
                                        taskid=cp_task_id, \
                                        tool=ctoolalias):
                for criterion in _criteria2matrix:
                    _criteria2matrix[criterion] = \
                                        common_matrices.ExecutionMatrix( \
                                        filename=_criteria2matrix[criterion], \
                                        non_key_col_list=testcases)
                    if _criteria2outhash[criterion] is not None:
                        _criteria2outhash[criterion] = \
                                        common_matrices.OutputLogData( \
                                        filename=_criteria2outhash[criterion])
                # Actual execution
                ctool = self.criteria_configured_tools[ctoolalias][\
                                                            self.TOOL_OBJ_KEY]
                ctool.runtests_criteria_coverage(testcases, \
                                criteria_element_list_by_criteria=\
                                       criteria_elem_list_by_tool[ctoolalias],\
                                criterion_to_matrix=_criteria2matrix, \
                                criterion_to_executionoutput=\
                                                            _criteria2outhash,\
                                re_instrument_code=re_instrument_code, \
                                cover_criteria_elements_once=\
                                                cover_criteria_elements_once, \
                                prioritization_module_by_criteria=\
                                            prioritization_module_by_criteria)

                # Checkpointing
                checkpoint_handler.do_checkpoint( \
                                        func_name=cp_func_name, \
                                        taskid=cp_task_id, \
                                        tool=ctoolalias)

        # Aggregate the matrices and out hashes
        ## Create result matrices and out hashes
        result_matrices = {}
        result_outloghashes = {}
        for criterion in criterion_to_matrix:
            result_matrices[criterion] = common_matrices.ExecutionMatrix( \
                                filename=criterion_to_matrix[criterion], \
                                non_key_col_list=testcases)
            if criterion_to_executionoutput[criterion] is None:
                result_outloghashes[criterion] = None
            else:
                result_outloghashes[criterion] = \
                            common_matrices.OutputLogData(filename=\
                                    criterion_to_executionoutput[criterion])
                ERROR_HANDLER.assert_true(\
                            crit2tool2outhashfile[criterion] is not None,
                            "Bug: log enabled but hidden from tool", __file__)
        ## Actual aggregate
        logging.debug("saving results ...")
        for criterion in result_matrices:
            result_matrix = result_matrices[criterion]
            result_outloghash = result_outloghashes[criterion]
            for mtoolalias in crit2tool2matrixfile[criterion]:
                tool_matrix = common_matrices.ExecutionMatrix(\
                        filename=crit2tool2matrixfile[criterion][mtoolalias])

                # Check columns
                ERROR_HANDLER.assert_true(tool_matrix.get_key_colname() == \
                                            result_matrix.get_key_colname(), \
                                    "mismatch on key column name", __file__)
                ERROR_HANDLER.assert_true( \
                                set(tool_matrix.get_nonkey_colname_list()) == \
                                set(result_matrix.get_nonkey_colname_list()), \
                                "mismatch on non key column names", __file__)

                # bring in the data
                key2nonkeydict = tool_matrix.to_pandas_df().\
                        set_index(tool_matrix.get_key_colname(), drop=True).\
                                                to_dict(orient="index")

                for c_key in key2nonkeydict:
                    meta_c_key = DriversUtils.make_meta_element(\
                                                        str(c_key), mtoolalias)
                    result_matrix.add_row_by_key(meta_c_key,
                                                 key2nonkeydict[c_key],
                                                 serialize=False)

                # out log hash
                if crit2tool2outhashfile[criterion] is not None:
                    tool_outloghash = common_matrices.OutputLogData(\
                            filename=\
                                crit2tool2outhashfile[criterion][mtoolalias])
                    for objective, objective_data in \
                                tool_outloghash.get_zip_objective_and_data():
                        meta_objective = DriversUtils.make_meta_element(\
                                                    str(objective), mtoolalias)
                        result_outloghash.add_data(
                                            {meta_objective: objective_data}, \
                                            serialize=False)

            # @Checkpoint: Check whether already executed
            if checkpoint_handler.is_to_execute( \
                                        func_name=cp_func_name, \
                                        taskid=cp_task_id + 1,
                                        tool=criterion.get_str()):
                # Serialized the computed matrix
                result_matrix.serialize()
                if result_outloghash is not None:
                    result_outloghash.serialize()
            # @Checkpoint: Checkpointing
            checkpoint_handler.do_checkpoint( \
                                    func_name=cp_func_name, \
                                    taskid=cp_task_id + 1,
                                    tool=criterion.get_str())

        # Delete the temporary tool matrices directory
        if os.path.isdir(matrices_dir_tmp):
            shutil.rmtree(matrices_dir_tmp)

        # @Checkpoint: Finished
        detailed_exectime = {}
        for ctoolalias in tool2criteria:
            ct = self.criteria_configured_tools[ctoolalias][self.TOOL_OBJ_KEY]
            detailed_exectime[ctoolalias] = (\
                        ct.get_checkpointer().get_execution_time(),\
                        ct.get_checkpointer().get_detailed_execution_time())

        checkpoint_handler.set_finished(\
                                    detailed_exectime_obj=detailed_exectime)

        if finish_destroy_checkpointer:
            checkpoint_handler.destroy()
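A minimal usage sketch for runtests_criteria_coverage, assuming a configured meta criteria tool instance named meta_criteria_tool and an enabled criterion object named criterion (both names, and the file paths, are illustrative assumptions, not taken from the snippet):

# Illustrative call: testcases use an assumed "toolalias:testname" meta format.
meta_criteria_tool.runtests_criteria_coverage(
        testcases=["toolA:test_add", "toolA:test_sub"],
        criterion_to_matrix={criterion: "STATEMENT_COVERAGE.csv"},
        criterion_to_executionoutput={criterion: "STATEMENT_outlog.json"},
        restart_checkpointer=True)
# Once serialized, the aggregated matrix maps each meta criterion element
# to the tests that cover it.
cov_matrix = common_matrices.ExecutionMatrix(filename="STATEMENT_COVERAGE.csv")
element_to_covering_tests = cov_matrix.query_active_columns_of_rows()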
예제 #19
0
    def update_matrix_to_cover_when_diference(cls, \
                                target_matrix_file, target_outdata_file, \
                                comparing_vector_file, comparing_outdata_file):
        ERROR_HANDLER.assert_true(target_matrix_file is not None \
                                and comparing_vector_file is not None, \
                                "target or comparing matrix is None", __file__)

        target_matrix = common_matrices.ExecutionMatrix(\
                                                filename=target_matrix_file)
        comparing_vector = common_matrices.ExecutionMatrix(\
                                                filename=comparing_vector_file)

        ERROR_HANDLER.assert_true(\
                            set(target_matrix.get_nonkey_colname_list()) == \
                            set(comparing_vector.get_nonkey_colname_list()), \
                                            "Mismatch of columns", __file__)

        # Get uncertain
        target_uncertain_cols_dict = \
                                target_matrix.query_uncertain_columns_of_rows()
        vector_uncertain_cols = \
                            comparing_vector.query_uncertain_columns_of_rows()
        vector_uncertain_cols = \
                    set(vector_uncertain_cols[list(vector_uncertain_cols)[0]])

        # Check whether outdata are available and proceed accordingly
        if target_outdata_file is not None and \
                                            comparing_outdata_file is not None:
            # outdata are set; use the output values to decide on differences
            target_outdata = common_matrices.OutputLogData(\
                                                filename=target_outdata_file)
            vector_outdata = common_matrices.OutputLogData(\
                                            filename=comparing_outdata_file)

            vector_outdata_uniq, _ = list(\
                                vector_outdata.get_zip_objective_and_data())[0]

            ## Compare using output
            key_to_diffs = {}
            for key, key_data in target_outdata.get_zip_objective_and_data():
                intersect = set(vector_outdata_uniq) & set(key_data)
                key_to_diffs[key] = (set(vector_outdata_uniq) | \
                                                    set(key_data)) - intersect
                for elem in intersect:
                    if key_data[elem] != vector_outdata_uniq[elem]:
                        key_to_diffs[key].add(elem)
        else:
            # outdata not set; use the difference of the matrices
            ## obtain the active cols for each rows
            target_active_cols_dict = \
                                target_matrix.query_active_columns_of_rows()
            
            vector_active_cols = \
                                comparing_vector.query_active_columns_of_rows()
            vector_active_cols = \
                        set(vector_active_cols[list(vector_active_cols)[0]])

            ## for each row of target matrix, get diff
            key_to_diffs = {}
            for row_key, row_active in target_active_cols_dict.items():
                key_to_diffs[row_key] = (set(row_active) - vector_active_cols)\
                                    | (vector_active_cols - set(row_active))

        # Update matrix based on diff
        for key, diffs in key_to_diffs.items():
            # gather the uncertain and set to uncertain
            uncertain = vector_uncertain_cols | \
                                        set(target_uncertain_cols_dict[key])
            set_uncertain = uncertain & diffs
            diffs -= uncertain

            values = {}
            for col in set_uncertain:
                values[col] = target_matrix.getUncertainCellDefaultVal()
            for col in diffs:
                values[col] = target_matrix.getActiveCellDefaultVal()

            if len(values) > 0:
                target_matrix.update_cells(key, values)

        target_matrix.serialize()
    #~ def update_matrix_to_cover_when_diference()
#~class DriversUtils()
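A minimal sketch of calling update_matrix_to_cover_when_diference, assuming the method is exposed as a classmethod of DriversUtils (the decorator is not shown above) and that the file names are placeholders. It marks as active, in the target matrix, the columns whose output (or verdict) differs from the single-row comparing vector, and as uncertain those where either side is uncertain:

# Illustrative file names; any serialized ExecutionMatrix/OutputLogData pair works.
DriversUtils.update_matrix_to_cover_when_diference(
        target_matrix_file="STRONG_MUTATION.csv",
        target_outdata_file="STRONG_MUTATION_output.json",
        comparing_vector_file="PASSFAIL.csv",
        comparing_outdata_file="program_output.json")
# STRONG_MUTATION.csv is updated in place and re-serialized: each row now has
# active cells for the tests whose behavior differs from the comparing vector;
# cells not in the difference keep their original values.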
예제 #20
0
    def runtests(self, meta_testcases=None, exe_path_map=None, env_vars=None, \
                        stop_on_failure=False, \
                        per_test_timeout=None, \
                        use_recorded_timeout_times=None, \
                        recalculate_execution_times=False, \
                        fault_test_execution_matrix_file=None, \
                        fault_test_execution_execoutput_file=None, \
                        with_output_summary=True, \
                        hash_outlog=None, \
                        test_prioritization_module=None, \
                        parallel_test_count=1, \
                        parallel_test_scheduler=None, \
                        restart_checkpointer=False,
                        finish_destroy_checkpointer=True):
        '''
        Execute the list of test cases with the given executable and
        report, for each test case, whether it failed

        :param meta_testcases: list of test cases to execute
        :param exe_path_map: string representing the file system path to 
                        the executable to execute with the tests
        :param env_vars: dict of environment variables to set before
                        executing each test ({<variable>: <value>})
        :param stop_on_failure: decide whether to stop the test 
                        execution once a test fails
        :param fault_test_execution_matrix_file: Optional matrix file
                        to store the tests' pass/fail execution data
        :param fault_test_execution_execoutput_file: Optional output log file
                        to store the tests' actual execution output (hashed)
        :param with_output_summary: decide whether to return the outlog hash
        :type hash_outlog: bool
        :param hash_outlog: decide whether to hash the outlog or not
        :param test_prioritization_module: Specify the test prioritization
                        module. 
                        (TODO: Implement support)
        :param parallel_test_count: Specify the number of parallel test
                        executions. Must be an integer >= 1 or None.
                        When None, the maximum possible value is used.
        :param parallel_test_scheduler: Specify the function that will
                        handle parallel test scheduling by tool, using
                        the test execution optimizer. 
                        (TODO: Implement support)

        :type restart_checkpointer: bool
        :param restart_checkpointer: Decide whether to discard checkpoint
                        and restart anew.

        :type finish_destroy_checkpointer: bool
        :param finish_destroy_checkpointer: Decide whether to automatically
                        destroy the checkpointer when done or not.
                        Useful if the caller has a checkpointer to update.

        :returns: a pair (list of two dicts): the tests' failure verdicts and
                 their execution output logs. The first element has the form
                 {<test case name>: <True if failed, False if passed,
                    UNCERTAIN_TEST_VERDICT if uncertain>}
                 If stop_on_failure is True, the tests not executed after the
                 failure get the uncertain verdict.
        '''

        ERROR_HANDLER.assert_true(meta_testcases is not None, \
                                            "Must specify testcases", __file__)

        # FIXME: Make sure that the support is implemented for
        # parallelism and test prioritization. Remove the code below
        # once supported:
        ERROR_HANDLER.assert_true(test_prioritization_module is None, \
                        "Must implement test prioritization support here", \
                                                                    __file__)
        ERROR_HANDLER.assert_true(parallel_test_scheduler is None, \
                    "Must implement parallel tests execution support here", \
                                                                    __file__)
        #~FIXMEnd

        # Check arguments Validity
        if exe_path_map is None:
            exe_path_map = self._get_default_exe_path_map()

        if hash_outlog is None:
            hash_outlog = self.hash_outlog

        ERROR_HANDLER.assert_true(parallel_test_count is None \
                                        or parallel_test_count >= 1, \
                                "invalid parallel tests count ({})".format(\
                                                parallel_test_count), __file__)

        # @Checkpoint: create a checkpoint handler
        cp_func_name = "runtests"
        cp_task_id = 1
        checkpoint_handler = \
                CheckPointHandler(self.get_checkpoint_state_object())
        if restart_checkpointer:
            checkpoint_handler.restart()
        if checkpoint_handler.is_finished():
            logging.warning("%s %s" %("The function 'runtests' is finished", \
                "according to checkpoint, but called again. None returned"))
            if common_mix.confirm_execution("%s %s" % ( \
                                        "Function 'runtests' is already", \
                                        "finished, do you want to restart?")):
                checkpoint_handler.restart()
                logging.info("Restarting the finished 'runtests'")
            else:
                ERROR_HANDLER.error_exit(err_string="%s %s %s" % (\
                        "Execution halted. Cannot continue because no value", \
                        " can be returned. Check the results of the", \
                        "finished execution"), call_location=__file__)

        # @Checkpoint: Get the saved payload (data kept for each tool)
        # pair list of testfailed verdict and execution output
        meta_test_failedverdicts_outlog = \
                                    checkpoint_handler.get_optional_payload()
        if meta_test_failedverdicts_outlog is None:
            meta_test_failedverdicts_outlog = [{}, {}]

        # Make sure the tests are unique
        ERROR_HANDLER.assert_true(len(meta_testcases) == \
                                                len(set(meta_testcases)), \
                                        "not all tests are unique", __file__)

        # For fdupes: replace duplicate tests with their kept representatives
        if len(self.tests_duplicates_map) > 0:
            meta_testcases_backup = meta_testcases
            meta_testcases = set(meta_testcases)
            dups_remove_meta_testcases = meta_testcases & \
                                                set(self.tests_duplicates_map)
            dup_toadd_test = {self.tests_duplicates_map[v] for v in \
                                dups_remove_meta_testcases} - meta_testcases
            meta_testcases = (meta_testcases - dups_remove_meta_testcases) \
                                                            | dup_toadd_test

        testcases_by_tool = {}
        for meta_testcase in meta_testcases:
            ttoolalias, testcase = \
                            DriversUtils.reverse_meta_element(meta_testcase)
            if ttoolalias not in testcases_by_tool:
                testcases_by_tool[ttoolalias] = []
            testcases_by_tool[ttoolalias].append(testcase)

        candidate_aliases = []
        for tpos, ttoolalias in enumerate(testcases_by_tool.keys()):
            # @Checkpoint: Check whether already executed
            if not checkpoint_handler.is_to_execute(func_name=cp_func_name, \
                                                taskid=cp_task_id, \
                                                tool=ttoolalias):
                continue
            candidate_aliases.append(ttoolalias)

        # parallelism strategies
        PARA_FULL_DOUBLE = 0
        PARA_ALT_TOOLS_AND_TESTS = 1
        PARA_TOOLS_ONLY = 2
        PARA_TOOLS_TESTS_AS_TOOLS = 3
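        # (Descriptive summary of the strategies, inferred from the code below:)
        # PARA_FULL_DOUBLE: run tools in parallel and also spread the remaining
        #   thread budget over the tools that can parallelize their own tests
        # PARA_ALT_TOOLS_AND_TESTS: tools that can run their own tests in
        #   parallel run one after the other with all threads; the remaining
        #   tools run in parallel with each other
        # PARA_TOOLS_ONLY: parallelize across tools only
        # PARA_TOOLS_TESTS_AS_TOOLS: split one tool's tests and run the same
        #   tool multiple times (not implemented)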

        parallel_strategy = PARA_TOOLS_ONLY

        # minimum number of tests (across all tools) for parallelism
        ptest_tresh = 5
        # minimum number of tests (of the given tool) for tool parallelism
        sub_ptest_thresh = 3

        shared_loc = multiprocessing.RLock()

        parallel_test_count_by_tool = {ta: 1 for ta in candidate_aliases}

        # tool with parallel test exec
        # TODO: find way to pass parallel count here
        if parallel_test_count is None:
            #parallel_test_count = min(10, multiprocessing.cpu_count())
            parallel_test_count = min(20, 2 * multiprocessing.cpu_count())

        cand_alias_joblib = []
        cand_alias_for = []

        para_tools = []
        para_tools = [tt for tt in candidate_aliases if \
                            (len(testcases_by_tool[tt]) >= sub_ptest_thresh \
                              and self.testcases_configured_tools[tt]\
                               [self.TOOL_OBJ_KEY].can_run_tests_in_parallel())
                        ]

        actual_parallel_cond = len(candidate_aliases) > 1 \
                                     and len(meta_testcases) >= ptest_tresh \
                                        and parallel_test_count is not None \
                                        and parallel_test_count > 1

        if parallel_strategy == PARA_ALT_TOOLS_AND_TESTS:
            # the para_tools run one after the other (no cross-tool
            # parallelism); give each of them all threads for its own tests
            for tt in para_tools:
                parallel_test_count_by_tool[tt] = parallel_test_count
            seq_tools = list(set(candidate_aliases) - set(para_tools))
            if len(seq_tools) > 1 and actual_parallel_cond:
                cand_alias_joblib = seq_tools
                cand_alias_for = para_tools
            else:
                cand_alias_for = candidate_aliases
        elif parallel_strategy == PARA_TOOLS_ONLY:
            if actual_parallel_cond:
                cand_alias_joblib = candidate_aliases
            else:
                cand_alias_for = candidate_aliases
        elif parallel_strategy == PARA_FULL_DOUBLE:
            # use parallel
            sub_parallel_count = 0 if parallel_test_count is None else \
                        parallel_test_count - len(parallel_test_count_by_tool)
            if sub_parallel_count > 0:
                para_tools.sort(reverse=True, \
                                        key=lambda x: len(testcases_by_tool[x]))
                para_tools_n_tests = sum(\
                            [len(testcases_by_tool[tt]) for tt in para_tools])

                used = 0
                for tt in para_tools:
                    quota = int(len(testcases_by_tool[tt]) * \
                                    sub_parallel_count / para_tools_n_tests)
                    parallel_test_count_by_tool[tt] += quota
                    used += quota
                for tt in para_tools:
                    if used == sub_parallel_count:
                        break
                    parallel_test_count_by_tool[tt] += 1

            if actual_parallel_cond:
                cand_alias_joblib = candidate_aliases
            else:
                cand_alias_for = candidate_aliases
        elif parallel_strategy == PARA_TOOLS_TESTS_AS_TOOLS:
            # split the tests of one tool and
            # make the same tool run multiple times
            ERROR_HANDLER.error_exit("To Be implemented: same tool many times")
        else:
            ERROR_HANDLER.error_exit("Invalid parallel startegy")

        def tool_parallel_test_exec(ttoolalias):
            # Actual execution
            found_a_failure = False
            # Whether the execution was unsuccessful
            test_error = False
            ttool = \
                self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            test_failed_verdicts, test_execoutput = ttool.runtests( \
                                testcases_by_tool[ttoolalias], \
                                exe_path_map, env_vars, \
                                stop_on_failure, \
                                per_test_timeout=per_test_timeout,
                                use_recorded_timeout_times=\
                                    use_recorded_timeout_times, \
                                recalculate_execution_times=\
                                    recalculate_execution_times, \
                                with_output_summary=\
                                            with_output_summary, \
                                hash_outlog=hash_outlog, \
                                parallel_count=\
                                    parallel_test_count_by_tool[ttoolalias])
            with shared_loc:
                for testcase in test_failed_verdicts:
                    meta_testcase = DriversUtils.make_meta_element(\
                                                        testcase, ttoolalias)
                    meta_test_failedverdicts_outlog[0][meta_testcase] = \
                                                test_failed_verdicts[testcase]
                    meta_test_failedverdicts_outlog[1][meta_testcase] = \
                                                    test_execoutput[testcase]
                    if not found_a_failure \
                                and test_failed_verdicts[testcase] == \
                                common_mix.GlobalConstants.FAIL_TEST_VERDICT:
                        found_a_failure = True
                    if not test_error \
                                and test_failed_verdicts[testcase] == \
                            common_mix.GlobalConstants.TEST_EXECUTION_ERROR:
                        test_error = True

                # @Checkpoint: Checkpointing
                checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=ttoolalias, \
                                opt_payload=meta_test_failedverdicts_outlog)
            return found_a_failure, test_error

        #~ def tool_parallel_test_exec()

        if len(cand_alias_joblib) > 0:
            parallel_count_ = min(len(cand_alias_joblib), parallel_test_count)
            joblib.Parallel(n_jobs=parallel_count_, require='sharedmem')\
                    (joblib.delayed(tool_parallel_test_exec)(ttoolalias) \
                        for ttoolalias in cand_alias_joblib)
        if len(cand_alias_for) > 0:
            for tpos, ttoolalias in enumerate(cand_alias_for):
                found_a_failure, test_error = \
                                        tool_parallel_test_exec(ttoolalias)
                if stop_on_failure and found_a_failure:
                    # @Checkpoint: Checkpointing for remaining tools
                    for rem_tool in list(testcases_by_tool.keys())[tpos + 1:]:
                        checkpoint_handler.do_checkpoint(\
                                func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=rem_tool, \
                                opt_payload=meta_test_failedverdicts_outlog)
                    break

        if stop_on_failure:
            # Make sure the non-executed tests have the uncertain value (None)
            if len(meta_test_failedverdicts_outlog[0]) < len(meta_testcases):
                for meta_testcase in set(meta_testcases) - \
                                    set(meta_test_failedverdicts_outlog[0]):
                    meta_test_failedverdicts_outlog[0][meta_testcase] = \
                            common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT
                    meta_test_failedverdicts_outlog[1][meta_testcase] = \
                            common_matrices.OutputLogData.\
                                                    UNCERTAIN_TEST_OUTLOGDATA

        ERROR_HANDLER.assert_true(len(meta_test_failedverdicts_outlog[0]) == \
                                                        len(meta_testcases), \
                    "mismatch between number of tests and reported verdicts:"
                    + " Tests without verdict are {};".format(\
                               set(meta_testcases) - \
                                    set(meta_test_failedverdicts_outlog[0])) \
                    + " Test not in testlist are {}.".format(\
                               set(meta_test_failedverdicts_outlog[0]) - \
                                    set(meta_testcases)), \
                                                                     __file__)

        # For fdupes
        if len(self.tests_duplicates_map) > 0:
            meta_testcases = meta_testcases_backup
            for i in (0, 1):
                for mtest in dups_remove_meta_testcases:
                    # add to results
                    meta_test_failedverdicts_outlog[i][mtest] = copy.deepcopy(\
                                        meta_test_failedverdicts_outlog[i]\
                                            [self.tests_duplicates_map[mtest]])
                for mtest in dup_toadd_test:
                    # remove from results
                    del meta_test_failedverdicts_outlog[i][mtest]

        if fault_test_execution_matrix_file is not None:
            # Load or Create the matrix
            fault_test_execution_matrix = common_matrices.ExecutionMatrix( \
                                filename=fault_test_execution_matrix_file, \
                                            non_key_col_list=meta_testcases)
            ERROR_HANDLER.assert_true(fault_test_execution_matrix.is_empty(), \
                                "matrix must be empty. Filename is:"
                                " "+fault_test_execution_matrix_file, __file__)
            failverdict2val = {
                common_mix.GlobalConstants.FAIL_TEST_VERDICT: \
                        fault_test_execution_matrix.getActiveCellDefaultVal(),
                common_mix.GlobalConstants.PASS_TEST_VERDICT: \
                            fault_test_execution_matrix.getInactiveCellVal(),
                common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT: \
                    fault_test_execution_matrix.getUncertainCellDefaultVal(),
            }
            cells_dict = {}
            for meta_testcase in meta_test_failedverdicts_outlog[0]:
                cells_dict[meta_testcase] = failverdict2val[\
                            meta_test_failedverdicts_outlog[0][meta_testcase]]

            fault_test_execution_matrix.add_row_by_key(self.FAULT_MATRIX_KEY, \
                                                cells_dict, serialize=True)

        if fault_test_execution_execoutput_file is not None:
            # Load or Create the data object
            fault_test_execution_execoutput = common_matrices.OutputLogData( \
                                filename=fault_test_execution_execoutput_file)
            ERROR_HANDLER.assert_true(\
                            fault_test_execution_execoutput.is_empty(), \
                                        "outlog data must be empty", __file__)
            fault_test_execution_execoutput.add_data(\
                                    {self.PROGRAM_EXECOUTPUT_KEY: \
                                         meta_test_failedverdicts_outlog[1]}, \
                                                                serialize=True)

        # @Checkpoint: Finished
        detailed_exectime = {}
        for ttoolalias in testcases_by_tool.keys():
            tt = self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            detailed_exectime[ttoolalias] = (\
                        tt.get_checkpointer().get_execution_time(),\
                        tt.get_checkpointer().get_detailed_execution_time())

        checkpoint_handler.set_finished( \
                                    detailed_exectime_obj=detailed_exectime)

        if finish_destroy_checkpointer:
            checkpoint_handler.destroy()

        return meta_test_failedverdicts_outlog
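A minimal usage sketch of runtests, assuming a configured meta test tool instance named meta_test_tool and illustrative meta test names (the exact meta-element format produced by DriversUtils.make_meta_element is not shown in the snippets):

# Illustrative only: names and file path are assumptions.
verdicts, outlogs = meta_test_tool.runtests(
        meta_testcases=["toolA:test_add", "toolA:test_sub"],
        stop_on_failure=False,
        fault_test_execution_matrix_file="PASSFAIL.csv")
# Collect the tests whose verdict is a failure.
failing_tests = [t for t, v in verdicts.items()
                 if v == common_mix.GlobalConstants.FAIL_TEST_VERDICT]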
예제 #21
0
    def _get_test_objective_tests_from_matrix(self, matrix_file):
        matrix = common_matrices.ExecutionMatrix(filename=matrix_file)
        res = matrix.query_active_columns_of_rows()
        return res
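A minimal sketch of the mapping returned by the helper above, with illustrative row and column names; each row key (a test objective, e.g. a mutant) maps to the list of tests whose matrix cells are active:

# Illustrative call and result shape; the key format is an assumption.
objective_to_tests = self._get_test_objective_tests_from_matrix(
                                                    "STRONG_MUTATION.csv")
# e.g. {"toolM:mutant_42": ["toolA:test_add", "toolA:test_sub"], ...}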