def getFaultRevealingMutants (strong_mutant_kill_matrix_file, \
                                expected_program_output_file, \
                                program_output_file, \
                                threshold=1.0, \
                                selected_tests=None):
    """
    This function compute the set of fault revealing mutants.
    
    The inputs are:
    - mutant kill matrix file, 
    - expected program output file, Used to see which test fails
    - obtained program output file, Used to see which test fails
    - threshold, in case a relaxed fault revealing is looked for
    - selected tests, in case part of the tests should be used
    
    :return: A pair is returned, with first element the set of fault revealing
            Mutants, and second element, a dict with key the mutants and values
            the fault revelation ratio 
            ('# test kill and find fault' divided (/) '# test that kill')
            For equivalent mutants, the division isinvalid, 
            we set the value to -1
    """
    prog_out = common_matrices.OutputLogData(filename=program_output_file)
    exp_prog_out = common_matrices.OutputLogData(\
                                            filename=expected_program_output_file)
    _, prog_out_uniq = list(prog_out.get_zip_objective_and_data())[0]
    _, exp_prog_out_uniq = list(exp_prog_out.get_zip_objective_and_data())[0]
    
    if set(prog_out_uniq) != set(exp_prog_out_uniq):
        logging.warning("Test mismatch between program output and expected!")
    intersect = set(prog_out_uniq) & set(exp_prog_out_uniq)
    fault_tests = set()
    for elem in intersect:
        ol_equiv = common_matrices.OutputLogData.outlogdata_equiv(\
                                    prog_out_uniq[elem], exp_prog_out_uniq[elem])
        if not ol_equiv:
            ERROR_HANDLER.assert_true (elem not in fault_tests, \
                                                        "duplicate test", __file__)
            fault_tests.add(elem)
            
    # get mutant to killing test dict
    kill_matrix = common_matrices.ExecutionMatrix(\
                                          filename=strong_mutant_kill_matrix_file)
    mut_to_killtests = kill_matrix.query_active_columns_of_rows()
    
    # remove unselected tests
    if selected_tests is not None:
        selected_tests = set(selected_tests)
        for mut, tests in mut_to_killtests.items():
            mut_to_killtests[mut] = set(tests) & selected_tests
    
    mutant_to_fr = {}
    for mut, tests in mut_to_killtests.items():
        kill_fr = len(set(tests) & fault_tests)
        kill_all = len(tests)
        if kill_all == 0:
            # Equivalent (never killed): the ratio is undefined, use -1
            mutant_to_fr[mut] = -1.0
        else:
            # Killable
            mutant_to_fr[mut] = kill_fr * 1.0 / kill_all

    # Select the mutants whose fault revelation ratio reaches the threshold
    # (completion following the docstring's return contract)
    fault_revealing_mutants = {mut for mut, fr in mutant_to_fr.items() \
                                                        if fr >= threshold}
    return fault_revealing_mutants, mutant_to_fr
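
# A minimal usage sketch (illustrative only): the file names below are
# hypothetical and are assumed to have been produced by a prior mutation
# analysis run with the same test suite.
def _example_get_fault_revealing_mutants():
    fr_muts, fr_ratios = getFaultRevealingMutants(
                                "STRONG_MUTATION.csv",
                                "expected_program_output.json",
                                "program_output.json",
                                threshold=1.0)
    print("{} fault revealing mutants".format(len(fr_muts)))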
Example #2
def load(resdir, fault_revealing=True):
    # load fault revealing tests
    fail_tests = None
    if fault_revealing:
        f_file = os.path.join(resdir, "fail_test_checking",
                              "fault_reveling_tests.txt")
        fail_tests = []
        with open(f_file) as f:
            for line in f:
                fail_tests.append(line.strip())

    # load all tests
    post_pf_file = os.path.join(resdir, "post", "RESULTS_DATA", "matrices",
                                "PASSFAIL.csv")
    post_pf_mat = common_matrices.ExecutionMatrix(post_pf_file)
    all_tests_post = post_pf_mat.get_nonkey_colname_list()
    pre_pf_file = os.path.join(resdir, "pre", "RESULTS_DATA", "matrices",
                               "PASSFAIL.csv")
    pre_pf_mat = common_matrices.ExecutionMatrix(pre_pf_file)
    all_tests_pre = pre_pf_mat.get_nonkey_colname_list()

    # Consider tests that are not flaky in both pre and post
    all_tests = list(set(all_tests_pre) & set(all_tests_post))

    # load execution outputs
    pre_orig_outlog_file = os.path.join(resdir, "pre", "RESULTS_DATA",
                                        "testexecution_outputs",
                                        "program_output.json")
    pre_muts_outlog_file = os.path.join(resdir, "pre", "RESULTS_DATA",
                                        "testexecution_outputs",
                                        "STRONG_MUTATION_output.json")
    post_orig_outlog_file = os.path.join(resdir, "post", "RESULTS_DATA",
                                         "testexecution_outputs",
                                         "program_output.json")
    post_muts_outlog_file = os.path.join(resdir, "post", "RESULTS_DATA",
                                         "testexecution_outputs",
                                         "STRONG_MUTATION_output.json")

    pre_orig_outlog = common_matrices.OutputLogData(pre_orig_outlog_file)
    pre_muts_outlog = common_matrices.OutputLogData(pre_muts_outlog_file)
    post_orig_outlog = common_matrices.OutputLogData(post_orig_outlog_file)
    post_muts_outlog = common_matrices.OutputLogData(post_muts_outlog_file)

    # Compute relevant mutants
    relevant_mutants_to_relevant_tests = get_relevant_mutants_to_relevant_tests(
        pre_orig_outlog, pre_muts_outlog, post_orig_outlog, post_muts_outlog)

    # load matrices and compute mutant killtest mapping
    post_sm_file = os.path.join(resdir, "post", "RESULTS_DATA", "matrices",
                                "STRONG_MUTATION.csv")
    post_sm_mat = common_matrices.ExecutionMatrix(post_sm_file)
    mutants_to_killingtests = post_sm_mat.query_active_columns_of_rows()
    tests_to_killed_mutants = post_sm_mat.query_active_rows_of_columns()

    # return data
    return (all_tests, fail_tests, relevant_mutants_to_relevant_tests,
            mutants_to_killingtests, tests_to_killed_mutants)
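
# A minimal usage sketch (illustrative only): "res_dir" stands for a
# hypothetical results directory laid out with the "pre", "post" and
# "fail_test_checking" sub-folders that load() expects.
def _example_load(res_dir):
    (all_tests, fail_tests, relevant_mutants_to_relevant_tests,
     mutants_to_killingtests, tests_to_killed_mutants) = load(res_dir)
    print("{} non-flaky tests, {} relevant mutants".format(
            len(all_tests), len(relevant_mutants_to_relevant_tests)))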
Example #3
def merge_lexecoutput_into_right(lexecoutput_file, rexecoutput_file):
    if not os.path.isfile(rexecoutput_file):
        shutil.copy2(lexecoutput_file, rexecoutput_file)
    else:
        lexecoutput = common_matrices.OutputLogData(\
                                                filename=lexecoutput_file)
        rexecoutput = common_matrices.OutputLogData(\
                                                filename=rexecoutput_file)
        rexecoutput.update_with_other(lexecoutput)
        rexecoutput.serialize()
Example #4
def merge_lexecoutput_into_right(lexecoutput_file, rexecoutput_file):
    if not os.path.isfile(rexecoutput_file):
        shutil.copy2(lexecoutput_file, rexecoutput_file)
    else:
        lexecoutput = common_matrices.OutputLogData(\
                                                filename=lexecoutput_file)
        rexecoutput = common_matrices.OutputLogData(\
                                                filename=rexecoutput_file)
        rexecoutput.update_with_other(lexecoutput, override_existing=True,\
                                ask_confirmation_with_exist_missing=True)
        rexecoutput.serialize()
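
# A minimal usage sketch (illustrative only): the .json file names are
# hypothetical partial execution-output logs produced by separate runs that
# get merged, one after the other, into a single log file.
def _example_merge_outputs():
    for part in ("run1_output.json", "run2_output.json"):
        merge_lexecoutput_into_right(part, "merged_output.json")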
Example #5
def _extract_list(folder, res_list_file):
    _, old_o = list(
        common_matrices.OutputLogData(
            os.path.join(
                folder, 'old',
                'program_output.json')).get_zip_objective_and_data())[0]
    _, new_o = list(
        common_matrices.OutputLogData(
            os.path.join(
                folder, 'new',
                'program_output.json')).get_zip_objective_and_data())[0]
    assert len(old_o) == len(new_o)
    diff_tests = []
    for tc in old_o:
        eq = common_matrices.OutputLogData.outlogdata_equiv(
            old_o[tc], new_o[tc])
        assert eq is not None, "PB"
        if not eq:
            diff_tests.append(tc)
    with open(res_list_file, "w") as f:
        for tc in diff_tests:
            f.write(tc + "\n")
    print("# list printed into {}".format(res_list_file))
Example #6
    def check_get_flakiness(self, meta_testcases, repeat_count=2, \
                                              get_flaky_tests_outputs=True):
        """
            Check whether tests are flaky by running them multiple times
            :return: The list of flaky tests
        """
        ERROR_HANDLER.assert_true(
            repeat_count > 1, "Cannot check flakiness"
            " with less than on repetition", __file__)

        if os.path.isdir(self.flakiness_workdir):
            shutil.rmtree(self.flakiness_workdir)
        os.mkdir(self.flakiness_workdir)

        outlog_files = [os.path.join(self.flakiness_workdir, \
                            str(of)+'-out.json') for of in range(repeat_count)]
        matrix_files = [os.path.join(self.flakiness_workdir, \
                            str(of)+'-mat.csv') for of in range(repeat_count)]

        def run(rep, test_list, hash_outlog):
            self.runtests(test_list, \
                        fault_test_execution_matrix_file=matrix_files[rep], \
                        fault_test_execution_execoutput_file=\
                                                            outlog_files[rep],\
                        with_output_summary=True, \
                        hash_outlog=hash_outlog)

        #~ def run()

        # Execute with repetition and get output summaries
        for rep in range(repeat_count):
            run(rep, meta_testcases, True)

        # get flaky tests list
        flaky_tests = set()
        fix_outdata = common_matrices.OutputLogData(filename=outlog_files[0])
        fix_outdata = list(fix_outdata.get_zip_objective_and_data())[0][1]
        for i in range(1, repeat_count):
            other_outdata = common_matrices.OutputLogData(\
                                                    filename=outlog_files[i])
            other_outdata = list(\
                            other_outdata.get_zip_objective_and_data())[0][1]
            for test, t_dat_fix in fix_outdata.items():
                if test in flaky_tests:
                    continue
                t_dat_other = other_outdata[test]
                if not common_matrices.OutputLogData.outlogdata_equiv(\
                                                    t_dat_fix, t_dat_other):
                    flaky_tests.add(test)

        ## cleanup
        for f in outlog_files + matrix_files:
            if os.path.isfile(f):
                os.remove(f)

        # get flaky tests outputs
        if get_flaky_tests_outputs and len(flaky_tests) > 0:
            for rep in range(repeat_count):
                run(rep, list(flaky_tests), False)
            flaky_test_list_file = os.path.join(self.flakiness_workdir, \
                                                        "flaky_test_list.json")
            logging.warning("There were some flaky tests (see file {})"\
                                                .format(flaky_test_list_file))
            common_fs.dumpJSON(list(flaky_tests), flaky_test_list_file, \
                                                                pretty=True)
        return list(flaky_tests)
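
    # A minimal usage sketch (illustrative only): "meta_test_tool" stands for
    # a hypothetical instance of the class defining check_get_flakiness().
    # E.g.:
    #
    #   flaky = meta_test_tool.check_get_flakiness(all_meta_tests,
    #                                              repeat_count=3)
    #   stable = [t for t in all_meta_tests if t not in set(flaky)]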
Example #7
    def runtests(self, meta_testcases=None, exe_path_map=None, env_vars=None, \
                        stop_on_failure=False, \
                        per_test_timeout=None, \
                        use_recorded_timeout_times=None, \
                        recalculate_execution_times=False, \
                        fault_test_execution_matrix_file=None, \
                        fault_test_execution_execoutput_file=None, \
                        with_output_summary=True, \
                        hash_outlog=None, \
                        test_prioritization_module=None, \
                        parallel_test_count=1, \
                        parallel_test_scheduler=None, \
                        restart_checkpointer=False,
                        finish_destroy_checkpointer=True):
        '''
        Execute the list of test cases with the given executable and 
        say, for each test case, whether it failed

        :param meta_testcases: list of test cases to execute
        :param exe_path_map: string representing the file system path to 
                        the executable to execute with the tests
        :param env_vars: dict of environment variables to set before
                        executing each test ({<variable>: <value>})
        :param stop_on_failure: decide whether to stop the test 
                        execution once a test fails
        :param fault_test_execution_matrix_file: Optional matrix file 
                        to store the tests' pass fail execution data
        :param fault_test_execution_execoutput_file: Optional output log file 
                        to store the tests' execution actual output (hashed)
        :param with_output_summary: decide whether to return the outlog hash 
        :type hash_outlog: bool
        :param hash_outlog: decide whether to hash the outlog or not
        :param test_prioritization_module: Specify the test prioritization
                        module. 
                        (TODO: Implement support)
        :param parallel_test_count: Specify the number of parallel test
                        executions. Must be an integer >= 1 or None.
                        When None, the maximum possible value is used.
        :param parallel_test_scheduler: Specify the function that will
                        handle parallel test scheduling by tool, using
                        the test execution optimizer. 
                        (TODO: Implement support)

        :type restart_checkpointer: bool
        :param restart_checkpointer: Decide whether to discard the checkpoint
                        and restart anew.

        :type finish_destroy_checkpointer: bool
        :param finish_destroy_checkpointer: Decide whether to automatically 
                        destroy the checkpointer when done or not.
                        Useful if the caller has a checkpointer to update. 

        :returns: dict of testcase and their failed verdict.
                 {<test case name>: <True if failed, False if passed,
                    UNCERTAIN_TEST_VERDICT if uncertain>}
                 If stop_on_failure is True, only return the tests that 
                 have been executed until the failure
        '''

        ERROR_HANDLER.assert_true(meta_testcases is not None, \
                                            "Must specify testcases", __file__)

        # FIXME: Make sure that support is implemented for
        # parallelism and test prioritization. Remove the code below
        # once supported:
        ERROR_HANDLER.assert_true(test_prioritization_module is None, \
                        "Must implement test prioritization support here", \
                                                                    __file__)
        ERROR_HANDLER.assert_true(parallel_test_scheduler is None, \
                    "Must implement parallel tests execution support here", \
                                                                    __file__)
        #~FIXMEnd

        # Check arguments Validity
        if exe_path_map is None:
            exe_path_map = self._get_default_exe_path_map()

        if hash_outlog is None:
            hash_outlog = self.hash_outlog

        ERROR_HANDLER.assert_true(parallel_test_count is None \
                                        or parallel_test_count >= 1, \
                                "invalid parallel tests count ({})".format(\
                                                parallel_test_count), __file__)

        # @Checkpoint: create a checkpoint handler
        cp_func_name = "runtests"
        cp_task_id = 1
        checkpoint_handler = \
                CheckPointHandler(self.get_checkpoint_state_object())
        if restart_checkpointer:
            checkpoint_handler.restart()
        if checkpoint_handler.is_finished():
            logging.warning("%s %s" %("The function 'runtests' is finished", \
                "according to checkpoint, but called again. None returned"))
            if common_mix.confirm_execution("%s %s" % ( \
                                        "Function 'runtests' is already", \
                                        "finished, do you want to restart?")):
                checkpoint_handler.restart()
                logging.info("Restarting the finished 'runtests'")
            else:
                ERROR_HANDLER.error_exit(err_string="%s %s %s" % (\
                        "Execution halted. Cannot continue because no value", \
                        " can be returned. Check the results of the", \
                        "finished execution"), call_location=__file__)

        # @Checkpoint: Get the saved payload (data kept for each tool)
        # pair list of testfailed verdict and execution output
        meta_test_failedverdicts_outlog = \
                                    checkpoint_handler.get_optional_payload()
        if meta_test_failedverdicts_outlog is None:
            meta_test_failedverdicts_outlog = [{}, {}]

        # Make sure the tests are unique
        ERROR_HANDLER.assert_true(len(meta_testcases) == \
                                                len(set(meta_testcases)), \
                                        "not all tests are unique", __file__)

        # For fdupes
        if len(self.tests_duplicates_map) > 0:
            meta_testcases_backup = meta_testcases
            meta_testcases = set(meta_testcases)
            dups_remove_meta_testcases = meta_testcases & \
                                                set(self.tests_duplicates_map)
            dup_toadd_test = {self.tests_duplicates_map[v] for v in \
                                dups_remove_meta_testcases} - meta_testcases
            meta_testcases = (meta_testcases - dups_remove_meta_testcases) \
                                                            | dup_toadd_test

        testcases_by_tool = {}
        for meta_testcase in meta_testcases:
            ttoolalias, testcase = \
                            DriversUtils.reverse_meta_element(meta_testcase)
            if ttoolalias not in testcases_by_tool:
                testcases_by_tool[ttoolalias] = []
            testcases_by_tool[ttoolalias].append(testcase)

        candidate_aliases = []
        for tpos, ttoolalias in enumerate(testcases_by_tool.keys()):
            # @Checkpoint: Check whether already executed
            if not checkpoint_handler.is_to_execute(func_name=cp_func_name, \
                                                taskid=cp_task_id, \
                                                tool=ttoolalias):
                continue
            candidate_aliases.append(ttoolalias)

        # parallelism strategies
        PARA_FULL_DOUBLE = 0
        PARA_ALT_TOOLS_AND_TESTS = 1
        PARA_TOOLS_ONLY = 2
        PARA_TOOLS_TESTS_AS_TOOLS = 3

        parallel_strategy = PARA_TOOLS_ONLY

        # minimum number of tests (across all tools) for parallelism
        ptest_thresh = 5
        # minimum number of tests (of the given tool) for tool parallelism
        sub_ptest_thresh = 3

        shared_loc = multiprocessing.RLock()

        parallel_test_count_by_tool = {ta: 1 for ta in candidate_aliases}

        # tool with parallel test exec
        # TODO: find way to pass parallel count here
        if parallel_test_count is None:
            #parallel_test_count = min(10, multiprocessing.cpu_count())
            parallel_test_count = min(20, 2 * multiprocessing.cpu_count())

        cand_alias_joblib = []
        cand_alias_for = []

        para_tools = []
        para_tools = [tt for tt in candidate_aliases if \
                            (len(testcases_by_tool[tt]) >= sub_ptest_thresh \
                              and self.testcases_configured_tools[tt]\
                               [self.TOOL_OBJ_KEY].can_run_tests_in_parallel())
                        ]

        actual_parallel_cond = len(candidate_aliases) > 1 \
                                     and len(meta_testcases) >= ptest_thresh \
                                        and parallel_test_count is not None \
                                        and parallel_test_count > 1

        if parallel_strategy == PARA_ALT_TOOLS_AND_TESTS:
            # the para_tools will run without parallelism, give them all threads
            for tt in para_tools:
                parallel_test_count_by_tool[tt] = parallel_test_count
            seq_tools = list(set(candidate_aliases) - set(para_tools))
            if len(seq_tools) > 1 and actual_parallel_cond:
                cand_alias_joblib = seq_tools
                cand_alias_for = para_tools
            else:
                cand_alias_for = candidate_aliases
        elif parallel_strategy == PARA_TOOLS_ONLY:
            if actual_parallel_cond:
                cand_alias_joblib = candidate_aliases
            else:
                cand_alias_for = candidate_aliases
        elif parallel_strategy == PARA_FULL_DOUBLE:
            # use parallel
            sub_parallel_count = 0 if parallel_test_count is None else \
                        parallel_test_count - len(parallel_test_count_by_tool)
            if sub_parallel_count > 0:
                para_tools.sort(reverse=True, \
                                        key=lambda x: len(testcases_by_tool[x]))
                para_tools_n_tests = sum(\
                            [len(testcases_by_tool[tt]) for tt in para_tools])

                used = 0
                for tt in para_tools:
                    quota = int(len(testcases_by_tool[tt]) * \
                                    sub_parallel_count / para_tools_n_tests)
                    parallel_test_count_by_tool[tt] += quota
                    used += quota
                for tt in para_tools:
                    if used == sub_parallel_count:
                        break
                    parallel_test_count_by_tool[tt] += 1

            if actual_parallel_cond:
                cand_alias_joblib = candidate_aliases
            else:
                cand_alias_for = candidate_aliases
        elif parallel_strategy == PARA_TOOLS_TESTS_AS_TOOLS:
            # split the tests of one tool and
            # make the same tool run multiple times
            ERROR_HANDLER.error_exit("To Be implemented: same tool many times")
        else:
            ERROR_HANDLER.error_exit("Invalid parallel startegy")

        def tool_parallel_test_exec(ttoolalias):
            # Actual execution
            found_a_failure = False
            # Whether the execution was unsuccessful
            test_error = False
            ttool = \
                self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            test_failed_verdicts, test_execoutput = ttool.runtests( \
                                testcases_by_tool[ttoolalias], \
                                exe_path_map, env_vars, \
                                stop_on_failure, \
                                per_test_timeout=per_test_timeout,
                                use_recorded_timeout_times=\
                                    use_recorded_timeout_times, \
                                recalculate_execution_times=\
                                    recalculate_execution_times, \
                                with_output_summary=\
                                            with_output_summary, \
                                hash_outlog=hash_outlog, \
                                parallel_count=\
                                    parallel_test_count_by_tool[ttoolalias])
            with shared_loc:
                for testcase in test_failed_verdicts:
                    meta_testcase = DriversUtils.make_meta_element(\
                                                        testcase, ttoolalias)
                    meta_test_failedverdicts_outlog[0][meta_testcase] = \
                                                test_failed_verdicts[testcase]
                    meta_test_failedverdicts_outlog[1][meta_testcase] = \
                                                    test_execoutput[testcase]
                    if not found_a_failure \
                                and test_failed_verdicts[testcase] == \
                                common_mix.GlobalConstants.FAIL_TEST_VERDICT:
                        found_a_failure = True
                    if not test_error \
                                and test_failed_verdicts[testcase] == \
                            common_mix.GlobalConstants.TEST_EXECUTION_ERROR:
                        test_error = True

                # @Checkpoint: Checkpointing
                checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=ttoolalias, \
                                opt_payload=meta_test_failedverdicts_outlog)
            return found_a_failure, test_error

        #~ def tool_parallel_test_exec()

        if len(cand_alias_joblib) > 0:
            parallel_count_ = min(len(cand_alias_joblib), parallel_test_count)
            joblib.Parallel(n_jobs=parallel_count_, require='sharedmem')\
                    (joblib.delayed(tool_parallel_test_exec)(ttoolalias) \
                        for ttoolalias in cand_alias_joblib)
        if len(cand_alias_for) > 0:
            for tpos, ttoolalias in enumerate(cand_alias_for):
                found_a_failure, test_error = \
                                        tool_parallel_test_exec(ttoolalias)
                if stop_on_failure and found_a_failure:
                    # @Checkpoint: Checkpointing for remaining tools
                    for rem_tool in list(testcases_by_tool.keys())[tpos + 1:]:
                        checkpoint_handler.do_checkpoint(\
                                func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=rem_tool, \
                                opt_payload=meta_test_failedverdicts_outlog)
                    break

        if stop_on_failure:
            # Make sure the non-executed tests have the uncertain value (None)
            if len(meta_test_failedverdicts_outlog[0]) < len(meta_testcases):
                for meta_testcase in set(meta_testcases) - \
                                    set(meta_test_failedverdicts_outlog[0]):
                    meta_test_failedverdicts_outlog[0][meta_testcase] = \
                            common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT
                    meta_test_failedverdicts_outlog[1][meta_testcase] = \
                            common_matrices.OutputLogData.\
                                                    UNCERTAIN_TEST_OUTLOGDATA

        ERROR_HANDLER.assert_true(len(meta_test_failedverdicts_outlog[0]) == \
                                                        len(meta_testcases), \
                    "mismatch between number of tests and reported verdicts:"
                    + " Tests without verdict are {};".format(\
                               set(meta_testcases) - \
                                    set(meta_test_failedverdicts_outlog[0])) \
                    + " Test not in testlist are {}.".format(\
                               set(meta_test_failedverdicts_outlog[0]) - \
                                    set(meta_testcases)), \
                                                                     __file__)

        # For fdupes
        if len(self.tests_duplicates_map) > 0:
            meta_testcases = meta_testcases_backup
            for i in (0, 1):
                for mtest in dups_remove_meta_testcases:
                    # add to results
                    meta_test_failedverdicts_outlog[i][mtest] = copy.deepcopy(\
                                        meta_test_failedverdicts_outlog[i]\
                                            [self.tests_duplicates_map[mtest]])
                for mtest in dup_toadd_test:
                    # remove from results
                    del meta_test_failedverdicts_outlog[i][mtest]

        if fault_test_execution_matrix_file is not None:
            # Load or Create the matrix
            fault_test_execution_matrix = common_matrices.ExecutionMatrix( \
                                filename=fault_test_execution_matrix_file, \
                                            non_key_col_list=meta_testcases)
            ERROR_HANDLER.assert_true(fault_test_execution_matrix.is_empty(), \
                                "matrix must be empty. Filename is:"
                                " "+fault_test_execution_matrix_file, __file__)
            failverdict2val = {
                common_mix.GlobalConstants.FAIL_TEST_VERDICT: \
                        fault_test_execution_matrix.getActiveCellDefaultVal(),
                common_mix.GlobalConstants.PASS_TEST_VERDICT: \
                            fault_test_execution_matrix.getInactiveCellVal(),
                common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT: \
                    fault_test_execution_matrix.getUncertainCellDefaultVal(),
            }
            cells_dict = {}
            for meta_testcase in meta_test_failedverdicts_outlog[0]:
                cells_dict[meta_testcase] = failverdict2val[\
                            meta_test_failedverdicts_outlog[0][meta_testcase]]

            fault_test_execution_matrix.add_row_by_key(self.FAULT_MATRIX_KEY, \
                                                cells_dict, serialize=True)

        if fault_test_execution_execoutput_file is not None:
            # Load or Create the data object
            fault_test_execution_execoutput = common_matrices.OutputLogData( \
                                filename=fault_test_execution_execoutput_file)
            ERROR_HANDLER.assert_true(\
                            fault_test_execution_execoutput.is_empty(), \
                                        "outlog data must be empty", __file__)
            fault_test_execution_execoutput.add_data(\
                                    {self.PROGRAM_EXECOUTPUT_KEY: \
                                         meta_test_failedverdicts_outlog[1]}, \
                                                                serialize=True)

        # @Checkpoint: Finished
        detailed_exectime = {}
        for ttoolalias in testcases_by_tool.keys():
            tt = self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            detailed_exectime[ttoolalias] = (\
                        tt.get_checkpointer().get_execution_time(),\
                        tt.get_checkpointer().get_detailed_execution_time())

        checkpoint_handler.set_finished( \
                                    detailed_exectime_obj=detailed_exectime)

        if finish_destroy_checkpointer:
            checkpoint_handler.destroy()

        return meta_test_failedverdicts_outlog
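
    # A minimal usage sketch (illustrative only): "meta_test_tool" is a
    # hypothetical instance of the class defining runtests(); the matrix and
    # output-log file names are hypothetical too. E.g.:
    #
    #   verdicts, outlogs = meta_test_tool.runtests(
    #                   meta_testcases=meta_tests,
    #                   fault_test_execution_matrix_file="PASSFAIL.csv",
    #                   fault_test_execution_execoutput_file="prog_out.json",
    #                   hash_outlog=True)
    #   failing = [t for t, v in verdicts.items()
    #              if v == common_mix.GlobalConstants.FAIL_TEST_VERDICT]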
Example #8
    def update_matrix_to_cover_when_difference(cls, \
                                target_matrix_file, target_outdata_file, \
                                comparing_vector_file, comparing_outdata_file):
        ERROR_HANDLER.assert_true(target_matrix_file is not None \
                                and comparing_vector_file is not None, \
                                "target or comparing matrix is None", __file__)

        target_matrix = common_matrices.ExecutionMatrix(\
                                                filename=target_matrix_file)
        comparing_vector = common_matrices.ExecutionMatrix(\
                                                filename=comparing_vector_file)

        ERROR_HANDLER.assert_true(len(\
                            set(target_matrix.get_nonkey_colname_list()) - \
                            set(comparing_vector.get_nonkey_colname_list()) \
                                     ) == 0, "Mismatch of columns", __file__)

        # Get uncertain
        target_uncertain_cols_dict = \
                                target_matrix.query_uncertain_columns_of_rows()
        vector_uncertain_cols = \
                            comparing_vector.query_uncertain_columns_of_rows()
        vector_uncertain_cols = \
                    set(vector_uncertain_cols[list(vector_uncertain_cols)[0]])

        # Check whether outdata files are given and proceed accordingly
        if target_outdata_file is not None and \
                                            comparing_outdata_file is not None:
            # outdata are set: use outdata values to decide on differences
            target_outdata = common_matrices.OutputLogData(\
                                                filename=target_outdata_file)
            vector_outdata = common_matrices.OutputLogData(\
                                            filename=comparing_outdata_file)

            _, vector_outdata_uniq = list(\
                                vector_outdata.get_zip_objective_and_data())[0]

            ## Compare using output
            timedout_key = common_matrices.OutputLogData.TIMEDOUT
            key_to_diffs = {}
            for key, key_data in target_outdata.get_zip_objective_and_data():
                intersect = set(vector_outdata_uniq) & set(key_data)
                ERROR_HANDLER.assert_true(set(key_data) == intersect, \
                            "The elements in target must all be in vector",\
                                                                    __file__)
                key_to_diffs[key] = set()
                for elem in intersect:
                    ol_equiv = common_matrices.OutputLogData.outlogdata_equiv(\
                                    key_data[elem], vector_outdata_uniq[elem])
                    if not ol_equiv:
                        key_to_diffs[key].add(elem)
        else:
            # outdata is not set: use the difference of matrices
            ## obtain the active cols for each rows
            target_active_cols_dict = \
                                target_matrix.query_active_columns_of_rows()

            vector_active_cols = \
                                comparing_vector.query_active_columns_of_rows()
            vector_active_cols = \
                        set(vector_active_cols[list(vector_active_cols)[0]])

            ## for each row of target matrix, get diff
            target_matrix_allcol = set(target_matrix.get_nonkey_colname_list())
            key_to_diffs = {}
            for row_key, row_active in target_active_cols_dict.items():
                key_to_diffs[row_key] = (set(row_active) - vector_active_cols)\
                                    | (vector_active_cols - set(row_active))
                key_to_diffs[row_key] &= target_matrix_allcol

        # Clear matrix to inactive
        target_matrix.clear_cells_to_value(target_matrix.getInactiveCellVal())

        # Update matrix based on diff
        for key, diffs in key_to_diffs.items():
            # gather the uncertain and set to uncertain
            uncertain = vector_uncertain_cols | \
                                        set(target_uncertain_cols_dict[key])
            set_uncertain = uncertain & diffs
            diffs -= uncertain

            values = {}
            for col in set_uncertain:
                values[col] = target_matrix.getUncertainCellDefaultVal()
            for col in diffs:
                values[col] = target_matrix.getActiveCellDefaultVal()

            if len(values) > 0:
                target_matrix.update_cells(key, values)

        target_matrix.serialize()
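
    # A minimal usage sketch (illustrative only): "MatricesHandler" is a
    # hypothetical name for the class defining this classmethod, and the file
    # names are hypothetical. E.g.:
    #
    #   MatricesHandler.update_matrix_to_cover_when_difference(
    #           "STRONG_MUTATION.csv", "STRONG_MUTATION_output.json",
    #           "fault_vector.csv", "fault_program_output.json")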
    def runtests(self, meta_testcases=None, exe_path_map=None, env_vars=None, \
                        stop_on_failure=False, \
                        per_test_timeout=None, \
                        use_recorded_timeout_times=None, \
                        recalculate_execution_times=False, \
                        fault_test_execution_matrix_file=None, \
                        fault_test_execution_execoutput_file=None, \
                        with_outlog_hash=True, \
                        test_prioritization_module=None, \
                        parallel_test_count=1, \
                        parallel_test_scheduler=None, \
                        restart_checkpointer=False,
                        finish_destroy_checkpointer=True):
        '''
        Execute the list of test cases with the given executable and 
        say, for each test case, whether it failed

        :param meta_testcases: list of test cases to execute
        :param exe_path_map: string representing the file system path to 
                        the executable to execute with the tests
        :param env_vars: dict of environment variables to set before
                        executing each test ({<variable>: <value>})
        :param stop_on_failure: decide whether to stop the test 
                        execution once a test fails
        :param fault_test_execution_matrix_file: Optional matrix file 
                        to store the tests' pass fail execution data
        :param fault_test_execution_execoutput_file: Optional output log file 
                        to store the tests' execution actual output (hashed)
        :param with_outlog_hash: decide whether to return outlog hash 
        :param test_prioritization_module: Specify the test prioritization
                        module. 
                        (TODO: Implement support)
        :param parallel_test_count: Specify the number of parallel test
                        executions. Must be an integer >= 1
        :param parallel_test_scheduler: Specify the function that will
                        handle parallel test scheduling by tool, using
                        the test execution optimizer. 
                        (TODO: Implement support)

        :type restart_checkpointer: bool
        :param restart_checkpointer: Decide whether to discard the checkpoint
                        and restart anew.

        :type finish_destroy_checkpointer: bool
        :param finish_destroy_checkpointer: Decide whether to automatically 
                        destroy the checkpointer when done or not.
                        Useful if the caller has a checkpointer to update. 

        :returns: dict of testcase and their failed verdict.
                 {<test case name>: <True if failed, False if passed,
                    UNCERTAIN_TEST_VERDICT if uncertain>}
                 If stop_on_failure is True, only return the tests that 
                 have been executed until the failure
        '''

        # FIXME: Make sure that support is implemented for
        # parallelism and test prioritization. Remove the code below
        # once supported:
        ERROR_HANDLER.assert_true(test_prioritization_module is None, \
                        "Must implement test prioritization support here", \
                                                                    __file__)
        ERROR_HANDLER.assert_true(parallel_test_count <= 1, \
                    "Must implement parallel tests execution support here", \
                                                                    __file__)
        ERROR_HANDLER.assert_true(parallel_test_scheduler is None, \
                    "Must implement parallel tests execution support here", \
                                                                    __file__)
        #~FIXMEnd

        # Check arguments Validity
        if exe_path_map is None:
            exe_path_map = self._get_default_exe_path_map()

        ERROR_HANDLER.assert_true(parallel_test_count > 0, \
                    "invalid parallel test execution count: {}. {}".format( \
                                    parallel_test_count, "must be >= 1"))

        # @Checkpoint: create a checkpoint handler
        cp_func_name = "runtests"
        cp_task_id = 1
        checkpoint_handler = \
                CheckPointHandler(self.get_checkpoint_state_object())
        if restart_checkpointer:
            checkpoint_handler.restart()
        if checkpoint_handler.is_finished():
            logging.warning("%s %s" %("The function 'runtests' is finished", \
                "according to checkpoint, but called again. None returned"))
            if common_mix.confirm_execution("%s %s" % ( \
                                        "Function 'runtests' is already", \
                                        "finished, do you want to restart?")):
                checkpoint_handler.restart()
                logging.info("Restarting the finished 'runtests'")
            else:
                ERROR_HANDLER.error_exit(err_string="%s %s %s" % (\
                        "Execution halted. Cannot continue because no value", \
                        " can be returned. Check the results of the", \
                        "finished execution"), call_location=__file__)

        # @Checkpoint: Get the saved payload (data kept for each tool)
        # pair list of testfailed verdict and execution output
        meta_test_failedverdicts_outlog = \
                                    checkpoint_handler.get_optional_payload()
        if meta_test_failedverdicts_outlog is None:
            meta_test_failedverdicts_outlog = [{}, {}]

        # Make sure the tests are unique
        ERROR_HANDLER.assert_true(len(meta_testcases) == \
                                                len(set(meta_testcases)), \
                                        "not all tests are unique", __file__)

        testcases_by_tool = {}
        for meta_testcase in meta_testcases:
            ttoolalias, testcase = \
                            DriversUtils.reverse_meta_element(meta_testcase)
            if ttoolalias not in testcases_by_tool:
                testcases_by_tool[ttoolalias] = []
            testcases_by_tool[ttoolalias].append(testcase)

        found_a_failure = False
        for tpos, ttoolalias in enumerate(testcases_by_tool.keys()):
            # @Checkpoint: Check whether already executed
            if not checkpoint_handler.is_to_execute(func_name=cp_func_name, \
                                                taskid=cp_task_id, \
                                                tool=ttoolalias):
                continue

            # Actual execution
            ttool = \
                self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            test_failed_verdicts, test_execoutput = ttool.runtests( \
                                            testcases_by_tool[ttoolalias], \
                                            exe_path_map, env_vars, \
                                            stop_on_failure, \
                                            per_test_timeout=per_test_timeout,
                                            use_recorded_timeout_times=\
                                                use_recorded_timeout_times, \
                                            recalculate_execution_times=\
                                                recalculate_execution_times, \
                                            with_outlog_hash=with_outlog_hash)
            for testcase in test_failed_verdicts:
                meta_testcase =  \
                        DriversUtils.make_meta_element(testcase, ttoolalias)
                meta_test_failedverdicts_outlog[0][meta_testcase] = \
                                                test_failed_verdicts[testcase]
                meta_test_failedverdicts_outlog[1][meta_testcase] = \
                                                    test_execoutput[testcase]
                if test_failed_verdicts[testcase] == \
                                common_mix.GlobalConstants.FAIL_TEST_VERDICT:
                    found_a_failure = True

            # @Checkpoint: Checkpointing
            checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=ttoolalias, \
                                opt_payload=meta_test_failedverdicts_outlog)

            if stop_on_failure and found_a_failure:
                # @Checkpoint: Checkpointing for remaining tools
                for rem_tool in list(testcases_by_tool.keys())[tpos + 1:]:
                    checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=rem_tool, \
                                opt_payload=meta_test_failedverdicts_outlog)
                break

        if stop_on_failure:
            # Make sure the non-executed tests have the uncertain value (None)
            if len(meta_test_failedverdicts_outlog[0]) < len(meta_testcases):
                for meta_testcase in set(meta_testcases) - \
                                    set(meta_test_failedverdicts_outlog[0]):
                    meta_test_failedverdicts_outlog[0][meta_testcase] = \
                            common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT
                    meta_test_failedverdicts_outlog[1][meta_testcase] = \
                            common_matrices.OutputLogData.\
                                                    UNCERTAIN_TEST_OUTLOGDATA

        ERROR_HANDLER.assert_true(len(meta_test_failedverdicts_outlog[0]) == \
                                                        len(meta_testcases), \
                            "Not all tests have a verdict reported", __file__)

        if fault_test_execution_matrix_file is not None:
            # Load or Create the matrix
            fault_test_execution_matrix = common_matrices.ExecutionMatrix( \
                                filename=fault_test_execution_matrix_file, \
                                            non_key_col_list=meta_testcases)
            ERROR_HANDLER.assert_true(fault_test_execution_matrix.is_empty(), \
                                            "matrix must be empty", __file__)
            failverdict2val = {
                common_mix.GlobalConstants.FAIL_TEST_VERDICT: \
                        fault_test_execution_matrix.getActiveCellDefaultVal(),
                common_mix.GlobalConstants.PASS_TEST_VERDICT: \
                            fault_test_execution_matrix.getInactiveCellVal(),
                common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT: \
                    fault_test_execution_matrix.getUncertainCellDefaultVal(),
            }
            cells_dict = {}
            for meta_testcase in meta_test_failedverdicts_outlog[0]:
                cells_dict[meta_testcase] = failverdict2val[\
                            meta_test_failedverdicts_outlog[0][meta_testcase]]

            fault_test_execution_matrix.add_row_by_key(self.FAULT_MATRIX_KEY, \
                                                cells_dict, serialize=True)

        if fault_test_execution_execoutput_file is None:
            meta_test_failedverdicts_outlog[1] = None
        else:
            # Load or Create the data object
            fault_test_execution_execoutput = common_matrices.OutputLogData( \
                                filename=fault_test_execution_execoutput_file)
            ERROR_HANDLER.assert_true(\
                            fault_test_execution_execoutput.is_empty(), \
                                        "outlog data must be empty", __file__)
            fault_test_execution_execoutput.add_data(\
                                    {self.PROGRAM_EXECOUTPUT_KEY: \
                                         meta_test_failedverdicts_outlog[1]}, \
                                                                serialize=True)

        # @Checkpoint: Finished
        detailed_exectime = {}
        for ttoolalias in testcases_by_tool.keys():
            tt = self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            detailed_exectime[ttoolalias] = (\
                        tt.get_checkpointer().get_execution_time(),\
                        tt.get_checkpointer().get_detailed_execution_time())

        checkpoint_handler.set_finished( \
                                    detailed_exectime_obj=detailed_exectime)

        if finish_destroy_checkpointer:
            checkpoint_handler.destroy()

        return meta_test_failedverdicts_outlog
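
    # A minimal usage sketch (illustrative only): this older variant takes
    # "with_outlog_hash" instead of "with_output_summary"/"hash_outlog";
    # "meta_test_tool" is a hypothetical instance of the defining class. E.g.:
    #
    #   verdicts, outlogs = meta_test_tool.runtests(
    #                   meta_testcases=meta_tests,
    #                   fault_test_execution_matrix_file="PASSFAIL.csv",
    #                   with_outlog_hash=True)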
    def runtests_criteria_coverage (self, testcases, criterion_to_matrix, \
                                    criterion_to_executionoutput=None,
                                    criteria_element_list_by_criteria=None, \
                                    re_instrument_code=False, \
                                    cover_criteria_elements_once=False,
                                    prioritization_module_by_criteria=None,
                                    parallel_count=1, \
                                    parallel_criteria_test_scheduler=None,\
                                    restart_checkpointer=False, \
                                    finish_destroy_checkpointer=True):
        ''' 
        Executes the instrumented executable code with testscases and
        returns the different code coverage matrices.

        :param testcases: list of testcases to execute

        :param criterion_to_matrix: dict of <criterion, Matrix file 
                        where to store coverage>. 
        :param criterion_to_executionoutput: dict of <criterion, execoutput 
                        file where to store coverage>. 
        
        :param criteria_element_list_by_criteria: dictionary representing the
                        list of criteria elements (stmts, branches, mutants)
                        to consider in the test execution matrices. 
                        Key is the criterion and the value the list of elements

        :param re_instrument_code: Decide whether to instrument the code 
                        before running the tests (for example, when 
                        instrumentation was not explicitly invoked 
                        beforehand). Defaults to False.

        :param cover_criteria_elements_once: Specify whether covering each
                        criterion element once is enough, meaning that we stop 
                        analysing a criterion element once a test covers it.
                        The remaining tests' covering verdicts will be UNKNOWN. 

        :param prioritization_module_by_criteria: dict of prioritization module
                        by criteria. None means no prioritization used.

        :type parallel_count: int
        :param parallel_count: number of parallel executions (must be >= 1).

        :type parallel_criteria_test_scheduler:
        :param parallel_criteria_test_scheduler: scheduler that organizes 
                        parallelism across criteria tools.
                        (TODO: Implement support)

        :type restart_checkpointer: bool
        :param restart_checkpointer: Decide whether to discard the checkpoint
                        and restart anew.

        :type finish_destroy_checkpointer: bool
        :param finish_destroy_checkpointer: Decide whether to automatically 
                        destroy the checkpointer when done or not.
        '''

        # FIXME: Make sure that support is implemented for
        # parallelism and test prioritization. Remove the code below
        # once supported:
        ERROR_HANDLER.assert_true(parallel_count <= 1, \
                    "Must implement parallel execution support here", \
                                                                    __file__)
        ERROR_HANDLER.assert_true(parallel_criteria_test_scheduler is None, \
            "Must implement parallel codes tests execution support here", \
                                                                    __file__)
        #~FIXMEnd

        # Check arguments Validity
        ERROR_HANDLER.assert_true(parallel_count > 0, \
                    "invalid parallel  execution count: {}. {}".format( \
                                    parallel_count, "must be >= 1"))

        # @Checkpoint: create a checkpoint handler
        cp_func_name = "runtests_criteria_coverage"
        cp_task_id = 1
        checkpoint_handler = CheckPointHandler( \
                                            self.get_checkpoint_state_object())
        if restart_checkpointer:
            checkpoint_handler.restart()
        if checkpoint_handler.is_finished():
            return

        ERROR_HANDLER.assert_true(len(criterion_to_matrix) > 0, \
                                        "no criterion is enabled", __file__)

        ERROR_HANDLER.assert_true(len(set(criterion_to_matrix) - \
                            set(self.tools_config_by_criterion_dict)) == 0, \
                    "Passed matrices output are more than tool specified", \
                                                                    __file__)

        if criterion_to_executionoutput is not None:
            ERROR_HANDLER.assert_true(set(criterion_to_matrix) == \
                                        set(criterion_to_executionoutput), \
                            "criteria mismatch between matrix and output", \
                                                                    __file__)

        tool2criteria = self._get_tool2criteria(criterion_to_matrix.keys())

        matrices_dir_tmp = os.path.join(self.criteria_working_dir, \
                                                            "codecov_dir.tmp")
        if os.path.isdir(matrices_dir_tmp):
            if restart_checkpointer:
                shutil.rmtree(matrices_dir_tmp)
                os.mkdir(matrices_dir_tmp)
        else:
            os.mkdir(matrices_dir_tmp)

        if criteria_element_list_by_criteria is None:
            criteria_element_list_by_criteria = \
                                        {c: None for c in criterion_to_matrix}

        # get criteria elements by tools
        criteria_elem_list_by_tool = {}
        for criterion in criteria_element_list_by_criteria:
            if criteria_element_list_by_criteria[criterion] is None:
                for t_conf in self.tools_config_by_criterion_dict[criterion]:
                    toolalias = t_conf.get_tool_config_alias()
                    if toolalias not in criteria_elem_list_by_tool:
                        criteria_elem_list_by_tool[toolalias] = {}
                    criteria_elem_list_by_tool[toolalias][criterion] = None
                continue

            ERROR_HANDLER.assert_true(\
                    len(criteria_element_list_by_criteria[criterion]) != 0, \
                    "Empty criteria element list for criterion "\
                                            +criterion.get_str(), __file__)
            for crit_elem in criteria_element_list_by_criteria[criterion]:
                toolalias, elem = DriversUtils.reverse_meta_element(crit_elem)
                if toolalias not in criteria_elem_list_by_tool:
                    criteria_elem_list_by_tool[toolalias] = {}
                if criterion not in criteria_elem_list_by_tool[toolalias]:
                    criteria_elem_list_by_tool[toolalias][criterion] = []
                criteria_elem_list_by_tool[toolalias][criterion].append(elem)

            ERROR_HANDLER.assert_true(len(set(criteria_elem_list_by_tool) - \
                                set(self.criteria_configured_tools)) == 0, \
                                "some tools in data not registered", __file__)

        crit2tool2matrixfile = {cv: {} for cv in criterion_to_matrix}
        crit2tool2outhashfile = {cv: {} for cv in criterion_to_executionoutput}
        for ctoolalias in tool2criteria:
            _criteria2matrix = {}
            _criteria2outhash = {}
            for criterion in tool2criteria[ctoolalias]:
                _criteria2matrix[criterion] = os.path.join(matrices_dir_tmp, \
                                                criterion.get_field_value()
                                                                + '-'
                                                                + ctoolalias
                                                                + '.csv')
                if criterion_to_executionoutput is None or \
                            criterion_to_executionoutput[criterion] is None:
                    _criteria2outhash[criterion] = None
                else:
                    _criteria2outhash[criterion] = \
                                            os.path.join(matrices_dir_tmp, \
                                                criterion.get_field_value()
                                                        + '-'
                                                        + ctoolalias
                                                        + '.outloghash.json')
                crit2tool2matrixfile[criterion][ctoolalias] = \
                                                    _criteria2matrix[criterion]
                crit2tool2outhashfile[criterion][ctoolalias] = \
                                                _criteria2outhash[criterion]

            # @Checkpoint: Check whether already executed
            if checkpoint_handler.is_to_execute( \
                                        func_name=cp_func_name, \
                                        taskid=cp_task_id, \
                                        tool=ctoolalias):
                for criterion in _criteria2matrix:
                    _criteria2matrix[criterion] = \
                                        common_matrices.ExecutionMatrix( \
                                        filename=_criteria2matrix[criterion], \
                                        non_key_col_list=testcases)
                    if _criteria2outhash[criterion] is not None:
                        _criteria2outhash[criterion] = \
                                        common_matrices.OutputLogData( \
                                        filename=_criteria2outhash[criterion])
                # Actual execution
                ctool = self.criteria_configured_tools[ctoolalias][\
                                                            self.TOOL_OBJ_KEY]
                ctool.runtests_criteria_coverage(testcases, \
                                criteria_element_list_by_criteria=\
                                       criteria_elem_list_by_tool[ctoolalias],\
                                criterion_to_matrix=_criteria2matrix, \
                                criterion_to_executionoutput=\
                                                            _criteria2outhash,\
                                re_instrument_code=re_instrument_code, \
                                cover_criteria_elements_once=\
                                                cover_criteria_elements_once, \
                                prioritization_module_by_criteria=\
                                            prioritization_module_by_criteria)

                # Checkpointing
                checkpoint_handler.do_checkpoint( \
                                        func_name=cp_func_name, \
                                        taskid=cp_task_id, \
                                        tool=ctoolalias)

        # Aggregate the matrices and out hashes
        ## Create result matrices and output log hashes
        result_matrices = {}
        result_outloghashes = {}
        for criterion in criterion_to_matrix:
            result_matrices[criterion] = common_matrices.ExecutionMatrix( \
                                filename=criterion_to_matrix[criterion], \
                                non_key_col_list=testcases)
            if criterion_to_executionoutput[criterion] is None:
                result_outloghashes[criterion] = None
            else:
                result_outloghashes[criterion] = \
                            common_matrices.OutputLogData(filename=\
                                    criterion_to_executionoutput[criterion])
                ERROR_HANDLER.assert_true(\
                            crit2tool2outhashfile[criterion] is not None,
                            "Bug: log enabled but hidden from tool", __file__)
        ## Actual aggregate
        logging.debug("saving results ...")
        for criterion in result_matrices:
            result_matrix = result_matrices[criterion]
            result_outloghash = result_outloghashes[criterion]
            for mtoolalias in crit2tool2matrixfile[criterion]:
                tool_matrix = common_matrices.ExecutionMatrix(\
                        filename=crit2tool2matrixfile[criterion][mtoolalias])

                # Check columns
                ERROR_HANDLER.assert_true(tool_matrix.get_key_colname() == \
                                            result_matrix.get_key_colname(), \
                                    "mismatch on key column name", __file__)
                ERROR_HANDLER.assert_true( \
                                set(tool_matrix.get_nonkey_colname_list()) == \
                                set(result_matrix.get_nonkey_colname_list()), \
                                "mismatch on non key column names", __file__)

                # bring in the data
                key2nonkeydict = tool_matrix.to_pandas_df().\
                        set_index(tool_matrix.get_key_colname(), drop=True).\
                                                to_dict(orient="index")

                for c_key in key2nonkeydict:
                    meta_c_key = DriversUtils.make_meta_element(\
                                                        str(c_key), mtoolalias)
                    result_matrix.add_row_by_key(meta_c_key,
                                                 key2nonkeydict[c_key],
                                                 serialize=False)

                # out log hash
                if crit2tool2outhashfile[criterion] is not None:
                    tool_outloghash = common_matrices.OutputLogData(\
                            filename=\
                                crit2tool2outhashfile[criterion][mtoolalias])
                    for objective, objective_data in \
                                tool_outloghash.get_zip_objective_and_data():
                        meta_objective = DriversUtils.make_meta_element(\
                                                    str(objective), mtoolalias)
                        result_outloghash.add_data(
                                            {meta_objective: objective_data}, \
                                            serialize=False)

            # @Checkpoint: Check whether already executed
            if checkpoint_handler.is_to_execute( \
                                        func_name=cp_func_name, \
                                        taskid=cp_task_id + 1,
                                        tool=criterion.get_str()):
                # Serialized the computed matrix
                result_matrix.serialize()
                if result_outloghash is not None:
                    result_outloghash.serialize()
            # @Checkpoint: Checkpointing
            checkpoint_handler.do_checkpoint( \
                                    func_name=cp_func_name, \
                                    taskid=cp_task_id + 1,
                                    tool=criterion.get_str())

        # Delete the temporary per-tool matrices directory
        if os.path.isdir(matrices_dir_tmp):
            shutil.rmtree(matrices_dir_tmp)

        # @Checkpoint: Finished
        detailed_exectime = {}
        for ctoolalias in tool2criteria:
            ct = self.criteria_configured_tools[ctoolalias][self.TOOL_OBJ_KEY]
            detailed_exectime[ctoolalias] = (\
                        ct.get_checkpointer().get_execution_time(),\
                        ct.get_checkpointer().get_detailed_execution_time())

        checkpoint_handler.set_finished(\
                                    detailed_exectime_obj=detailed_exectime)

        if finish_destroy_checkpointer:
            checkpoint_handler.destroy()
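
# --- Illustration (not part of the example above) ---
# A minimal, self-contained sketch of the per-row aggregation step used above:
# each tool matrix is turned into a {row_key: {column: value}} mapping via
# pandas' to_dict(orient="index"), and every row key is then namespaced with
# the tool alias before being added to the aggregated (meta) matrix.
# The prefixing helper below is hypothetical; the real code relies on
# DriversUtils.make_meta_element / reverse_meta_element, whose exact encoding
# is not shown in this example.
import pandas as pd

def _hypothetical_make_meta_key(key, toolalias, sep=':'):
    # Hypothetical stand-in for DriversUtils.make_meta_element.
    return toolalias + sep + str(key)

def aggregate_tool_rows(tool_df, key_colname, toolalias):
    # {row_key: {non_key_col: cell_value}} for one tool's matrix.
    key2nonkeydict = tool_df.set_index(key_colname, drop=True)\
                                                    .to_dict(orient="index")
    # Namespace each row key with the tool alias, as the meta matrix does.
    return {_hypothetical_make_meta_key(k, toolalias): row
                                    for k, row in key2nonkeydict.items()}

if __name__ == '__main__':
    tool_df = pd.DataFrame({'element': ['m1', 'm2'],
                            'test1': [1, 0],
                            'test2': [0, 1]})
    print(aggregate_tool_rows(tool_df, 'element', 'mart'))
    # {'mart:m1': {'test1': 1, 'test2': 0}, 'mart:m2': {'test1': 0, 'test2': 1}}
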
Exemple #11
    @classmethod
    def update_matrix_to_cover_when_diference(cls, \
                                target_matrix_file, target_outdata_file, \
                                comparing_vector_file, comparing_outdata_file):
        ERROR_HANDLER.assert_true(target_matrix_file is not None \
                                and comparing_vector_file is not None, \
                                "target or comparing matrix is None", __file__)

        target_matrix = common_matrices.ExecutionMatrix(\
                                                filename=target_matrix_file)
        comparing_vector = common_matrices.ExecutionMatrix(\
                                                filename=comparing_vector_file)

        ERROR_HANDLER.assert_true(\
                            set(target_matrix.get_nonkey_colname_list()) == \
                            set(comparing_vector.get_nonkey_colname_list()), \
                                            "Mismatch of columns", __file__)

        # Get the uncertain columns of each row
        target_uncertain_cols_dict = \
                                target_matrix.query_uncertain_columns_of_rows()
        vector_uncertain_cols = \
                            comparing_vector.query_uncertain_columns_of_rows()
        vector_uncertain_cols = \
                    set(vector_uncertain_cols[list(vector_uncertain_cols)[0]])
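        # The comparing matrix is expected to hold a single row (a vector);
        # only that first row's uncertain columns are used here.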

        # Check whether output data is available and proceed accordingly
        if target_outdata_file is not None and \
                                            comparing_outdata_file is not None:
            # outdata are set; use the output data values to decide the differences
            target_outdata = common_matrices.OutputLogData(\
                                                filename=target_outdata_file)
            vector_outdata = common_matrices.OutputLogData(\
                                            filename=comparing_outdata_file)

            _, vector_outdata_uniq = list(\
                                vector_outdata.get_zip_objective_and_data())[0]

            ## Compare using output
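            ## A test counts as a difference if it appears in only one of the
            ## two output logs, or appears in both with differing output data.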
            key_to_diffs = {}
            for key, key_data in target_outdata.get_zip_objective_and_data():
                intersect = set(vector_outdata_uniq) & set(key_data)
                key_to_diffs[key] = (set(vector_outdata_uniq) | \
                                                    set(key_data)) - intersect
                for elem in intersect:
                    if key_data[elem] != vector_outdata_uniq[elem]:
                        key_to_diffs[key].add(elem)
        else:
            # outdata is not set; use the difference of the matrices
            ## obtain the active cols for each rows
            target_active_cols_dict = \
                                target_matrix.query_active_columns_of_rows()
            
            vector_active_cols = \
                                comparing_vector.query_active_columns_of_rows()
            vector_active_cols = \
                        set(vector_active_cols[list(vector_active_cols)[0]])

            ## for each row of target matrix, get diff
            key_to_diffs = {}
            for row_key, row_active in target_active_cols_dict.items():
                key_to_diffs[row_key] = (set(row_active) - vector_active_cols)\
                                    | (vector_active_cols - set(row_active))

        # Update matrix based on diff
        for key, diffs in key_to_diffs.items():
            # gather the uncertain columns and set them to uncertain
            uncertain = vector_uncertain_cols | \
                                        set(target_uncertain_cols_dict[key])
            set_uncertain = uncertain & diffs
            diffs -= uncertain

            values = {}
            for col in set_uncertain:
                values[col] = target_matrix.getUncertainCellDefaultVal()
            for col in diffs:
                values[col] = target_matrix.getActiveCellDefaultVal()

            if len(values) > 0:
                target_matrix.update_cells(key, values)

        target_matrix.serialize()
    #~ def update_matrix_to_cover_when_diference()
#~class DriversUtils()
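
# --- Illustration (not part of the class above) ---
# A minimal, self-contained sketch of the matrix-based branch of
# update_matrix_to_cover_when_diference: for each row of the target matrix,
# the columns to flag are the symmetric difference between that row's active
# columns and the comparing vector's active columns; columns that are
# uncertain on either side are flagged as uncertain rather than active.
# Plain sets and dicts stand in for ExecutionMatrix here, and the strings
# 'ACTIVE' and 'UNCERTAIN' are placeholders for the values returned by
# getActiveCellDefaultVal() and getUncertainCellDefaultVal().

def diff_based_updates(target_active_by_row, target_uncertain_by_row, \
                                            vector_active, vector_uncertain):
    updates = {}
    for row_key, row_active in target_active_by_row.items():
        # symmetric difference of active columns
        diffs = set(row_active) ^ set(vector_active)
        uncertain = set(vector_uncertain) | \
                                    set(target_uncertain_by_row[row_key])
        set_uncertain = uncertain & diffs
        diffs -= uncertain
        values = {col: 'UNCERTAIN' for col in set_uncertain}
        values.update({col: 'ACTIVE' for col in diffs})
        if len(values) > 0:
            updates[row_key] = values
    return updates

if __name__ == '__main__':
    print(diff_based_updates(
                    target_active_by_row={'m1': {'t1', 't2'}, 'm2': {'t1'}},
                    target_uncertain_by_row={'m1': set(), 'm2': {'t3'}},
                    vector_active={'t1', 't3'},
                    vector_uncertain=set()))
    # {'m1': {'t2': 'ACTIVE', 't3': 'ACTIVE'}, 'm2': {'t3': 'UNCERTAIN'}}
    # (ordering within each row's dict may vary)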