def generate_tests(self, exe_path_map, \
                        meta_criteria_tool_obj=None, \
                        parallel_count=1, \
                        code_builds_factory_override=None, max_time=None):
    ''' Generate tests into the tests storage directory using this tool.
    '''
    logging.debug("# Generating tests with {} ...".format(\
                                    self.config.get_tool_config_alias()))

    # @Checkpoint: create a checkpoint handler (for time)
    checkpoint_handler = CheckPointHandler(self.get_checkpointer())
    if checkpoint_handler.is_finished():
        return

    outputdir = self.tests_storage_dir

    if code_builds_factory_override is None:
        code_builds_factory_override = self.code_builds_factory

    if os.path.isdir(outputdir):
        shutil.rmtree(outputdir)

    # If compressing the test storage dir, remove any stale archive when
    # working on the default test storage dir
    if self.compress_test_storage_dir \
                        and not os.path.isdir(self.tests_storage_dir):
        if os.path.isfile(self.tests_storage_dir_archive):
            os.remove(self.tests_storage_dir_archive)

    os.mkdir(outputdir)

    self._do_generate_tests(exe_path_map, \
                        code_builds_factory=code_builds_factory_override, \
                        meta_criteria_tool_obj=meta_criteria_tool_obj, \
                        max_time=max_time)

    # Compress the test storage dir?
    if self.compress_test_storage_dir:
        if os.path.isdir(self.tests_storage_dir):
            common_fs.TarGz.compressDir(self.tests_storage_dir, \
                                        self.tests_storage_dir_archive, \
                                        remove_in_directory=False)

    # @Checkpoint: Finished (for time)
    checkpoint_handler.set_finished(None)
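# Hedged usage sketch (illustration only, not part of this module's API): how
# a driver might call `generate_tests` above. The name `testcase_tool` and the
# time budget are assumptions.
#
#   testcase_tool.generate_tests(exe_path_map,
#                                meta_criteria_tool_obj=None,  # uninstrumented run
#                                max_time=2 * 3600)            # cap at two hours
#
# Afterwards the generated tests live in `testcase_tool.tests_storage_dir` and,
# when `compress_test_storage_dir` is enabled, also in the compressed archive.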
def instrument_code(self, enabled_criteria, exe_path_map=None, \
                        outputdir=None, code_builds_factory_override=None, \
                        parallel_count=1):
    ''' Instrument the code for the enabled criteria.
        (TODO: support parallelism: per test outdata)
    '''
    # FIXME: Support parallelism, then remove the code
    # below:
    ERROR_HANDLER.assert_true(parallel_count <= 1, \
            "FIXME: Must first implement support for parallel mutation")
    #~ FIXMEnd

    # @Checkpoint: create a checkpoint handler (for time)
    checkpoint_handler = CheckPointHandler(self.get_checkpointer())
    if not checkpoint_handler.is_finished():
        ERROR_HANDLER.assert_true(len(enabled_criteria) > 0, \
                                    "no criterion is enabled", __file__)

        if outputdir is None:
            outputdir = self.instrumented_code_storage_dir
        if os.path.isdir(outputdir):
            shutil.rmtree(outputdir)
        os.mkdir(outputdir)

        if code_builds_factory_override is None:
            code_builds_factory_override = self.code_builds_factory

        if exe_path_map is None:
            exe_path_map = code_builds_factory_override.repository_manager\
                                                        .get_exe_path_map()

        self._do_instrument_code(outputdir=outputdir, \
                        exe_path_map=exe_path_map, \
                        code_builds_factory=code_builds_factory_override, \
                        enabled_criteria=enabled_criteria, \
                        parallel_count=parallel_count)

    # @Checkpoint: Finished (for time)
    checkpoint_handler.set_finished(None)
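# Hedged usage sketch: instrumenting for a single criterion. `criteria_tool`
# and `TestCriteria.STATEMENT_COVERAGE` are illustrative assumptions, not
# names defined in this file.
#
#   criteria_tool.instrument_code(
#               enabled_criteria=[TestCriteria.STATEMENT_COVERAGE],
#               exe_path_map=None,   # default: the repository manager's exe map
#               outputdir=None)      # default: instrumented_code_storage_dir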
def generate_tests(self, exe_path_map, parallel_count=1, outputdir=None, \
                        code_builds_factory_override=None, max_time=None):
    ''' Generate tests into `outputdir` (the tests storage directory
        by default) using this tool.
    '''
    # @Checkpoint: create a checkpoint handler (for time)
    checkpoint_handler = CheckPointHandler(self.get_checkpointer())
    if checkpoint_handler.is_finished():
        return

    if outputdir is None:
        outputdir = self.tests_storage_dir

    if code_builds_factory_override is None:
        code_builds_factory_override = self.code_builds_factory

    if os.path.isdir(outputdir):
        shutil.rmtree(outputdir)
    os.mkdir(outputdir)

    self._do_generate_tests(exe_path_map, outputdir=outputdir, \
                        code_builds_factory=code_builds_factory_override, \
                        max_time=max_time)

    # @Checkpoint: Finished (for time)
    checkpoint_handler.set_finished(None)
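# Hedged sketch of the hook a concrete tool is expected to provide: the
# `_do_generate_tests` method called above. The class names and body are
# illustrative assumptions only.
#
#   class ExampleTestcaseTool(BaseTestcaseTool):
#       def _do_generate_tests(self, exe_path_map, outputdir,
#                              code_builds_factory, max_time=None):
#           # Run the underlying generator and write the produced tests
#           # under `outputdir`.
#           ...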
def instrument_code(self, enabled_criteria, exe_path_map=None, \
                        code_builds_factory_override=None, \
                        parallel_count=1):
    ''' Instrument the code for the enabled criteria.
        (TODO: support parallelism: per test outdata)
    '''
    logging.debug("# Instrumenting code with {} ...".format(\
                                    self.config.get_tool_config_alias()))

    # FIXME: Support parallelism, then remove the code
    # below:
    ERROR_HANDLER.assert_true(parallel_count <= 1, \
            "FIXME: Must first implement support for parallel mutation")
    #~ FIXMEnd

    # @Checkpoint: create a checkpoint handler (for time)
    checkpoint_handler = CheckPointHandler(self.get_checkpointer())
    if not checkpoint_handler.is_finished():
        ERROR_HANDLER.assert_true(len(enabled_criteria) > 0, \
                                    "no criterion is enabled", __file__)

        if code_builds_factory_override is None:
            code_builds_factory_override = self.code_builds_factory

        if exe_path_map is None:
            exe_path_map = code_builds_factory_override.repository_manager\
                                                        .get_exe_path_map()

        self._do_instrument_code( \
                        exe_path_map=exe_path_map, \
                        code_builds_factory=code_builds_factory_override, \
                        enabled_criteria=enabled_criteria, \
                        parallel_count=parallel_count)

    # @Checkpoint: Finished (for time)
    checkpoint_handler.set_finished(None)
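# Hedged sketch of the hook a concrete criteria tool supplies:
# `_do_instrument_code`, which receives the resolved exe_path_map and the
# enabled criteria. The class names and body are illustrative assumptions.
#
#   class ExampleCriteriaTool(BaseCriteriaTool):
#       def _do_instrument_code(self, exe_path_map, code_builds_factory,
#                               enabled_criteria, parallel_count=1):
#           # Rebuild the project with instrumentation for `enabled_criteria`.
#           ...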
def generate_tests (self, meta_criteria_tool_obj=None, \ exe_path_map=None, \ test_tool_type_list=None, \ max_time=None, \ test_generation_guidance_obj=None, \ parallel_testgen_count=1, \ restart_checkpointer=False, \ finish_destroy_checkpointer=True): """ This method should be used to generate the tests and must always have a single instance running (it has single checkpoint file). Note: The caller must explicitely destroy the checkpointer after this call succeed, to ensure that a sceduler will not re-execute this :type meta_criteria_tool_obj: :param meta_criteria_tool_obj: :type exe_path_map: :param exe_path_map: :type test_tool_type_list: :param test_tool_type_list: :type \test_generation_guidance_obj: :param \test_generation_guidance_obj: :type \parallel_testgen_count: :param \parallel_testgen_count: :type restart_checkointer: bool :param restart_checkointer: Decide whether to discard checkpoint and restart anew. :type finish_destroy_checkpointer: bool :param finish_destroy_checkpointer: Decide whether to automatically destroy the checkpointer when done or not Useful is caller has a checkpointer to update. :raises: :rtype: """ # FIXME: Support test_generation_guidance_obj, then remove the code # bellow: ERROR_HANDLER.assert_true(test_generation_guidance_obj is None, \ "FIXME: Must first implement support for test gen guidance") ERROR_HANDLER.assert_true(parallel_testgen_count <= 1, \ "FIXME: Must first implement support for parallel test gen") #~ FXIMEnd # Check arguments Validity if exe_path_map is None: exe_path_map = self._get_default_exe_path_map() ERROR_HANDLER.assert_true(parallel_testgen_count > 0, \ "invalid parallel test generation count: {}. {}".format( \ parallel_testgen_count, "must be >= 1")) if test_tool_type_list is None: candidate_tools_aliases = self.testcases_configured_tools.keys() else: ERROR_HANDLER.assert_true(len(test_tool_type_list) > 0,\ "Invalid test_tool_type_list passed (empty)", \ __file__) candidate_tools_aliases = self.get_candidate_tools_aliases(\ test_tool_type_list) # @Checkpoint: create a checkpoint handler cp_func_name = "generate_tests" if test_tool_type_list is not None: for test_tool_type in sorted(test_tool_type_list, \ key=lambda x: x.get_str()): cp_func_name += ":" + test_tool_type.get_str() cp_task_id = 1 checkpoint_handler = CheckPointHandler(\ self.get_checkpoint_state_object()) if restart_checkpointer: checkpoint_handler.restart() if checkpoint_handler.is_finished(): return # Generate for ttoolalias in candidate_tools_aliases: ttool = self.testcases_configured_tools[ttoolalias]\ [self.TOOL_OBJ_KEY] # Make sure to execute the right one if meta_criteria_tool_obj is None: if ttool.requires_criteria_instrumented(): continue else: if not ttool.requires_criteria_instrumented(): continue # Check whether already executed if checkpoint_handler.is_to_execute(func_name=cp_func_name, \ taskid=cp_task_id, \ tool=ttoolalias): # Actual Execution ttool.generate_tests(exe_path_map, \ meta_criteria_tool_obj=meta_criteria_tool_obj, \ max_time=max_time) # @Checkpoint: Checkpointing checkpoint_handler.do_checkpoint(func_name=cp_func_name, \ taskid=cp_task_id, \ tool=ttoolalias) # Invalidate any existing testcase info so it can be recomputed self._invalidate_testcase_info() self._update_tests_duplicates_map(recompute=True) # @Checkpoint: Finished detailed_exectime = {} for ttoolalias in candidate_tools_aliases: tt = self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY] detailed_exectime[ttoolalias] = (\ tt.get_checkpointer().get_execution_time(),\ 
                    tt.get_checkpointer().get_detailed_execution_time())

    checkpoint_handler.set_finished( \
                            detailed_exectime_obj=detailed_exectime)

    if finish_destroy_checkpointer:
        checkpoint_handler.destroy()
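# Hedged usage sketch for the meta-level `generate_tests` above. The object
# name `meta_testcase_tool` and the `TestToolType` value are assumptions.
#
#   meta_testcase_tool.generate_tests(
#               test_tool_type_list=[TestToolType.USE_ONLY_CODE],
#               max_time=3600,                     # cap generation at one hour
#               restart_checkpointer=False,        # resume from any checkpoint
#               finish_destroy_checkpointer=True)  # no caller-owned checkpointer
#
# Tools that require criteria-instrumented code are skipped here because no
# `meta_criteria_tool_obj` is passed.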
def runtests(self, meta_testcases=None, exe_path_map=None, env_vars=None, \ stop_on_failure=False, \ per_test_timeout=None, \ use_recorded_timeout_times=None, \ recalculate_execution_times=False, \ fault_test_execution_matrix_file=None, \ fault_test_execution_execoutput_file=None, \ with_output_summary=True, \ hash_outlog=None, \ test_prioritization_module=None, \ parallel_test_count=1, \ parallel_test_scheduler=None, \ restart_checkpointer=False, finish_destroy_checkpointer=True): ''' Execute the list of test cases with the given executable and say, for each test case, whether it failed :param meta_testcases: list of test cases to execute :param exe_path_map: string representing the file system path to the executable to execute with the tests :param env_vars: dict of environment variables to set before executing each test ({<variable>: <value>}) :param stop_on_failure: decide whether to stop the test execution once a test fails :param fault_test_execution_matrix_file: Optional matrix file to store the tests' pass fail execution data :param fault_test_execution_execoutput_file: Optional output log file to store the tests' execution actual output (hashed) :param with_output_summary: decide whether to return outlog hash :type hash_outlog: bool :hash_outlog: decide whether to hash the outlog or not :param test_prioritization_module: Specify the test prioritization module. (TODO: Implement support) :param parallel_test_count: Specify the number of parallel test Execution. must be an integer >= 1 or None. When None, the max possible value is used. :param parallel_test_scheduler: Specify the function that will handle parallel test scheduling by tool, using the test execution optimizer. (TODO: Implement support) :type restart_checkointer: bool :param restart_checkointer: Decide whether to discard checkpoint and restart anew. :type finish_destroy_checkpointer: bool :param finish_destroy_checkpointer: Decide whether to automatically destroy the checkpointer when done or not Useful is caller has a checkpointer to update. :returns: dict of testcase and their failed verdict. {<test case name>: <True if failed, False if passed, UNCERTAIN_TEST_VERDICT if uncertain>} If stop_on_failure is True, only return the tests that have been executed until the failure ''' ERROR_HANDLER.assert_true(meta_testcases is not None, \ "Must specify testcases", __file__) # FIXME: Make sure that the support are implemented for # parallelism and test prioritization. Remove the code bellow # once supported: ERROR_HANDLER.assert_true(test_prioritization_module is None, \ "Must implement test prioritization support here", \ __file__) ERROR_HANDLER.assert_true(parallel_test_scheduler is None, \ "Must implement parallel tests execution support here", \ __file__) #~FIXMEnd # Check arguments Validity if exe_path_map is None: exe_path_map = self._get_default_exe_path_map() if hash_outlog is None: hash_outlog = self.hash_outlog ERROR_HANDLER.assert_true(parallel_test_count is None \ or parallel_test_count >= 1, \ "invalid parallel tests count ({})".format(\ parallel_test_count), __file__) # @Checkpoint: create a checkpoint handler cp_func_name = "runtests" cp_task_id = 1 checkpoint_handler = \ CheckPointHandler(self.get_checkpoint_state_object()) if restart_checkpointer: checkpoint_handler.restart() if checkpoint_handler.is_finished(): logging.warning("%s %s" %("The function 'runtests' is finished", \ "according to checkpoint, but called again. 
None returned")) if common_mix.confirm_execution("%s %s" % ( \ "Function 'runtests' is already", \ "finished, do you want to restart?")): checkpoint_handler.restart() logging.info("Restarting the finished 'runtests'") else: ERROR_HANDLER.error_exit(err_string="%s %s %s" % (\ "Execution halted. Cannot continue because no value", \ " can be returned. Check the results of the", \ "finished execution"), call_location=__file__) # @Checkpoint: Get the saved payload (data kapt for each tool) # pair list of testfailed verdict and execution output meta_test_failedverdicts_outlog = \ checkpoint_handler.get_optional_payload() if meta_test_failedverdicts_outlog is None: meta_test_failedverdicts_outlog = [{}, {}] # Make sure the tests are unique ERROR_HANDLER.assert_true(len(meta_testcases) == \ len(set(meta_testcases)), \ "not all tests are unique", __file__) # For fdupes if len(self.tests_duplicates_map) > 0: meta_testcases_backup = meta_testcases meta_testcases = set(meta_testcases) dups_remove_meta_testcases = meta_testcases & \ set(self.tests_duplicates_map) dup_toadd_test = {self.tests_duplicates_map[v] for v in \ dups_remove_meta_testcases} - meta_testcases meta_testcases = (meta_testcases - dups_remove_meta_testcases) \ | dup_toadd_test testcases_by_tool = {} for meta_testcase in meta_testcases: ttoolalias, testcase = \ DriversUtils.reverse_meta_element(meta_testcase) if ttoolalias not in testcases_by_tool: testcases_by_tool[ttoolalias] = [] testcases_by_tool[ttoolalias].append(testcase) candidate_aliases = [] for tpos, ttoolalias in enumerate(testcases_by_tool.keys()): # @Checkpoint: Check whether already executed if not checkpoint_handler.is_to_execute(func_name=cp_func_name, \ taskid=cp_task_id, \ tool=ttoolalias): continue candidate_aliases.append(ttoolalias) # parallelism strategies PARA_FULL_DOUBLE = 0 PARA_ALT_TOOLS_AND_TESTS = 1 PARA_TOOLS_ONLY = 2 PARA_TOOLS_TESTS_AS_TOOLS = 3 parallel_strategy = PARA_TOOLS_ONLY # minimum number of tests (accross) for parallelism ptest_tresh = 5 # minimum number of tests (of the given tool) for tool parallelism sub_ptest_thresh = 3 shared_loc = multiprocessing.RLock() parallel_test_count_by_tool = {ta: 1 for ta in candidate_aliases} # tool with parallel test exec # TODO: find way to pass parallel count here if parallel_test_count is None: #parallel_test_count = min(10, multiprocessing.cpu_count()) parallel_test_count = min(20, 2 * multiprocessing.cpu_count()) cand_alias_joblib = [] cand_alias_for = [] para_tools = [] para_tools = [tt for tt in candidate_aliases if \ (len(testcases_by_tool[tt]) >= sub_ptest_thresh \ and self.testcases_configured_tools[tt]\ [self.TOOL_OBJ_KEY].can_run_tests_in_parallel()) ] actual_parallel_cond = len(candidate_aliases) > 1 \ and len(meta_testcases) >= ptest_tresh \ and parallel_test_count is not None \ and parallel_test_count > 1 if parallel_strategy == PARA_ALT_TOOLS_AND_TESTS: # the para_tools will run without parallelism, give them all threads for tt in para_tools: parallel_test_count_by_tool[tt] = parallel_test_count seq_tools = list(set(candidate_aliases) - set(para_tools)) if len(seq_tools) > 1 and actual_parallel_cond: cand_alias_joblib = seq_tools cand_alias_for = para_tools else: cand_alias_for = candidate_aliases elif parallel_strategy == PARA_TOOLS_ONLY: if actual_parallel_cond: cand_alias_joblib = candidate_aliases else: cand_alias_for = candidate_aliases elif parallel_strategy == PARA_FULL_DOUBLE: # use parallel sub_parallel_count = 0 if parallel_test_count is None else \ parallel_test_count - 
len(parallel_test_count_by_tool) if sub_parallel_count > 0: para_tools.sort(reverse=True, \ key=lambda x: len(testcases_by_tool[x])) para_tools_n_tests = sum(\ [len(testcases_by_tool[tt]) for tt in para_tools]) used = 0 for tt in para_tools: quota = int(len(testcases_by_tool[tt]) * \ sub_parallel_count / para_tools_n_tests) parallel_test_count_by_tool[tt] += quota used += quota for tt in para_tools: if used == sub_parallel_count: break parallel_test_count_by_tool[tt] += 1 if actual_parallel_cond: cand_alias_joblib = candidate_aliases else: cand_alias_for = candidate_aliases elif parallel_strategy == PARA_TOOLS_TESTS_AS_TOOLS: # split the tests of one tool and # make the same tool run multiple times ERROR_HANDLER.error_exit("To Be implemented: same tool many times") else: ERROR_HANDLER.error_exit("Invalid parallel startegy") def tool_parallel_test_exec(ttoolalias): # Actual execution found_a_failure = False # Whether the execution was unsuccessful test_error = False ttool = \ self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY] test_failed_verdicts, test_execoutput = ttool.runtests( \ testcases_by_tool[ttoolalias], \ exe_path_map, env_vars, \ stop_on_failure, \ per_test_timeout=per_test_timeout, use_recorded_timeout_times=\ use_recorded_timeout_times, \ recalculate_execution_times=\ recalculate_execution_times, \ with_output_summary=\ with_output_summary, \ hash_outlog=hash_outlog, \ parallel_count=\ parallel_test_count_by_tool[ttoolalias]) with shared_loc: for testcase in test_failed_verdicts: meta_testcase = DriversUtils.make_meta_element(\ testcase, ttoolalias) meta_test_failedverdicts_outlog[0][meta_testcase] = \ test_failed_verdicts[testcase] meta_test_failedverdicts_outlog[1][meta_testcase] = \ test_execoutput[testcase] if not found_a_failure \ and test_failed_verdicts[testcase] == \ common_mix.GlobalConstants.FAIL_TEST_VERDICT: found_a_failure = True if not test_error \ and test_failed_verdicts[testcase] == \ common_mix.GlobalConstants.TEST_EXECUTION_ERROR: test_error = True # @Checkpoint: Chekpointing checkpoint_handler.do_checkpoint(func_name=cp_func_name, \ taskid=cp_task_id, \ tool=ttoolalias, \ opt_payload=meta_test_failedverdicts_outlog) return found_a_failure, test_error #~ def tool_parallel_test_exec() if len(cand_alias_joblib) > 0: parallel_count_ = min(len(cand_alias_joblib), parallel_test_count) joblib.Parallel(n_jobs=parallel_count_, require='sharedmem')\ (joblib.delayed(tool_parallel_test_exec)(ttoolalias) \ for ttoolalias in cand_alias_joblib) if len(cand_alias_for) > 0: for tpos, ttoolalias in enumerate(cand_alias_for): found_a_failure, test_error = \ tool_parallel_test_exec(ttoolalias) if stop_on_failure and found_a_failure: # @Checkpoint: Chekpointing for remaining tools for rem_tool in list(testcases_by_tool.keys())[tpos + 1:]: checkpoint_handler.do_checkpoint(\ func_name=cp_func_name, \ taskid=cp_task_id, \ tool=rem_tool, \ opt_payload=meta_test_failedverdicts_outlog) break if stop_on_failure: # Make sure the non executed test has the uncertain value (None) if len(meta_test_failedverdicts_outlog[0]) < len(meta_testcases): for meta_testcase in set(meta_testcases) - \ set(meta_test_failedverdicts_outlog[0]): meta_test_failedverdicts_outlog[0][meta_testcase] = \ common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT meta_test_failedverdicts_outlog[1][meta_testcase] = \ common_matrices.OutputLogData.\ UNCERTAIN_TEST_OUTLOGDATA ERROR_HANDLER.assert_true(len(meta_test_failedverdicts_outlog[0]) == \ len(meta_testcases), \ "mismatch between number of tests and 
reported verdicts:" + " Tests without verdict are {};".format(\ set(meta_testcases) - \ set(meta_test_failedverdicts_outlog[0])) \ + " Test not in testlist are {}.".format(\ set(meta_test_failedverdicts_outlog[0]) - \ set(meta_testcases)), \ __file__) # For fdupes if len(self.tests_duplicates_map) > 0: meta_testcases = meta_testcases_backup for i in (0, 1): for mtest in dups_remove_meta_testcases: # add to results meta_test_failedverdicts_outlog[i][mtest] = copy.deepcopy(\ meta_test_failedverdicts_outlog[i]\ [self.tests_duplicates_map[mtest]]) for mtest in dup_toadd_test: # remove from results del meta_test_failedverdicts_outlog[i][mtest] if fault_test_execution_matrix_file is not None: # Load or Create the matrix fault_test_execution_matrix = common_matrices.ExecutionMatrix( \ filename=fault_test_execution_matrix_file, \ non_key_col_list=meta_testcases) ERROR_HANDLER.assert_true(fault_test_execution_matrix.is_empty(), \ "matrix must be empty. Filename is:" " "+fault_test_execution_matrix_file, __file__) failverdict2val = { common_mix.GlobalConstants.FAIL_TEST_VERDICT: \ fault_test_execution_matrix.getActiveCellDefaultVal(), common_mix.GlobalConstants.PASS_TEST_VERDICT: \ fault_test_execution_matrix.getInactiveCellVal(), common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT: \ fault_test_execution_matrix.getUncertainCellDefaultVal(), } cells_dict = {} for meta_testcase in meta_test_failedverdicts_outlog[0]: cells_dict[meta_testcase] = failverdict2val[\ meta_test_failedverdicts_outlog[0][meta_testcase]] fault_test_execution_matrix.add_row_by_key(self.FAULT_MATRIX_KEY, \ cells_dict, serialize=True) if fault_test_execution_execoutput_file is not None: # Load or Create the data object fault_test_execution_execoutput = common_matrices.OutputLogData( \ filename=fault_test_execution_execoutput_file) ERROR_HANDLER.assert_true(\ fault_test_execution_execoutput.is_empty(), \ "outlog data must be empty", __file__) fault_test_execution_execoutput.add_data(\ {self.PROGRAM_EXECOUTPUT_KEY: \ meta_test_failedverdicts_outlog[1]}, \ serialize=True) # @Checkpoint: Finished detailed_exectime = {} for ttoolalias in testcases_by_tool.keys(): tt = self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY] detailed_exectime[ttoolalias] = (\ tt.get_checkpointer().get_execution_time(),\ tt.get_checkpointer().get_detailed_execution_time()) checkpoint_handler.set_finished( \ detailed_exectime_obj=detailed_exectime) if finish_destroy_checkpointer: checkpoint_handler.destroy() return meta_test_failedverdicts_outlog
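# Hedged usage sketch for the meta `runtests` above; the variable names and
# file name are assumptions.
#
#   verdicts, outlogs = meta_testcase_tool.runtests(
#               meta_testcases=selected_meta_tests,
#               stop_on_failure=False,
#               fault_test_execution_matrix_file="fail_matrix.csv",
#               parallel_test_count=None)  # None: use the maximum possible
#   failing = [t for t, v in verdicts.items()
#                   if v == common_mix.GlobalConstants.FAIL_TEST_VERDICT]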
def runtests_criteria_coverage (self, testcases, \ criteria_element_list_by_criteria, \ criterion_to_matrix, \ criterion_to_executionoutput, \ re_instrument_code=True, \ cover_criteria_elements_once=False, \ prioritization_module_by_criteria=None, \ test_parallel_count=1): """ (TODO: support parallelism: per test outdata) """ # FIXME: Support parallelism, then remove the code # bellow: ERROR_HANDLER.assert_true(test_parallel_count <= 1, \ "FIXME: Must first implement support for parallel mutation") #~ FXIMEnd # save memory testcases = [sys.intern(t) for t in testcases] # @Checkpoint: create a checkpoint handler (for time) cp_func_name = "runtests_criteria_coverage" cp_task_id = 1 checkpoint_handler = CheckPointHandler(self.get_checkpointer()) if checkpoint_handler.is_finished(): return ERROR_HANDLER.assert_true(len(criterion_to_matrix) > 0, \ "no criterion enabled", __file__) ERROR_HANDLER.assert_true(len(set(criterion_to_matrix) - \ set(self.get_supported_criteria())) == 0, \ "Some unsuported criteria are enabled", __file__) ERROR_HANDLER.assert_true(criterion_to_executionoutput is None or \ set(criterion_to_matrix) == set(criterion_to_executionoutput),\ "mismatch of criteria between matrix and outlog" , __file__) # Check that the result_matrix is empty and fine for criterion in criterion_to_matrix: ERROR_HANDLER.assert_true( \ criterion_to_matrix[criterion].is_empty(), \ "the matrix must be empty", __file__) ERROR_HANDLER.assert_true( \ set(testcases) == set(criterion_to_matrix[criterion]\ .get_nonkey_colname_list()), \ "The specified test cases are not same in the matrix", __file__) ERROR_HANDLER.assert_true(\ criterion_to_executionoutput[criterion] is None or \ criterion_to_executionoutput[criterion].is_empty(), \ "the execoutput must be empty", __file__) # @Checkpoint: check if checkpoint_handler.is_to_execute(func_name=cp_func_name, \ taskid=cp_task_id): # Intrument the codes is requested if re_instrument_code: self.instrument_code(\ enabled_criteria=criterion_to_matrix.keys()) # @Checkpoint: checkpoint checkpoint_handler.do_checkpoint(func_name=cp_func_name, \ taskid=cp_task_id) # @Checkpoint: next task cp_task_id += 1 # Split criteria meta_crits = list(set(criterion_to_matrix) & \ set(self._get_meta_instrumentation_criteria())) separated_crits = list(set(criterion_to_matrix) & \ set(self._get_separated_instrumentation_criteria())) m_crit2mat, s_crit2mat = self._extract_sub_dicts(criterion_to_matrix,\ [meta_crits, separated_crits]) if criterion_to_executionoutput is not None: m_crit2execout, s_crit2execout = self._extract_sub_dicts(\ criterion_to_executionoutput,\ [meta_crits, separated_crits]) m_crit2elem, s_crit2elem = self._extract_sub_dicts( \ criteria_element_list_by_criteria,\ [meta_crits, separated_crits]) m_crit2pm, s_crit2pm = None, None if prioritization_module_by_criteria is not None: m_crit2pm, s_crit2pm = self._extract_sub_dicts( \ prioritization_module_by_criteria,\ [meta_crits, separated_crits]) else: s_crit2pm = {c: None for c in separated_crits} # runtest with the meta files if len(m_crit2mat) > 0 and \ checkpoint_handler.is_to_execute(func_name=cp_func_name, \ taskid=cp_task_id): self._runtest_meta_criterion_program(testcases=testcases, \ criterion_to_matrix=m_crit2mat, \ criterion_to_executionoutput=m_crit2execout, \ criteria_element_list_by_criteria=m_crit2elem,\ cover_criteria_elements_once=\ cover_criteria_elements_once,\ prioritization_module_by_criteria=m_crit2pm, \ test_parallel_count=test_parallel_count) # @Checkpoint: checkpoint 
checkpoint_handler.do_checkpoint(func_name=cp_func_name, \ taskid=cp_task_id) # @Checkpoint: next task cp_task_id += 1 # runtest with the separate files for criterion in s_crit2mat.keys(): if checkpoint_handler.is_to_execute(func_name=cp_func_name, \ taskid=cp_task_id, tool=criterion.get_str()): self._runtest_separate_criterion_program(criterion, \ testcases=testcases, \ matrix=s_crit2mat[criterion], \ executionoutput=s_crit2execout[criterion], \ criteria_element_list=s_crit2elem[criterion],\ cover_criteria_elements_once=\ cover_criteria_elements_once,\ prioritization_module=s_crit2pm[criterion], \ test_parallel_count=test_parallel_count, \ checkpoint_handler=checkpoint_handler, \ cp_calling_func_name=cp_func_name, \ cp_calling_done_task_id=(cp_task_id - 1), \ cp_calling_tool=criterion.get_str()) # @Checkpoint: checkpoint checkpoint_handler.do_checkpoint(func_name=cp_func_name, \ taskid=cp_task_id, tool=criterion.get_str()) # @Checkpoint: Finished (for time) checkpoint_handler.set_finished(None)
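# Hedged usage sketch for the per-tool criteria-coverage run above; the
# criterion value, file name and tool object are assumptions.
#
#   matrix = common_matrices.ExecutionMatrix(filename="statement_cov.csv",
#                                            non_key_col_list=testcases)
#   criteria_tool.runtests_criteria_coverage(testcases,
#               criteria_element_list_by_criteria={criterion: None},  # all elements
#               criterion_to_matrix={criterion: matrix},
#               criterion_to_executionoutput={criterion: None},
#               re_instrument_code=True)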
def _runtests(self, testcases, exe_path_map, env_vars, \ stop_on_failure=False, per_test_timeout=None, \ use_recorded_timeout_times=None, \ recalculate_execution_times=False, \ with_outlog_hash=True, parallel_count=1): ''' Execute the list of test cases with the given executable and say, for each test case, whether it failed. Note: Re-implement this if there the tool implements ways to faster execute multiple test cases. :param testcases: list of test cases to execute :param exe_path_map: string representing the file system path to the executable to execute with the tests :param env_vars: dict of environment variables to set before executing each test ({<variable>: <value>}) :param stop_on_failure: decide whether to stop the test execution once a test fails :returns: plitair of: - dict of testcase and their failed verdict. {<test case name>: <True if failed, False if passed, UNCERTAIN_TEST_VERDICT if uncertain>} If stop_on_failure is True, only return the tests that have been executed until the failure - test execution output log hash data object or None ''' # @Checkpoint: create a checkpoint handler (for time) checkpoint_handler = CheckPointHandler(self.get_checkpointer()) if checkpoint_handler.is_finished(): logging.warning("{} {} {}".format( \ "The function 'runtests' is finished according", \ "to checkpoint, but called again. None returned", \ "\nPlease Confirm reexecution...")) if common_mix.confirm_execution("{} {}".format( \ "Function 'runtests' is already", \ "finished, do yo want to restart?")): checkpoint_handler.restart() logging.info("Restarting the finished 'runtests'") else: ERROR_HANDLER.error_exit(err_string="{} {} {}".format( \ "Execution halted. Cannot continue because no value", \ " can be returned. Check the results of the", \ "finished execution"), call_location=__file__) if per_test_timeout is None: per_test_timeout = {tc: None for tc in testcases} if use_recorded_timeout_times is not None: ERROR_HANDLER.assert_true(use_recorded_timeout_times > 0, \ "use_recorded_timeout_times must be " "positive if not None", __file__) per_test_timeout.update({x: (y * use_recorded_timeout_times) \ for x, y in self.test_execution_time.items()}) else: ERROR_HANDLER.assert_true(use_recorded_timeout_times is None, \ "use_recorded_timeout_times must not be set " "when per_test_timeout is set", __file__) # Prepare the exes self._prepare_executable(exe_path_map, env_vars, \ collect_output=with_outlog_hash) self._set_env_vars(env_vars) test_failed_verdicts = {} test_outlog_hash = {} for testcase in testcases: start_time = time.time() test_failed, execoutlog_hash = \ self._oracle_execute_a_test(testcase, exe_path_map, \ env_vars, \ timeout=per_test_timeout[testcase], \ with_outlog_hash=with_outlog_hash) # Record exec time if not existing if recalculate_execution_times: self.test_execution_time[testcase] = \ 1 + int(time.time() - start_time) test_failed_verdicts[testcase] = test_failed test_outlog_hash[testcase] = execoutlog_hash if stop_on_failure and test_failed != \ common_mix.GlobalConstants.PASS_TEST_VERDICT: break if recalculate_execution_times: common_fs.dumpJSON(self.test_execution_time, \ self.test_execution_time_storage_file, pretty=True) # Restore back the exes self._restore_env_vars() self._restore_default_executable(exe_path_map, env_vars, \ collect_output=with_outlog_hash) if stop_on_failure: # Make sure the non executed test has the uncertain value (None) if len(test_failed_verdicts) < len(testcases): for testcase in set(testcases) - set(test_failed_verdicts): 
                test_failed_verdicts[testcase] = \
                            common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT
                test_outlog_hash[testcase] = common_matrices.\
                            OutputLogData.UNCERTAIN_TEST_OUTLOGDATA

    # @Checkpoint: Finished (for time)
    checkpoint_handler.set_finished(None)

    if not with_outlog_hash:
        test_outlog_hash = None

    return test_failed_verdicts, test_outlog_hash
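# Hedged illustration of the timeout handling in `_runtests` above: when
# `use_recorded_timeout_times` is given (e.g. 1.5), each recorded execution
# time is scaled by that factor and used as the per-test timeout; it must not
# be combined with an explicit `per_test_timeout`. Names are assumptions.
#
#   verdicts, outlog_hashes = tool._runtests(testcases, exe_path_map, env_vars,
#                                            use_recorded_timeout_times=1.5,
#                                            with_outlog_hash=True)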
def runtests(self, meta_testcases=None, exe_path_map=None, env_vars=None, \ stop_on_failure=False, \ per_test_timeout=None, \ use_recorded_timeout_times=None, \ recalculate_execution_times=False, \ fault_test_execution_matrix_file=None, \ fault_test_execution_execoutput_file=None, \ with_outlog_hash=True, \ test_prioritization_module=None, \ parallel_test_count=1, \ parallel_test_scheduler=None, \ restart_checkpointer=False, finish_destroy_checkpointer=True): ''' Execute the list of test cases with the given executable and say, for each test case, whether it failed :param meta_testcases: list of test cases to execute :param exe_path_map: string representing the file system path to the executable to execute with the tests :param env_vars: dict of environment variables to set before executing each test ({<variable>: <value>}) :param stop_on_failure: decide whether to stop the test execution once a test fails :param fault_test_execution_matrix_file: Optional matrix file to store the tests' pass fail execution data :param fault_test_execution_execoutput_file: Optional output log file to store the tests' execution actual output (hashed) :param with_outlog_hash: decide whether to return outlog hash :param test_prioritization_module: Specify the test prioritization module. (TODO: Implement support) :param parallel_test_count: Specify the number of parallel test Execution. must be an integer >= 1 :param parallel_test_scheduler: Specify the function that will handle parallel test scheduling by tool, using the test execution optimizer. (TODO: Implement support) :type restart_checkointer: bool :param restart_checkointer: Decide whether to discard checkpoint and restart anew. :type finish_destroy_checkpointer: bool :param finish_destroy_checkpointer: Decide whether to automatically destroy the checkpointer when done or not Useful is caller has a checkpointer to update. :returns: dict of testcase and their failed verdict. {<test case name>: <True if failed, False if passed, UNCERTAIN_TEST_VERDICT if uncertain>} If stop_on_failure is True, only return the tests that have been executed until the failure ''' # FIXME: Make sure that the support are implemented for # parallelism and test prioritization. Remove the code bellow # once supported: ERROR_HANDLER.assert_true(test_prioritization_module is None, \ "Must implement test prioritization support here", \ __file__) ERROR_HANDLER.assert_true(parallel_test_count <= 1, \ "Must implement parallel tests execution support here", \ __file__) ERROR_HANDLER.assert_true(parallel_test_scheduler is None, \ "Must implement parallel tests execution support here", \ __file__) #~FIXMEnd # Check arguments Validity if exe_path_map is None: exe_path_map = self._get_default_exe_path_map() ERROR_HANDLER.assert_true(parallel_test_count > 0, \ "invalid parallel test execution count: {}. {}".format( \ parallel_test_count, "must be >= 1")) # @Checkpoint: create a checkpoint handler cp_func_name = "runtests" cp_task_id = 1 checkpoint_handler = \ CheckPointHandler(self.get_checkpoint_state_object()) if restart_checkpointer: checkpoint_handler.restart() if checkpoint_handler.is_finished(): logging.warning("%s %s" %("The function 'runtests' is finished", \ "according to checkpoint, but called again. 
None returned")) if common_mix.confirm_execution("%s %s" % ( \ "Function 'runtests' is already", \ "finished, do you want to restart?")): checkpoint_handler.restart() logging.info("Restarting the finished 'runtests'") else: ERROR_HANDLER.error_exit(err_string="%s %s %s" % (\ "Execution halted. Cannot continue because no value", \ " can be returned. Check the results of the", \ "finished execution"), call_location=__file__) # @Checkpoint: Get the saved payload (data kapt for each tool) # pair list of testfailed verdict and execution output meta_test_failedverdicts_outlog = \ checkpoint_handler.get_optional_payload() if meta_test_failedverdicts_outlog is None: meta_test_failedverdicts_outlog = [{}, {}] # Make sure the tests are unique ERROR_HANDLER.assert_true(len(meta_testcases) == \ len(set(meta_testcases)), \ "not all tests are unique", __file__) testcases_by_tool = {} for meta_testcase in meta_testcases: ttoolalias, testcase = \ DriversUtils.reverse_meta_element(meta_testcase) if ttoolalias not in testcases_by_tool: testcases_by_tool[ttoolalias] = [] testcases_by_tool[ttoolalias].append(testcase) found_a_failure = False for tpos, ttoolalias in enumerate(testcases_by_tool.keys()): # @Checkpoint: Check whether already executed if not checkpoint_handler.is_to_execute(func_name=cp_func_name, \ taskid=cp_task_id, \ tool=ttoolalias): continue # Actual execution ttool = \ self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY] test_failed_verdicts, test_execoutput = ttool.runtests( \ testcases_by_tool[ttoolalias], \ exe_path_map, env_vars, \ stop_on_failure, \ per_test_timeout=per_test_timeout, use_recorded_timeout_times=\ use_recorded_timeout_times, \ recalculate_execution_times=\ recalculate_execution_times, \ with_outlog_hash=with_outlog_hash) for testcase in test_failed_verdicts: meta_testcase = \ DriversUtils.make_meta_element(testcase, ttoolalias) meta_test_failedverdicts_outlog[0][meta_testcase] = \ test_failed_verdicts[testcase] meta_test_failedverdicts_outlog[1][meta_testcase] = \ test_execoutput[testcase] if test_failed_verdicts[testcase] == \ common_mix.GlobalConstants.COMMAND_UNCERTAIN: found_a_failure = True # @Checkpoint: Chekpointing checkpoint_handler.do_checkpoint(func_name=cp_func_name, \ taskid=cp_task_id, \ tool=ttoolalias, \ opt_payload=meta_test_failedverdicts_outlog) if stop_on_failure and found_a_failure: # @Checkpoint: Chekpointing for remaining tools for rem_tool in list(testcases_by_tool.keys())[tpos + 1:]: checkpoint_handler.do_checkpoint(func_name=cp_func_name, \ taskid=cp_task_id, \ tool=rem_tool, \ opt_payload=meta_test_failedverdicts_outlog) break if stop_on_failure: # Make sure the non executed test has the uncertain value (None) if len(meta_test_failedverdicts_outlog[0]) < len(meta_testcases): for meta_testcase in set(meta_testcases) - \ set(meta_test_failedverdicts_outlog[0]): meta_test_failedverdicts_outlog[0][meta_testcase] = \ common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT meta_test_failedverdicts_outlog[1][meta_testcase] = \ common_matrices.OutputLogData.\ UNCERTAIN_TEST_OUTLOGDATA ERROR_HANDLER.assert_true(len(meta_test_failedverdicts_outlog[0]) == \ len(meta_testcases), \ "Not all tests have a verdict reported", __file__) if fault_test_execution_matrix_file is not None: # Load or Create the matrix fault_test_execution_matrix = common_matrices.ExecutionMatrix( \ filename=fault_test_execution_matrix_file, \ non_key_col_list=meta_testcases) ERROR_HANDLER.assert_true(fault_test_execution_matrix.is_empty(), \ "matrix must be empty", __file__) 
failverdict2val = { common_mix.GlobalConstants.FAIL_TEST_VERDICT: \ fault_test_execution_matrix.getActiveCellDefaultVal(), common_mix.GlobalConstants.PASS_TEST_VERDICT: \ fault_test_execution_matrix.getInactiveCellVal(), common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT: \ fault_test_execution_matrix.getUncertainCellDefaultVal(), } cells_dict = {} for meta_testcase in meta_test_failedverdicts_outlog[0]: cells_dict[meta_testcase] = failverdict2val[\ meta_test_failedverdicts_outlog[0][meta_testcase]] fault_test_execution_matrix.add_row_by_key(self.FAULT_MATRIX_KEY, \ cells_dict, serialize=True) if fault_test_execution_execoutput_file is None: meta_test_failedverdicts_outlog[1] = None else: # Load or Create the data object fault_test_execution_execoutput = common_matrices.OutputLogData( \ filename=fault_test_execution_execoutput_file) ERROR_HANDLER.assert_true(\ fault_test_execution_execoutput.is_empty(), \ "outlog data must be empty", __file__) fault_test_execution_execoutput.add_data(\ {self.PROGRAM_EXECOUTPUT_KEY: \ meta_test_failedverdicts_outlog[1]}, \ serialize=True) # @Checkpoint: Finished detailed_exectime = {} for ttoolalias in testcases_by_tool.keys(): tt = self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY] detailed_exectime[ttoolalias] = (\ tt.get_checkpointer().get_execution_time(),\ tt.get_checkpointer().get_detailed_execution_time()) checkpoint_handler.set_finished( \ detailed_exectime_obj=detailed_exectime) if finish_destroy_checkpointer: checkpoint_handler.destroy() return meta_test_failedverdicts_outlog
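# Hedged usage sketch for this `runtests` variant; names are assumptions.
#
#   verdicts, hashed_outlogs = meta_testcase_tool.runtests(
#               meta_testcases=meta_tests,
#               with_outlog_hash=True,
#               fault_test_execution_execoutput_file=None)
#   # Note: hashed_outlogs is None here because this variant clears the
#   # output-log part of the result when no execoutput file is given.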
def _runtests(self, testcases, exe_path_map, env_vars, \ stop_on_failure=False, per_test_timeout=None, \ use_recorded_timeout_times=None, \ recalculate_execution_times=False, \ with_output_summary=True, hash_outlog=True, \ parallel_count=1): ''' Execute the list of test cases with the given executable and say, for each test case, whether it failed. Note: Re-implement this if there the tool implements ways to faster execute multiple test cases. :param testcases: list of test cases to execute :param exe_path_map: string representing the file system path to the executable to execute with the tests :param env_vars: dict of environment variables to set before executing each test ({<variable>: <value>}) :param stop_on_failure: decide whether to stop the test execution once a test fails :returns: plitair of: - dict of testcase and their failed verdict. {<test case name>: <True if failed, False if passed, UNCERTAIN_TEST_VERDICT if uncertain>} If stop_on_failure is True, only return the tests that have been executed until the failure - test execution output log hash data object or None ''' # @Checkpoint: create a checkpoint handler (for time) checkpoint_handler = CheckPointHandler(self.get_checkpointer()) if checkpoint_handler.is_finished(): logging.warning("{} {} {}".format( \ "The function 'runtests' is finished according", \ "to checkpoint, but called again. None returned", \ "\nPlease Confirm reexecution...")) if common_mix.confirm_execution("{} {}".format( \ "Function 'runtests' is already", \ "finished, do yo want to restart?")): checkpoint_handler.restart() logging.info("Restarting the finished 'runtests'") else: ERROR_HANDLER.error_exit(err_string="{} {} {}".format( \ "Execution halted. Cannot continue because no value", \ " can be returned. Check the results of the", \ "finished execution"), call_location=__file__) if per_test_timeout is None: per_test_timeout = {tc: None for tc in testcases} if use_recorded_timeout_times is not None: ERROR_HANDLER.assert_true(use_recorded_timeout_times > 0, \ "use_recorded_timeout_times must be " "positive if not None", __file__) per_test_timeout.update({x: (y * use_recorded_timeout_times) \ for x, y in self.test_execution_time.items()}) else: ERROR_HANDLER.assert_true(use_recorded_timeout_times is None, \ "use_recorded_timeout_times must not be set " "when per_test_timeout is set", __file__) # Prepare the exes self._prepare_executable(exe_path_map, env_vars, \ collect_output=with_output_summary) self._set_env_vars(env_vars) test_failed_verdicts = {} test_outlog_hash = {} processbar = tqdm.tqdm(testcases, leave=False, dynamic_ncols=True) # Parallel stuffs def test_exec_iteration(testcase): processbar.set_description("Running Test {} (x{})".format(\ testcase, parallel_count)) start_time = time.time() test_failed, execoutlog_hash = \ self._oracle_execute_a_test(testcase, exe_path_map, \ env_vars, \ timeout=per_test_timeout[testcase], \ with_output_summary=with_output_summary, \ hash_outlog=hash_outlog) #if testcase.endswith('.ktest'): # DBG - fix hang # logging.debug("KTEST {} is done".format(testcase)) # Record exec time if not existing with self.shared_loc: if recalculate_execution_times: self.test_execution_time[testcase] = \ max(1, int(time.time() - start_time)) \ * self.config.RECORDED_TEST_TIMEOUT_FACTOR test_failed_verdicts[testcase] = test_failed test_outlog_hash[testcase] = execoutlog_hash return test_failed #~ def test_exec_iteration() if self.can_run_tests_in_parallel() and parallel_count is not None \ and parallel_count > 1: parallel_count = 
min(len(testcases), parallel_count) joblib.Parallel(n_jobs=parallel_count, require='sharedmem')\ (joblib.delayed(test_exec_iteration)(testcase) \ for testcase in processbar) else: parallel_count = 1 # to be printed in progress for testcase in processbar: test_failed = test_exec_iteration(testcase) if stop_on_failure and test_failed != \ common_mix.GlobalConstants.PASS_TEST_VERDICT: break if recalculate_execution_times: common_fs.dumpJSON(self.test_execution_time, \ self.test_execution_time_storage_file, pretty=True) # Restore back the exes self._restore_env_vars() self._restore_default_executable(exe_path_map, env_vars, \ collect_output=with_output_summary) if stop_on_failure: # Make sure the non executed test has the uncertain value (None) if len(test_failed_verdicts) < len(testcases): for testcase in set(testcases) - set(test_failed_verdicts): test_failed_verdicts[testcase] = \ common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT test_outlog_hash[testcase] = common_matrices.\ OutputLogData.UNCERTAIN_TEST_OUTLOGDATA # @Checkpoint: Finished (for time) checkpoint_handler.set_finished(None) if not with_output_summary: test_outlog_hash = None ERROR_HANDLER.assert_true(len(testcases) == len(test_failed_verdicts),\ "Mismatch between testcases and test_failed_verdict (BUG)",\ __file__) return test_failed_verdicts, test_outlog_hash
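# Hedged usage sketch for the parallel-capable `_runtests` above; the tool
# object and the parallel count are assumptions.
#
#   verdicts, out_summaries = tool._runtests(testcases, exe_path_map, env_vars,
#                                            with_output_summary=True,
#                                            hash_outlog=True,
#                                            parallel_count=4)
#   # Parallel execution is only used when the tool reports
#   # can_run_tests_in_parallel() and parallel_count > 1; the count is then
#   # clamped to the number of test cases.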
def instrument_code (self, criteria_enabled_list=None, \ exe_path_map=None, \ #outputdir_override=None, \ #code_builds_factory_override=None, \ parallel_count=1, \ restart_checkpointer=False, \ finish_destroy_checkpointer=True): """ Instrument the code for the criteria measurements. :type criteria_enabled_list: dict or None :param criteria_enabled_list: When None, use all supported criteria else use the specified criteria :type \exe_path_map: dict or None :param \exe_path_map: When None, use all exe, else instrument files as dict key and write the instrumented output in directory as value. :type \parallel_count: :param \parallel_count: :type \restart_checkpointer: :param \restart_checkpointer: :type \finish_destroy_checkpointer: :param \finish_destroy_checkpointer: :raises: :rtype: """ # FIXME: Support parallelism, then remove the code # bellow: ERROR_HANDLER.assert_true(parallel_count <= 1, \ "FIXME: Must first implement support for parallel") #~ FXIMEnd # Check arguments Validity ERROR_HANDLER.assert_true(parallel_count > 0, \ "invalid parallel execution count: {}. {}".format( \ parallel_count, "must be >= 1"), __file__) # @Checkpoint: create a checkpoint handler cp_func_name = "instrument_code" cp_task_id = 1 checkpoint_handler = CheckPointHandler( \ self.get_checkpoint_state_object()) if restart_checkpointer: checkpoint_handler.restart() if checkpoint_handler.is_finished(): return if criteria_enabled_list is None: criteria_enabled_list = self.tools_config_by_criterion_dict.keys() else: ERROR_HANDLER.assert_true(len(criteria_enabled_list) > 0, \ "no criterion is enabled", __file__) ERROR_HANDLER.assert_true(len(set(criteria_enabled_list) - \ set(self.tools_config_by_criterion_dict)) == 0, \ "Passed matrice output are more than tool specified", \ __file__) tool2criteria = self._get_tool2criteria(criteria_enabled_list) for ctoolalias in tool2criteria: # @Checkpoint: Check whether already executed if checkpoint_handler.is_to_execute( \ func_name=cp_func_name, \ taskid=cp_task_id, \ tool=ctoolalias): # Actual execution ctool = self.criteria_configured_tools[ctoolalias][\ self.TOOL_OBJ_KEY] ctool.instrument_code(\ enabled_criteria=tool2criteria[ctoolalias],\ exe_path_map=exe_path_map) # ensure repo is set back self.code_builds_factory.set_repo_to_build_default() # @Checkpoint: Checkpointing checkpoint_handler.do_checkpoint( \ func_name=cp_func_name, \ taskid=cp_task_id, \ tool=ctoolalias) # Invalidate any existing mutant info so it can be recomputed self._invalidate_criteria_info( enabled_criteria=tool2criteria[ctoolalias]) # @Checkpoint: Finished detailed_exectime = {} for ctoolalias in tool2criteria: ct = self.criteria_configured_tools[ctoolalias][self.TOOL_OBJ_KEY] detailed_exectime[ctoolalias] = (\ ct.get_checkpointer().get_execution_time(),\ ct.get_checkpointer().get_detailed_execution_time()) checkpoint_handler.set_finished( \ detailed_exectime_obj=detailed_exectime) if finish_destroy_checkpointer: checkpoint_handler.destroy()
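# Hedged usage sketch for the meta-level instrumentation above; the object
# name and criteria values are assumptions.
#
#   meta_criteria_tool.instrument_code(
#               criteria_enabled_list=[TestCriteria.STATEMENT_COVERAGE,
#                                      TestCriteria.BRANCH_COVERAGE],
#               restart_checkpointer=False,
#               finish_destroy_checkpointer=True)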
def runtests_criteria_coverage (self, testcases, criterion_to_matrix, \ criterion_to_executionoutput=None, criteria_element_list_by_criteria=None, \ re_instrument_code=False, \ cover_criteria_elements_once=False, prioritization_module_by_criteria=None, parallel_count=1, \ parallel_criteria_test_scheduler=None,\ restart_checkpointer=False, \ finish_destroy_checkpointer=True): ''' Executes the instrumented executable code with testscases and returns the different code coverage matrices. :param testcases: list of testcases to execute :param criterion_to_matrix: dict of <criterion, Matrix file where to store coverage>. :param criterion_to_executionoutput: dict of <criterion, execoutput file where to store coverage>. :param criteria_element_list_by_criteria: dictionary representing the list of criteria elements (stmts, branches, mutants) to consider in the test execution matices. Key is the criterion and the value the list of elements :param re_instrument_code: Decide whether to instrument code before running the tests. (Example when instrumentation was not specifically called. This is True by default) :param cover_criteria_elements_once: Specify whether to cover criteria elements once is enough, meaning that we stop analysing a criterion element once a test covers it. The remaining test covering verdict will be UNKNOWN. :param prioritization_module_by_criteria: dict of prioritization module by criteria. None means no prioritization used. :type \parallel_count: :param \parallel_count: :type \parallel_criteria_test_scheduler: :param \parallel_criteria_test_scheduler: scheduler that organize parallelism across criteria tools. (TODO: Implement support) :type \restart_checkpointer: :param \restart_checkpointer: :type finish_destroy_checkpointer: :param finish_destroy_checkpointer: ''' # FIXME: Make sure that the support are implemented for # parallelism and test prioritization. Remove the code bellow # once supported: ERROR_HANDLER.assert_true(parallel_count <= 1, \ "Must implement parallel execution support here", \ __file__) ERROR_HANDLER.assert_true(parallel_criteria_test_scheduler is None, \ "Must implement parallel codes tests execution support here", \ __file__) #~FIXMEnd # Check arguments Validity ERROR_HANDLER.assert_true(parallel_count > 0, \ "invalid parallel execution count: {}. 
{}".format( \ parallel_count, "must be >= 1")) # @Checkpoint: create a checkpoint handler cp_func_name = "runtests_criteria_coverage" cp_task_id = 1 checkpoint_handler = CheckPointHandler( \ self.get_checkpoint_state_object()) if restart_checkpointer: checkpoint_handler.restart() if checkpoint_handler.is_finished(): return ERROR_HANDLER.assert_true(len(criterion_to_matrix) > 0, \ "no criterion is enabled", __file__) ERROR_HANDLER.assert_true(len(set(criterion_to_matrix) - \ set(self.tools_config_by_criterion_dict)) == 0, \ "Passed matrices output are more than tool specified", \ __file__) if criterion_to_executionoutput is not None: ERROR_HANDLER.assert_true(set(criterion_to_matrix) == \ set(criterion_to_executionoutput), \ "criteria mismatch between matrix and output", \ __file__) tool2criteria = self._get_tool2criteria(criterion_to_matrix.keys()) matrices_dir_tmp = os.path.join(self.criteria_working_dir, \ "codecov_dir.tmp") if os.path.isdir(matrices_dir_tmp): if restart_checkpointer: shutil.rmtree(matrices_dir_tmp) os.mkdir(matrices_dir_tmp) else: os.mkdir(matrices_dir_tmp) if criteria_element_list_by_criteria is None: criteria_element_list_by_criteria = \ {c: None for c in criterion_to_matrix} # get criteria elements by tools criteria_elem_list_by_tool = {} for criterion in criteria_element_list_by_criteria: if criteria_element_list_by_criteria[criterion] is None: for t_conf in self.tools_config_by_criterion_dict[criterion]: toolalias = t_conf.get_tool_config_alias() if toolalias not in criteria_elem_list_by_tool: criteria_elem_list_by_tool[toolalias] = {} criteria_elem_list_by_tool[toolalias][criterion] = None continue ERROR_HANDLER.assert_true(\ len(criteria_element_list_by_criteria[criterion]) != 0, \ "Empty criteria element list for criterion "\ +criterion.get_str(), __file__) for crit_elem in criteria_element_list_by_criteria[criterion]: toolalias, elem = DriversUtils.reverse_meta_element(crit_elem) if toolalias not in criteria_elem_list_by_tool: criteria_elem_list_by_tool[toolalias] = {} if criterion not in criteria_elem_list_by_tool[toolalias]: criteria_elem_list_by_tool[toolalias][criterion] = [] criteria_elem_list_by_tool[toolalias][criterion].append(elem) ERROR_HANDLER.assert_true(len(set(criteria_elem_list_by_tool) - \ set(self.criteria_configured_tools)) == 0, \ "some tools in data not registered", __file__) crit2tool2matrixfile = {cv: {} for cv in criterion_to_matrix} crit2tool2outhashfile = {cv: {} for cv in criterion_to_executionoutput} for ctoolalias in tool2criteria: _criteria2matrix = {} _criteria2outhash = {} for criterion in tool2criteria[ctoolalias]: _criteria2matrix[criterion] = os.path.join(matrices_dir_tmp, \ criterion.get_field_value() + '-' + ctoolalias + '.csv') if criterion_to_executionoutput is None or \ criterion_to_executionoutput[criterion] is None: _criteria2outhash[criterion] = None else: _criteria2outhash[criterion] = \ os.path.join(matrices_dir_tmp, \ criterion.get_field_value() + '-' + ctoolalias + '.outloghash.json') crit2tool2matrixfile[criterion][ctoolalias] = \ _criteria2matrix[criterion] crit2tool2outhashfile[criterion][ctoolalias] = \ _criteria2outhash[criterion] # @Checkpoint: Check whether already executed if checkpoint_handler.is_to_execute( \ func_name=cp_func_name, \ taskid=cp_task_id, \ tool=ctoolalias): for criterion in _criteria2matrix: _criteria2matrix[criterion] = \ common_matrices.ExecutionMatrix( \ filename=_criteria2matrix[criterion], \ non_key_col_list=testcases) if _criteria2outhash[criterion] is not None: 
_criteria2outhash[criterion] = \ common_matrices.OutputLogData( \ filename=_criteria2outhash[criterion]) # Actual execution ctool = self.criteria_configured_tools[ctoolalias][\ self.TOOL_OBJ_KEY] ctool.runtests_criteria_coverage(testcases, \ criteria_element_list_by_criteria=\ criteria_elem_list_by_tool[toolalias],\ criterion_to_matrix=_criteria2matrix, \ criterion_to_executionoutput=\ _criteria2outhash,\ re_instrument_code=re_instrument_code, \ cover_criteria_elements_once=\ cover_criteria_elements_once, \ prioritization_module_by_criteria=\ prioritization_module_by_criteria) # Checkpointing checkpoint_handler.do_checkpoint( \ func_name=cp_func_name, \ taskid=cp_task_id, \ tool=ctoolalias) # Aggregate the matrices and out hashes ## Create reult matrices and out hashes result_matrices = {} result_outloghashes = {} for criterion in criterion_to_matrix: result_matrices[criterion] = common_matrices.ExecutionMatrix( \ filename=criterion_to_matrix[criterion], \ non_key_col_list=testcases) if criterion_to_executionoutput[criterion] is None: result_outloghashes[criterion] = None else: result_outloghashes[criterion] = \ common_matrices.OutputLogData(filename=\ criterion_to_executionoutput[criterion]) ERROR_HANDLER.assert_true(\ crit2tool2outhashfile[criterion] is not None, "Bug: log enabled but hidden from tool", __file__) ## Actual aggregate logging.debug("saving results ...") for criterion in result_matrices: result_matrix = result_matrices[criterion] result_outloghash = result_outloghashes[criterion] for mtoolalias in crit2tool2matrixfile[criterion]: tool_matrix = common_matrices.ExecutionMatrix(\ filename=crit2tool2matrixfile[criterion][mtoolalias]) # Check columns ERROR_HANDLER.assert_true(tool_matrix.get_key_colname() == \ result_matrix.get_key_colname(), \ "mismatch on key column name", __file__) ERROR_HANDLER.assert_true( \ set(tool_matrix.get_nonkey_colname_list()) == \ set(result_matrix.get_nonkey_colname_list()), \ "mismatch on non key column names", __file__) # bring in the data key2nonkeydict = tool_matrix.to_pandas_df().\ set_index(tool_matrix.get_key_colname(), drop=True).\ to_dict(orient="index") for c_key in key2nonkeydict: meta_c_key = DriversUtils.make_meta_element(\ str(c_key), mtoolalias) result_matrix.add_row_by_key(meta_c_key, key2nonkeydict[c_key], serialize=False) # out log hash if crit2tool2outhashfile[criterion] is not None: tool_outloghash = common_matrices.OutputLogData(\ filename=\ crit2tool2outhashfile[criterion][mtoolalias]) for objective, objective_data in \ tool_outloghash.get_zip_objective_and_data(): meta_objective = DriversUtils.make_meta_element(\ str(objective), mtoolalias) result_outloghash.add_data( {meta_objective: objective_data}, \ serialize=False) # @Checkpoint: Check whether already executed if checkpoint_handler.is_to_execute( \ func_name=cp_func_name, \ taskid=cp_task_id + 1, tool=criterion.get_str()): # Serialized the computed matrix result_matrix.serialize() if result_outloghash is not None: result_outloghash.serialize() # @Checkpoint: Checkpointing checkpoint_handler.do_checkpoint( \ func_name=cp_func_name, \ taskid=cp_task_id + 1, tool=criterion.get_str()) # Delete the temporary tool matrix's directory if os.path.isdir(matrices_dir_tmp): shutil.rmtree(matrices_dir_tmp) # @Checkpoint: Finished detailed_exectime = {} for ctoolalias in tool2criteria: ct = self.criteria_configured_tools[ctoolalias][self.TOOL_OBJ_KEY] detailed_exectime[ctoolalias] = (\ ct.get_checkpointer().get_execution_time(),\ ct.get_checkpointer().get_detailed_execution_time()) 
    checkpoint_handler.set_finished(\
                            detailed_exectime_obj=detailed_exectime)

    if finish_destroy_checkpointer:
        checkpoint_handler.destroy()
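# Hedged usage sketch for the meta criteria-coverage run above; the criterion
# value, matrix file name and tool object are assumptions. Values in
# `criterion_to_matrix` are file paths where the aggregated matrices get
# serialized.
#
#   meta_criteria_tool.runtests_criteria_coverage(
#               testcases=meta_tests,
#               criterion_to_matrix={TestCriteria.STATEMENT_COVERAGE:
#                                                       "stmt_matrix.csv"},
#               criterion_to_executionoutput={
#                                   TestCriteria.STATEMENT_COVERAGE: None},
#               re_instrument_code=True)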