def runtests(self, meta_testcases=None, exe_path_map=None, env_vars=None, \
                stop_on_failure=False, \
                per_test_timeout=None, \
                use_recorded_timeout_times=None, \
                recalculate_execution_times=False, \
                fault_test_execution_matrix_file=None, \
                fault_test_execution_execoutput_file=None, \
                with_output_summary=True, \
                hash_outlog=None, \
                test_prioritization_module=None, \
                parallel_test_count=1, \
                parallel_test_scheduler=None, \
                restart_checkpointer=False,
                finish_destroy_checkpointer=True):
    '''
    Execute the list of test cases with the given executable and say,
    for each test case, whether it failed.

    :param meta_testcases: list of test cases to execute
    :param exe_path_map: string representing the file system path to
                    the executable to execute with the tests
    :param env_vars: dict of environment variables to set before
                    executing each test ({<variable>: <value>})
    :param stop_on_failure: decide whether to stop the test
                    execution once a test fails
    :param fault_test_execution_matrix_file: optional matrix file
                    to store the tests' pass/fail execution data
    :param fault_test_execution_execoutput_file: optional output log file
                    to store the tests' actual execution output (hashed)
    :param with_output_summary: decide whether to return the outlog hash
    :type hash_outlog: bool
    :param hash_outlog: decide whether to hash the outlog or not
    :param test_prioritization_module: specify the test prioritization
                    module. (TODO: Implement support)
    :param parallel_test_count: specify the number of parallel test
                    executions. Must be an integer >= 1 or None.
                    When None, the maximum possible value is used.
    :param parallel_test_scheduler: specify the function that will
                    handle parallel test scheduling by tool, using
                    the test execution optimizer.
                    (TODO: Implement support)

    :type restart_checkpointer: bool
    :param restart_checkpointer: decide whether to discard the checkpoint
                    and restart anew.

    :type finish_destroy_checkpointer: bool
    :param finish_destroy_checkpointer: decide whether to automatically
                    destroy the checkpointer when done or not.
                    Useful if the caller has a checkpointer to update.

    :returns: a pair (list of two dicts): the test verdicts and the
                execution outputs, both keyed by meta test case name.
                {<test case name>: <True if failed, False if passed,
                    UNCERTAIN_TEST_VERDICT if uncertain>}
                If stop_on_failure is True, only the tests that were
                executed up to the failure are returned.
    '''
    ERROR_HANDLER.assert_true(meta_testcases is not None, \
                                    "Must specify testcases", __file__)

    # FIXME: Make sure that support is implemented for
    # parallelism and test prioritization. Remove the code below
    # once supported:
    ERROR_HANDLER.assert_true(test_prioritization_module is None, \
                    "Must implement test prioritization support here", \
                                                                __file__)
    ERROR_HANDLER.assert_true(parallel_test_scheduler is None, \
                "Must implement parallel tests execution support here", \
                                                                __file__)
    #~FIXMEnd

    # Check arguments validity
    if exe_path_map is None:
        exe_path_map = self._get_default_exe_path_map()

    if hash_outlog is None:
        hash_outlog = self.hash_outlog

    ERROR_HANDLER.assert_true(parallel_test_count is None \
                                    or parallel_test_count >= 1, \
                            "invalid parallel tests count ({})".format(\
                                    parallel_test_count), __file__)

    # @Checkpoint: create a checkpoint handler
    cp_func_name = "runtests"
    cp_task_id = 1
    checkpoint_handler = \
                    CheckPointHandler(self.get_checkpoint_state_object())
    if restart_checkpointer:
        checkpoint_handler.restart()
    if checkpoint_handler.is_finished():
        logging.warning("%s %s" % ("The function 'runtests' is finished", \
                        "according to checkpoint, but called again. "
                        "None returned"))
        if common_mix.confirm_execution("%s %s" % ( \
                                "Function 'runtests' is already", \
                                "finished, do you want to restart?")):
            checkpoint_handler.restart()
            logging.info("Restarting the finished 'runtests'")
        else:
            ERROR_HANDLER.error_exit(err_string="%s %s %s" % (\
                    "Execution halted. Cannot continue because no value", \
                    " can be returned. Check the results of the", \
                    "finished execution"), call_location=__file__)

    # @Checkpoint: Get the saved payload (data kept for each tool)
    # pair list of test-failed verdicts and execution outputs
    meta_test_failedverdicts_outlog = \
                                checkpoint_handler.get_optional_payload()
    if meta_test_failedverdicts_outlog is None:
        meta_test_failedverdicts_outlog = [{}, {}]

    # Make sure the tests are unique
    ERROR_HANDLER.assert_true(len(meta_testcases) == \
                                        len(set(meta_testcases)), \
                                "not all tests are unique", __file__)

    # For fdupes
    if len(self.tests_duplicates_map) > 0:
        meta_testcases_backup = meta_testcases
        meta_testcases = set(meta_testcases)
        dups_remove_meta_testcases = meta_testcases & \
                                        set(self.tests_duplicates_map)
        dup_toadd_test = {self.tests_duplicates_map[v] for v in \
                            dups_remove_meta_testcases} - meta_testcases
        meta_testcases = (meta_testcases - dups_remove_meta_testcases) \
                                                        | dup_toadd_test

    testcases_by_tool = {}
    for meta_testcase in meta_testcases:
        ttoolalias, testcase = \
                    DriversUtils.reverse_meta_element(meta_testcase)
        if ttoolalias not in testcases_by_tool:
            testcases_by_tool[ttoolalias] = []
        testcases_by_tool[ttoolalias].append(testcase)

    candidate_aliases = []
    for tpos, ttoolalias in enumerate(testcases_by_tool.keys()):
        # @Checkpoint: Check whether already executed
        if not checkpoint_handler.is_to_execute(func_name=cp_func_name, \
                                            taskid=cp_task_id, \
                                            tool=ttoolalias):
            continue
        candidate_aliases.append(ttoolalias)

    # parallelism strategies
    PARA_FULL_DOUBLE = 0
    PARA_ALT_TOOLS_AND_TESTS = 1
    PARA_TOOLS_ONLY = 2
    PARA_TOOLS_TESTS_AS_TOOLS = 3

    parallel_strategy = PARA_TOOLS_ONLY

    # minimum number of tests (across tools) for parallelism
    ptest_tresh = 5
    # minimum number of tests (of the given tool) for tool parallelism
    sub_ptest_thresh = 3

    shared_loc = multiprocessing.RLock()

    parallel_test_count_by_tool = {ta: 1 for ta in candidate_aliases}

    # tool with parallel test exec
    # TODO: find way to pass parallel count here
    if parallel_test_count is None:
        #parallel_test_count = min(10, multiprocessing.cpu_count())
        parallel_test_count = min(20, 2 * multiprocessing.cpu_count())

    cand_alias_joblib = []
    cand_alias_for = []

    para_tools = []
    para_tools = [tt for tt in candidate_aliases if \
                    (len(testcases_by_tool[tt]) >= sub_ptest_thresh \
                        and self.testcases_configured_tools[tt]\
                            [self.TOOL_OBJ_KEY].can_run_tests_in_parallel())
                 ]

    actual_parallel_cond = len(candidate_aliases) > 1 \
                                and len(meta_testcases) >= ptest_tresh \
                                and parallel_test_count is not None \
                                and parallel_test_count > 1

    if parallel_strategy == PARA_ALT_TOOLS_AND_TESTS:
        # the para_tools will run without parallelism, give them all threads
        for tt in para_tools:
            parallel_test_count_by_tool[tt] = parallel_test_count
        seq_tools = list(set(candidate_aliases) - set(para_tools))
        if len(seq_tools) > 1 and actual_parallel_cond:
            cand_alias_joblib = seq_tools
            cand_alias_for = para_tools
        else:
            cand_alias_for = candidate_aliases
    elif parallel_strategy == PARA_TOOLS_ONLY:
        if actual_parallel_cond:
            cand_alias_joblib = candidate_aliases
        else:
            cand_alias_for = candidate_aliases
    elif parallel_strategy == PARA_FULL_DOUBLE:
        # use parallel
        sub_parallel_count = 0 if parallel_test_count is None else \
                    parallel_test_count - len(parallel_test_count_by_tool)
        if sub_parallel_count > 0:
            para_tools.sort(reverse=True, \
                                key=lambda x: len(testcases_by_tool[x]))
            para_tools_n_tests = sum(\
                        [len(testcases_by_tool[tt]) for tt in para_tools])
            used = 0
            for tt in para_tools:
                quota = int(len(testcases_by_tool[tt]) * \
                                sub_parallel_count / para_tools_n_tests)
                parallel_test_count_by_tool[tt] += quota
                used += quota
            for tt in para_tools:
                if used == sub_parallel_count:
                    break
                parallel_test_count_by_tool[tt] += 1
        if actual_parallel_cond:
            cand_alias_joblib = candidate_aliases
        else:
            cand_alias_for = candidate_aliases
    elif parallel_strategy == PARA_TOOLS_TESTS_AS_TOOLS:
        # split the tests of one tool and
        # make the same tool run multiple times
        ERROR_HANDLER.error_exit("To Be implemented: same tool many times")
    else:
        ERROR_HANDLER.error_exit("Invalid parallel strategy")

    def tool_parallel_test_exec(ttoolalias):
        # Actual execution
        found_a_failure = False
        # Whether the execution was unsuccessful
        test_error = False
        ttool = \
            self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
        test_failed_verdicts, test_execoutput = ttool.runtests( \
                                testcases_by_tool[ttoolalias], \
                                exe_path_map, env_vars, \
                                stop_on_failure, \
                                per_test_timeout=per_test_timeout,
                                use_recorded_timeout_times=\
                                            use_recorded_timeout_times, \
                                recalculate_execution_times=\
                                            recalculate_execution_times, \
                                with_output_summary=with_output_summary, \
                                hash_outlog=hash_outlog, \
                                parallel_count=\
                                    parallel_test_count_by_tool[ttoolalias])
        with shared_loc:
            for testcase in test_failed_verdicts:
                meta_testcase = DriversUtils.make_meta_element(\
                                                    testcase, ttoolalias)
                meta_test_failedverdicts_outlog[0][meta_testcase] = \
                                            test_failed_verdicts[testcase]
                meta_test_failedverdicts_outlog[1][meta_testcase] = \
                                                test_execoutput[testcase]
                if not found_a_failure \
                        and test_failed_verdicts[testcase] == \
                            common_mix.GlobalConstants.FAIL_TEST_VERDICT:
                    found_a_failure = True
                if not test_error \
                        and test_failed_verdicts[testcase] == \
                            common_mix.GlobalConstants.TEST_EXECUTION_ERROR:
                    test_error = True

            # @Checkpoint: Checkpointing
            checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                            taskid=cp_task_id, \
                            tool=ttoolalias, \
                            opt_payload=meta_test_failedverdicts_outlog)
        return found_a_failure, test_error
    #~ def tool_parallel_test_exec()

    if len(cand_alias_joblib) > 0:
        parallel_count_ = min(len(cand_alias_joblib), parallel_test_count)
        joblib.Parallel(n_jobs=parallel_count_, require='sharedmem')\
                    (joblib.delayed(tool_parallel_test_exec)(ttoolalias) \
                                    for ttoolalias in cand_alias_joblib)
    if len(cand_alias_for) > 0:
        for tpos, ttoolalias in enumerate(cand_alias_for):
            found_a_failure, test_error = \
                                    tool_parallel_test_exec(ttoolalias)
            if stop_on_failure and found_a_failure:
                # @Checkpoint: Checkpointing for remaining tools
                for rem_tool in list(testcases_by_tool.keys())[tpos + 1:]:
                    checkpoint_handler.do_checkpoint(\
                            func_name=cp_func_name, \
                            taskid=cp_task_id, \
                            tool=rem_tool, \
                            opt_payload=meta_test_failedverdicts_outlog)
                break

    if stop_on_failure:
        # Make sure the non-executed tests have the uncertain value (None)
        if len(meta_test_failedverdicts_outlog[0]) < len(meta_testcases):
            for meta_testcase in set(meta_testcases) - \
                                set(meta_test_failedverdicts_outlog[0]):
                meta_test_failedverdicts_outlog[0][meta_testcase] = \
                        common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT
                meta_test_failedverdicts_outlog[1][meta_testcase] = \
                        common_matrices.OutputLogData.\
                                                UNCERTAIN_TEST_OUTLOGDATA

    ERROR_HANDLER.assert_true(len(meta_test_failedverdicts_outlog[0]) == \
                                            len(meta_testcases), \
            "mismatch between number of tests and reported verdicts:"
            + " Tests without verdict are {};".format(\
                    set(meta_testcases) - \
                        set(meta_test_failedverdicts_outlog[0])) \
            + " Tests not in the test list are {}.".format(\
                    set(meta_test_failedverdicts_outlog[0]) - \
                        set(meta_testcases)), \
                                                            __file__)

    # For fdupes
    if len(self.tests_duplicates_map) > 0:
        meta_testcases = meta_testcases_backup
        for i in (0, 1):
            for mtest in dups_remove_meta_testcases:
                # add to results
                meta_test_failedverdicts_outlog[i][mtest] = copy.deepcopy(\
                                    meta_test_failedverdicts_outlog[i]\
                                        [self.tests_duplicates_map[mtest]])
            for mtest in dup_toadd_test:
                # remove from results
                del meta_test_failedverdicts_outlog[i][mtest]

    if fault_test_execution_matrix_file is not None:
        # Load or Create the matrix
        fault_test_execution_matrix = common_matrices.ExecutionMatrix( \
                            filename=fault_test_execution_matrix_file, \
                            non_key_col_list=meta_testcases)
        ERROR_HANDLER.assert_true(fault_test_execution_matrix.is_empty(), \
                            "matrix must be empty. Filename is:"
                            " " + fault_test_execution_matrix_file, __file__)
        failverdict2val = {
            common_mix.GlobalConstants.FAIL_TEST_VERDICT: \
                    fault_test_execution_matrix.getActiveCellDefaultVal(),
            common_mix.GlobalConstants.PASS_TEST_VERDICT: \
                    fault_test_execution_matrix.getInactiveCellVal(),
            common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT: \
                    fault_test_execution_matrix.getUncertainCellDefaultVal(),
        }
        cells_dict = {}
        for meta_testcase in meta_test_failedverdicts_outlog[0]:
            cells_dict[meta_testcase] = failverdict2val[\
                        meta_test_failedverdicts_outlog[0][meta_testcase]]

        fault_test_execution_matrix.add_row_by_key(self.FAULT_MATRIX_KEY, \
                                            cells_dict, serialize=True)

    if fault_test_execution_execoutput_file is not None:
        # Load or Create the data object
        fault_test_execution_execoutput = common_matrices.OutputLogData( \
                            filename=fault_test_execution_execoutput_file)
        ERROR_HANDLER.assert_true(\
                        fault_test_execution_execoutput.is_empty(), \
                                "outlog data must be empty", __file__)
        fault_test_execution_execoutput.add_data(\
                                {self.PROGRAM_EXECOUTPUT_KEY: \
                                    meta_test_failedverdicts_outlog[1]}, \
                                serialize=True)

    # @Checkpoint: Finished
    detailed_exectime = {}
    for ttoolalias in testcases_by_tool.keys():
        tt = self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
        detailed_exectime[ttoolalias] = (\
                    tt.get_checkpointer().get_execution_time(),\
                    tt.get_checkpointer().get_detailed_execution_time())

    checkpoint_handler.set_finished( \
                                detailed_exectime_obj=detailed_exectime)

    if finish_destroy_checkpointer:
        checkpoint_handler.destroy()

    return meta_test_failedverdicts_outlog
def runtests(self, meta_testcases=None, exe_path_map=None, env_vars=None, \
                stop_on_failure=False, \
                per_test_timeout=None, \
                use_recorded_timeout_times=None, \
                recalculate_execution_times=False, \
                fault_test_execution_matrix_file=None, \
                fault_test_execution_execoutput_file=None, \
                with_outlog_hash=True, \
                test_prioritization_module=None, \
                parallel_test_count=1, \
                parallel_test_scheduler=None, \
                restart_checkpointer=False,
                finish_destroy_checkpointer=True):
    '''
    Execute the list of test cases with the given executable and say,
    for each test case, whether it failed.

    :param meta_testcases: list of test cases to execute
    :param exe_path_map: string representing the file system path to
                    the executable to execute with the tests
    :param env_vars: dict of environment variables to set before
                    executing each test ({<variable>: <value>})
    :param stop_on_failure: decide whether to stop the test
                    execution once a test fails
    :param fault_test_execution_matrix_file: optional matrix file
                    to store the tests' pass/fail execution data
    :param fault_test_execution_execoutput_file: optional output log file
                    to store the tests' actual execution output (hashed)
    :param with_outlog_hash: decide whether to return the outlog hash
    :param test_prioritization_module: specify the test prioritization
                    module. (TODO: Implement support)
    :param parallel_test_count: specify the number of parallel test
                    executions. Must be an integer >= 1
    :param parallel_test_scheduler: specify the function that will
                    handle parallel test scheduling by tool, using
                    the test execution optimizer.
                    (TODO: Implement support)

    :type restart_checkpointer: bool
    :param restart_checkpointer: decide whether to discard the checkpoint
                    and restart anew.

    :type finish_destroy_checkpointer: bool
    :param finish_destroy_checkpointer: decide whether to automatically
                    destroy the checkpointer when done or not.
                    Useful if the caller has a checkpointer to update.

    :returns: a pair (list of two dicts): the test verdicts and the
                execution outputs, both keyed by meta test case name.
                {<test case name>: <True if failed, False if passed,
                    UNCERTAIN_TEST_VERDICT if uncertain>}
                If stop_on_failure is True, only the tests that were
                executed up to the failure are returned.
    '''
    # FIXME: Make sure that support is implemented for
    # parallelism and test prioritization. Remove the code below
    # once supported:
    ERROR_HANDLER.assert_true(test_prioritization_module is None, \
                    "Must implement test prioritization support here", \
                                                                __file__)
    ERROR_HANDLER.assert_true(parallel_test_count <= 1, \
                "Must implement parallel tests execution support here", \
                                                                __file__)
    ERROR_HANDLER.assert_true(parallel_test_scheduler is None, \
                "Must implement parallel tests execution support here", \
                                                                __file__)
    #~FIXMEnd

    # Check arguments validity
    if exe_path_map is None:
        exe_path_map = self._get_default_exe_path_map()

    ERROR_HANDLER.assert_true(parallel_test_count > 0, \
                "invalid parallel test execution count: {}. {}".format( \
                        parallel_test_count, "must be >= 1"))

    # @Checkpoint: create a checkpoint handler
    cp_func_name = "runtests"
    cp_task_id = 1
    checkpoint_handler = \
                    CheckPointHandler(self.get_checkpoint_state_object())
    if restart_checkpointer:
        checkpoint_handler.restart()
    if checkpoint_handler.is_finished():
        logging.warning("%s %s" % ("The function 'runtests' is finished", \
                        "according to checkpoint, but called again. "
                        "None returned"))
        if common_mix.confirm_execution("%s %s" % ( \
                                "Function 'runtests' is already", \
                                "finished, do you want to restart?")):
            checkpoint_handler.restart()
            logging.info("Restarting the finished 'runtests'")
        else:
            ERROR_HANDLER.error_exit(err_string="%s %s %s" % (\
                    "Execution halted. Cannot continue because no value", \
                    " can be returned. Check the results of the", \
                    "finished execution"), call_location=__file__)

    # @Checkpoint: Get the saved payload (data kept for each tool)
    # pair list of test-failed verdicts and execution outputs
    meta_test_failedverdicts_outlog = \
                                checkpoint_handler.get_optional_payload()
    if meta_test_failedverdicts_outlog is None:
        meta_test_failedverdicts_outlog = [{}, {}]

    # Make sure the tests are unique
    ERROR_HANDLER.assert_true(len(meta_testcases) == \
                                        len(set(meta_testcases)), \
                                "not all tests are unique", __file__)

    testcases_by_tool = {}
    for meta_testcase in meta_testcases:
        ttoolalias, testcase = \
                    DriversUtils.reverse_meta_element(meta_testcase)
        if ttoolalias not in testcases_by_tool:
            testcases_by_tool[ttoolalias] = []
        testcases_by_tool[ttoolalias].append(testcase)

    found_a_failure = False
    for tpos, ttoolalias in enumerate(testcases_by_tool.keys()):
        # @Checkpoint: Check whether already executed
        if not checkpoint_handler.is_to_execute(func_name=cp_func_name, \
                                            taskid=cp_task_id, \
                                            tool=ttoolalias):
            continue

        # Actual execution
        ttool = \
            self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
        test_failed_verdicts, test_execoutput = ttool.runtests( \
                                testcases_by_tool[ttoolalias], \
                                exe_path_map, env_vars, \
                                stop_on_failure, \
                                per_test_timeout=per_test_timeout,
                                use_recorded_timeout_times=\
                                            use_recorded_timeout_times, \
                                recalculate_execution_times=\
                                            recalculate_execution_times, \
                                with_outlog_hash=with_outlog_hash)
        for testcase in test_failed_verdicts:
            meta_testcase = \
                    DriversUtils.make_meta_element(testcase, ttoolalias)
            meta_test_failedverdicts_outlog[0][meta_testcase] = \
                                        test_failed_verdicts[testcase]
            meta_test_failedverdicts_outlog[1][meta_testcase] = \
                                            test_execoutput[testcase]
            if test_failed_verdicts[testcase] == \
                            common_mix.GlobalConstants.FAIL_TEST_VERDICT:
                found_a_failure = True

        # @Checkpoint: Checkpointing
        checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                            taskid=cp_task_id, \
                            tool=ttoolalias, \
                            opt_payload=meta_test_failedverdicts_outlog)

        if stop_on_failure and found_a_failure:
            # @Checkpoint: Checkpointing for remaining tools
            for rem_tool in list(testcases_by_tool.keys())[tpos + 1:]:
                checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                            taskid=cp_task_id, \
                            tool=rem_tool, \
                            opt_payload=meta_test_failedverdicts_outlog)
            break

    if stop_on_failure:
        # Make sure the non-executed tests have the uncertain value (None)
        if len(meta_test_failedverdicts_outlog[0]) < len(meta_testcases):
            for meta_testcase in set(meta_testcases) - \
                                set(meta_test_failedverdicts_outlog[0]):
                meta_test_failedverdicts_outlog[0][meta_testcase] = \
                        common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT
                meta_test_failedverdicts_outlog[1][meta_testcase] = \
                        common_matrices.OutputLogData.\
                                                UNCERTAIN_TEST_OUTLOGDATA

    ERROR_HANDLER.assert_true(len(meta_test_failedverdicts_outlog[0]) == \
                                        len(meta_testcases), \
                    "Not all tests have a verdict reported", __file__)

    if fault_test_execution_matrix_file is not None:
        # Load or Create the matrix
        fault_test_execution_matrix = common_matrices.ExecutionMatrix( \
                            filename=fault_test_execution_matrix_file, \
                            non_key_col_list=meta_testcases)
        ERROR_HANDLER.assert_true(fault_test_execution_matrix.is_empty(), \
                                    "matrix must be empty", __file__)
        failverdict2val = {
            common_mix.GlobalConstants.FAIL_TEST_VERDICT: \
                    fault_test_execution_matrix.getActiveCellDefaultVal(),
            common_mix.GlobalConstants.PASS_TEST_VERDICT: \
                    fault_test_execution_matrix.getInactiveCellVal(),
            common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT: \
                    fault_test_execution_matrix.getUncertainCellDefaultVal(),
        }
        cells_dict = {}
        for meta_testcase in meta_test_failedverdicts_outlog[0]:
            cells_dict[meta_testcase] = failverdict2val[\
                        meta_test_failedverdicts_outlog[0][meta_testcase]]

        fault_test_execution_matrix.add_row_by_key(self.FAULT_MATRIX_KEY, \
                                            cells_dict, serialize=True)

    if fault_test_execution_execoutput_file is None:
        meta_test_failedverdicts_outlog[1] = None
    else:
        # Load or Create the data object
        fault_test_execution_execoutput = common_matrices.OutputLogData( \
                            filename=fault_test_execution_execoutput_file)
        ERROR_HANDLER.assert_true(\
                        fault_test_execution_execoutput.is_empty(), \
                                "outlog data must be empty", __file__)
        fault_test_execution_execoutput.add_data(\
                                {self.PROGRAM_EXECOUTPUT_KEY: \
                                    meta_test_failedverdicts_outlog[1]}, \
                                serialize=True)

    # @Checkpoint: Finished
    detailed_exectime = {}
    for ttoolalias in testcases_by_tool.keys():
        tt = self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
        detailed_exectime[ttoolalias] = (\
                    tt.get_checkpointer().get_execution_time(),\
                    tt.get_checkpointer().get_detailed_execution_time())

    checkpoint_handler.set_finished( \
                                detailed_exectime_obj=detailed_exectime)

    if finish_destroy_checkpointer:
        checkpoint_handler.destroy()

    return meta_test_failedverdicts_outlog
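# ---------------------------------------------------------------------------
# Illustrative usage sketch for this older variant (hypothetical caller object
# and file name; the call itself mirrors the signature above):
#
#   verdicts, outlogs = meta_test_tool.runtests(
#               meta_testcases=my_meta_testcases,
#               fault_test_execution_matrix_file="fault_matrix.csv",
#               with_outlog_hash=True)
#
# Note a difference from the newer version above: when
# fault_test_execution_execoutput_file is not given, this variant sets the
# outlog part of the returned pair to None, so `outlogs` would be None here.
# ---------------------------------------------------------------------------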