Code example #1
File: test_mix.py Project: thierry-tct/muteria
    def test_confirm_execution(self):
        yes_stub = lambda _: 'y'
        no_stub = lambda _: 'n'

        if sys.version_info.major < 3:
            import __builtin__
            stored = __builtin__.raw_input
        else:
            import builtins
            stored = builtins.input

        if sys.version_info.major < 3:
            __builtin__.raw_input = yes_stub
        else:
            builtins.input = yes_stub
        self.assertTrue(common_mix.confirm_execution("testing y?"))

        if sys.version_info.major < 3:
            __builtin__.raw_input = no_stub
        else:
            builtins.input = no_stub
        self.assertFalse(common_mix.confirm_execution("testing n?"))

        if sys.version_info.major < 3:
            __builtin__.raw_input = stored
        else:
            builtins.input = stored
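
The test above swaps the built-in input function by hand and restores it at the end. For comparison, a minimal sketch of the same checks using unittest.mock.patch, assuming Python 3 only and the same common_mix.confirm_execution API (the method name below is hypothetical); patch() restores builtins.input automatically when the with-block exits:

    from unittest import mock   # normally placed at module level

    def test_confirm_execution_with_mock(self):
        # answering 'y' should make confirm_execution return True
        with mock.patch('builtins.input', return_value='y'):
            self.assertTrue(common_mix.confirm_execution("testing y?"))
        # answering 'n' should make confirm_execution return False
        with mock.patch('builtins.input', return_value='n'):
            self.assertFalse(common_mix.confirm_execution("testing n?"))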
Code example #2
File: fs.py Project: Marvinmw/muteria
 def _get_from_file(self):
     contain = None
     trybackup = True
     if os.path.isfile(self.store_filepath):
         try:
             contain = loadJSON(self.store_filepath)
             trybackup = False
         except ValueError:
             trybackup = True
     if trybackup and os.path.isfile(self.backup_filepath):
         try:
             contain = loadJSON(self.backup_filepath)
         except ValueError:
             ERROR_HANDLER.error_exit("%s %s" % (\
                                     "Both Checkpoint store_file and", \
                                     "backup file are invalid"), __file__)
         if not common_mix.confirm_execution("%s %s" % ( \
                     "The checkpoint store_file is invalid but backup", \
                     "is valid. Do you want to use backup?")):
             ERROR_HANDLER.error_exit("%s %s" % (\
                                 "Execution terminated due to", \
                                 "invalid Checkpoint store_file"), __file__)
     
     # Check consistency or update obj
     if contain is not None:
         for key in [self.DETAILED_TIME_KEY, self.AGG_TIME_KEY, \
                                                 self.CHECKPOINT_DATA_KEY]:
             if key not in contain:
                 file_used = self.backup_filepath if trybackup \
                                                 else self.store_filepath
                 ERROR_HANDLER.error_exit("%s (%s). %s %s" % \
                             ("Invalid checkpoint file", file_used, \
                             "do not contain the data for", key), __file__)
     return contain
Code example #3
File: explorer.py Project: Marvinmw/muteria
    def __init__(self, root_outdir):
        self.root_outdir = root_outdir
        self.timeline_outdirs = []

        # get timeline_outdirs
        self.timeline_outdirs.append(os.path.join(self.root_outdir, \
                                                            self.LATEST_NAME))
        
        tmp_outdirs = []
        if os.path.isdir(root_outdir):
            for fd in os.listdir(root_outdir):
                if self._is_history_file_dir(fd):
                    tmp_outdirs.append(os.path.join(self.root_outdir, fd))
        
        tmp_outdirs.sort(key=lambda x: self._history_of_outdir(x))
        tmp_histories = [self._history_of_outdir(x) for x in tmp_outdirs]

        fix_missing_history = False
        for ind in range(len(tmp_histories)):
            new_outdir = tmp_outdirs[ind]
            if tmp_histories[ind] != ind + 1:
                if not fix_missing_history:
                    fix_missing_history = common_mix.confirm_execution(\
                                            "Missing history: {}. {}".format(\
                                ind+1, "Do you want to automatically fix it?"))
                    ERROR_HANDLER.assert_true(fix_missing_history, \
                                    "No history should be missing {}".format(\
                                    "when executing. Fix manually."), __file__)
                else:
                    new_outdir = self._change_history(tmp_outdirs[ind], ind+1)
                    shutil.move(tmp_outdirs[ind], new_outdir)
            self.timeline_outdirs.append(new_outdir)

        # Create explorers
        self.explorer_list = self._create_explorers(self.timeline_outdirs)
Code example #4
    def _get_untracked_and_diffed(self, repo_):
        try:
            untracked_file_list = repo_.untracked_files
        except (UnicodeDecodeError, UnicodeEncodeError):
            # XXX: error in git python
            non_unicode_files = []
            non_unicode_dirs = []

            def has_unicode_error(name):
                try:
                    name.encode('ascii').decode('unicode_escape')\
                                                            .encode('latin1')
                    return False
                except (UnicodeDecodeError, UnicodeEncodeError):
                    return True

            #~ def has_unicode_error()

            for root_, dirs_, files_ in os.walk(repo_.working_tree_dir):
                for sub_d in dirs_:
                    sd_path = os.path.join(root_, sub_d)
                    if has_unicode_error(sd_path):
                        non_unicode_dirs.append(sd_path)
                for f_ in files_:
                    f_path = os.path.join(root_, f_)
                    if has_unicode_error(f_path):
                        non_unicode_files.append(f_path)
            if common_mix.confirm_execution("Do you want to delete non "
                                            "unicode files and dirs?"):
                # XXX: delete non unicode files
                for fd in non_unicode_dirs + non_unicode_files:
                    if os.path.isdir(fd):
                        func = shutil.rmtree
                    elif os.path.isfile(fd):
                        func = os.remove
                    elif os.path.islink(fd):
                        func = os.unlink
                    else:
                        ERROR_HANDLER.error_exit(\
                                "fd is not file, dir or link: {}".format(fd))

                    try:
                        func(fd)
                    except PermissionError:
                        # TODO: avoid using os.system here
                        os.system('sudo chmod -R 777 {}'.format(\
                                                        os.path.dirname(fd)))
                        func(fd)
                logging.debug("The following files and dirs were removed: {}"\
                                .format(non_unicode_files + non_unicode_dirs))
            else:
                ERROR_HANDLER.error_exit(
                    "Non unicode file name of untracked "
                    "files in the repo. Fix it and rerun", __file__)
            untracked_file_list = repo_.untracked_files
        res = set(untracked_file_list) | set(self._get_diffed(repo_))
        return res
Code example #5
File: executor.py Project: Marvinmw/muteria
    def _initialize_output_structure(self, cleanstart=False):
        if cleanstart:
            if common_mix.confirm_execution(
                                    "Do you really want to clean the outdir?"):
                self.head_explorer.clean_create_and_get_dir(\
                                            outdir_struct.TOP_OUTPUT_DIR_KEY)
            else:
                ERROR_HANDLER.error_exit("Cancelled Cleanstart!", __file__)
        else:
            self.head_explorer.get_or_create_and_get_dir(\
                                            outdir_struct.TOP_OUTPUT_DIR_KEY)

        for folder in [outdir_struct.CONTROLLER_DATA_DIR, \
                                    outdir_struct.CTRL_CHECKPOINT_DIR, \
                                    outdir_struct.CTRL_LOGS_DIR, \
                                    outdir_struct.EXECUTION_TMP_DIR]:
            self.head_explorer.get_or_create_and_get_dir(folder)
Code example #6
File: matrices.py Project: thierry-tct/muteria
    def add_data (self, data_dict, check_all=True, override_existing=False, \
                                ask_confirmation_with_exist_missing=False, \
                                                            serialize=False):
        if check_all:
            ERROR_HANDLER.assert_true(\
                            type(data_dict) == dict and len(data_dict) > 0, \
                                        "expecting a non empty dict", __file__)
            for o, o_obj in data_dict.items():
                if len(o_obj) == 0: 
                    continue
                ERROR_HANDLER.assert_true(\
                            type(o_obj) == dict, \
                            "expecting dict for value data: objective is "+o, \
                                                                    __file__)
                for t, t_obj in o_obj.items():
                    ERROR_HANDLER.assert_true(set(t_obj) == self.Dat_Keys , \
                                "Invalid data for o "+o+' and t '+t, __file__)

        intersect_objective = set(self.data) & set(data_dict)
        onlynew_objective = set(data_dict) - intersect_objective
        if not override_existing:
            for objective in intersect_objective:
                ERROR_HANDLER.assert_true(len(set(self.data[objective]) & \
                                            set(data_dict[objective])) == 0, \
                            "Override_existing not set but there is overlap", \
                                                                    __file__)            
        for objective in intersect_objective:
            if ask_confirmation_with_exist_missing and \
                                        len(set(self.data[objective]) & \
                                            set(data_dict[objective])) != 0:
                ERROR_HANDLER.assert_true(common_mix.confirm_execution(\
                                            "Some values are existing, "
                                            "do you confirm their override?"),\
                             "Existing values were not overriden", __file__)
            self.data[objective].update(self._mem_optimize_sub_dat(\
                                                        data_dict[objective]))
        for objective in onlynew_objective:
            self.data[objective] = self._mem_optimize_sub_dat(\
                                                        data_dict[objective])
        if serialize:
            self.serialize()
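
The check_all validation above implies that data_dict is a two-level nested dict: objective -> test -> per-test record whose keys must equal self.Dat_Keys. A hedged usage sketch follows; the matrix instance mat, the objective and test names, and the cell values are illustrative assumptions, not taken from the muteria sources:

    # objective -> {test -> {Dat_Key -> value}}; empty per-objective dicts are skipped
    data_dict = {
        "objective_1": {
            "test_a": {k: 1 for k in mat.Dat_Keys},
            "test_b": {k: 0 for k in mat.Dat_Keys},
        },
        "objective_2": {},
    }
    mat.add_data(data_dict, check_all=True, override_existing=False, serialize=True)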
Code example #7
File: configurations.py Project: muteria/muteria
def save_common_default_template(filename=None):
    """ Write the default raw configuration template
        >>> import muteria.configmanager.configurations as mcc
        >>> mcc.save_common_default_template()
    """
    if filename is None:
        filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),\
                                            "defaults", "common_defaults.py")
    row_list = get_full_rawconf_template()
    # save into out file
    if os.path.isfile(filename):
        if not common_mix.confirm_execution(\
                        "The filename to save configuration template "
                        "exists. Do you want to override it?"):
            return


    header = "\"\"\" Defaults parameters that are common to all languages\n" +\
            "\"\"\"\n"
    header += "from __future__ import print_function\n\n"
    header += "{} {}\n".format("from muteria.configmanager.configurations", \
                                "import SessionMode")

    header += "{} {}\n".format("from muteria.configmanager.configurations", \
                                "import TestcaseToolsConfig")
    header += "{} {}\n".format("from muteria.configmanager.configurations", \
                                "import CriteriaToolsConfig")
    header += "{} {}\n".format("from muteria.configmanager.configurations", \
                                "import ToolUserCustom")

    header += "\n{} {}\n".format("from muteria.drivers.criteria", \
                                "import TestCriteria")
    header += "{} {}\n".format("from muteria.drivers.criteria", \
                                "import CriteriaToolType")
    header += "\n{} {}\n".format("from muteria.drivers.testgeneration", \
                                "import TestToolType")
    with open(filename, 'w') as f:
        f.write(header + '\n')
        for row in row_list:
            f.write(row + '\n')
Code example #8
    def runtests(self, meta_testcases=None, exe_path_map=None, env_vars=None, \
                        stop_on_failure=False, \
                        per_test_timeout=None, \
                        use_recorded_timeout_times=None, \
                        recalculate_execution_times=False, \
                        fault_test_execution_matrix_file=None, \
                        fault_test_execution_execoutput_file=None, \
                        with_output_summary=True, \
                        hash_outlog=None, \
                        test_prioritization_module=None, \
                        parallel_test_count=1, \
                        parallel_test_scheduler=None, \
                        restart_checkpointer=False,
                        finish_destroy_checkpointer=True):
        '''
        Execute the list of test cases with the given executable and 
        say, for each test case, whether it failed

        :param meta_testcases: list of test cases to execute
        :param exe_path_map: string representing the file system path to 
                        the executable to execute with the tests
        :param env_vars: dict of environment variables to set before
                        executing each test ({<variable>: <value>})
        :param stop_on_failure: decide whether to stop the test 
                        execution once a test fails
        :param fault_test_execution_matrix_file: Optional matrix file 
                        to store the tests' pass fail execution data
        :param fault_test_execution_execoutput_file: Optional output log file 
                        to store the tests' execution actual output (hashed)
        :param with_output_summary: decide whether to return outlog hash 
        :type hash_outlog: bool
        :param hash_outlog: decide whether to hash the outlog or not
        :param test_prioritization_module: Specify the test prioritization
                        module. 
                        (TODO: Implement support)
        :param parallel_test_count: Specify the number of parallel test
                        executions. Must be an integer >= 1 or None.
                        When None, the maximum possible value is used.
        :param parallel_test_scheduler: Specify the function that will
                        handle parallel test scheduling by tool, using
                        the test execution optimizer. 
                        (TODO: Implement support)

        :type restart_checkpointer: bool
        :param restart_checkpointer: Decide whether to discard the checkpoint
                        and restart anew.

        :type finish_destroy_checkpointer: bool
        :param finish_destroy_checkpointer: Decide whether to automatically 
                        destroy the checkpointer when done or not
                        Useful if the caller has a checkpointer to update. 

        :returns: dict of testcase and their failed verdict.
                 {<test case name>: <True if failed, False if passed,
                    UNCERTAIN_TEST_VERDICT if uncertain>}
                 If stop_on_failure is True, only return the tests that 
                 have been executed until the failure
        '''

        ERROR_HANDLER.assert_true(meta_testcases is not None, \
                                            "Must specify testcases", __file__)

        # FIXME: Make sure that the support is implemented for
        # parallelism and test prioritization. Remove the code below
        # once supported:
        ERROR_HANDLER.assert_true(test_prioritization_module is None, \
                        "Must implement test prioritization support here", \
                                                                    __file__)
        ERROR_HANDLER.assert_true(parallel_test_scheduler is None, \
                    "Must implement parallel tests execution support here", \
                                                                    __file__)
        #~FIXMEnd

        # Check arguments Validity
        if exe_path_map is None:
            exe_path_map = self._get_default_exe_path_map()

        if hash_outlog is None:
            hash_outlog = self.hash_outlog

        ERROR_HANDLER.assert_true(parallel_test_count is None \
                                        or parallel_test_count >= 1, \
                                "invalid parallel tests count ({})".format(\
                                                parallel_test_count), __file__)

        # @Checkpoint: create a checkpoint handler
        cp_func_name = "runtests"
        cp_task_id = 1
        checkpoint_handler = \
                CheckPointHandler(self.get_checkpoint_state_object())
        if restart_checkpointer:
            checkpoint_handler.restart()
        if checkpoint_handler.is_finished():
            logging.warning("%s %s" %("The function 'runtests' is finished", \
                "according to checkpoint, but called again. None returned"))
            if common_mix.confirm_execution("%s %s" % ( \
                                        "Function 'runtests' is already", \
                                        "finished, do you want to restart?")):
                checkpoint_handler.restart()
                logging.info("Restarting the finished 'runtests'")
            else:
                ERROR_HANDLER.error_exit(err_string="%s %s %s" % (\
                        "Execution halted. Cannot continue because no value", \
                        " can be returned. Check the results of the", \
                        "finished execution"), call_location=__file__)

        # @Checkpoint: Get the saved payload (data kept for each tool)
        # pair list of testfailed verdict and execution output
        meta_test_failedverdicts_outlog = \
                                    checkpoint_handler.get_optional_payload()
        if meta_test_failedverdicts_outlog is None:
            meta_test_failedverdicts_outlog = [{}, {}]

        # Make sure the tests are unique
        ERROR_HANDLER.assert_true(len(meta_testcases) == \
                                                len(set(meta_testcases)), \
                                        "not all tests are unique", __file__)

        # For fdupes
        if len(self.tests_duplicates_map) > 0:
            meta_testcases_backup = meta_testcases
            meta_testcases = set(meta_testcases)
            dups_remove_meta_testcases = meta_testcases & \
                                                set(self.tests_duplicates_map)
            dup_toadd_test = {self.tests_duplicates_map[v] for v in \
                                dups_remove_meta_testcases} - meta_testcases
            meta_testcases = (meta_testcases - dups_remove_meta_testcases) \
                                                            | dup_toadd_test

        testcases_by_tool = {}
        for meta_testcase in meta_testcases:
            ttoolalias, testcase = \
                            DriversUtils.reverse_meta_element(meta_testcase)
            if ttoolalias not in testcases_by_tool:
                testcases_by_tool[ttoolalias] = []
            testcases_by_tool[ttoolalias].append(testcase)

        candidate_aliases = []
        for tpos, ttoolalias in enumerate(testcases_by_tool.keys()):
            # @Checkpoint: Check whether already executed
            if not checkpoint_handler.is_to_execute(func_name=cp_func_name, \
                                                taskid=cp_task_id, \
                                                tool=ttoolalias):
                continue
            candidate_aliases.append(ttoolalias)

        # parallelism strategies
        PARA_FULL_DOUBLE = 0
        PARA_ALT_TOOLS_AND_TESTS = 1
        PARA_TOOLS_ONLY = 2
        PARA_TOOLS_TESTS_AS_TOOLS = 3

        parallel_strategy = PARA_TOOLS_ONLY

        # minimum total number of tests (across tools) for parallelism
        ptest_tresh = 5
        # minimum number of tests (of the given tool) for tool parallelism
        sub_ptest_thresh = 3

        shared_loc = multiprocessing.RLock()

        parallel_test_count_by_tool = {ta: 1 for ta in candidate_aliases}

        # tool with parallel test exec
        # TODO: find way to pass parallel count here
        if parallel_test_count is None:
            #parallel_test_count = min(10, multiprocessing.cpu_count())
            parallel_test_count = min(20, 2 * multiprocessing.cpu_count())

        cand_alias_joblib = []
        cand_alias_for = []

        para_tools = []
        para_tools = [tt for tt in candidate_aliases if \
                            (len(testcases_by_tool[tt]) >= sub_ptest_thresh \
                              and self.testcases_configured_tools[tt]\
                               [self.TOOL_OBJ_KEY].can_run_tests_in_parallel())
                        ]

        actual_parallel_cond = len(candidate_aliases) > 1 \
                                     and len(meta_testcases) >= ptest_tresh \
                                        and parallel_test_count is not None \
                                        and parallel_test_count > 1

        if parallel_strategy == PARA_ALT_TOOLS_AND_TESTS:
            # para_tools run one at a time (no tool-level parallelism); give each all the threads for its own tests
            for tt in para_tools:
                parallel_test_count_by_tool[tt] = parallel_test_count
            seq_tools = list(set(candidate_aliases) - set(para_tools))
            if len(seq_tools) > 1 and actual_parallel_cond:
                cand_alias_joblib = seq_tools
                cand_alias_for = para_tools
            else:
                cand_alias_for = candidate_aliases
        elif parallel_strategy == PARA_TOOLS_ONLY:
            if actual_parallel_cond:
                cand_alias_joblib = candidate_aliases
            else:
                cand_alias_for = candidate_aliases
        elif parallel_strategy == PARA_FULL_DOUBLE:
            # use parallel
            sub_parallel_count = 0 if parallel_test_count is None else \
                        parallel_test_count - len(parallel_test_count_by_tool)
            if sub_parallel_count > 0:
                para_tools.sort(reverse=True, \
                                        key=lambda x: len(testcases_by_tool[x]))
                para_tools_n_tests = sum(\
                            [len(testcases_by_tool[tt]) for tt in para_tools])

                used = 0
                for tt in para_tools:
                    quota = int(len(testcases_by_tool[tt]) * \
                                    sub_parallel_count / para_tools_n_tests)
                    parallel_test_count_by_tool[tt] += quota
                    used += quota
                for tt in para_tools:
                    if used == sub_parallel_count:
                        break
                    parallel_test_count_by_tool[tt] += 1

            if actual_parallel_cond:
                cand_alias_joblib = candidate_aliases
            else:
                cand_alias_for = candidate_aliases
        elif parallel_strategy == PARA_TOOLS_TESTS_AS_TOOLS:
            # split the tests of one tool and
            # make the same tool run multiple times
            ERROR_HANDLER.error_exit("To Be implemented: same tool many times")
        else:
            ERROR_HANDLER.error_exit("Invalid parallel startegy")

        def tool_parallel_test_exec(ttoolalias):
            # Actual execution
            found_a_failure = False
            # Whether the execution was unsuccessful
            test_error = False
            ttool = \
                self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            test_failed_verdicts, test_execoutput = ttool.runtests( \
                                testcases_by_tool[ttoolalias], \
                                exe_path_map, env_vars, \
                                stop_on_failure, \
                                per_test_timeout=per_test_timeout,
                                use_recorded_timeout_times=\
                                    use_recorded_timeout_times, \
                                recalculate_execution_times=\
                                    recalculate_execution_times, \
                                with_output_summary=\
                                            with_output_summary, \
                                hash_outlog=hash_outlog, \
                                parallel_count=\
                                    parallel_test_count_by_tool[ttoolalias])
            with shared_loc:
                for testcase in test_failed_verdicts:
                    meta_testcase = DriversUtils.make_meta_element(\
                                                        testcase, ttoolalias)
                    meta_test_failedverdicts_outlog[0][meta_testcase] = \
                                                test_failed_verdicts[testcase]
                    meta_test_failedverdicts_outlog[1][meta_testcase] = \
                                                    test_execoutput[testcase]
                    if not found_a_failure \
                                and test_failed_verdicts[testcase] == \
                                common_mix.GlobalConstants.FAIL_TEST_VERDICT:
                        found_a_failure = True
                    if not test_error \
                                and test_failed_verdicts[testcase] == \
                            common_mix.GlobalConstants.TEST_EXECUTION_ERROR:
                        test_error = True

                # @Checkpoint: Checkpointing
                checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=ttoolalias, \
                                opt_payload=meta_test_failedverdicts_outlog)
            return found_a_failure, test_error

        #~ def tool_parallel_test_exec()

        if len(cand_alias_joblib) > 0:
            parallel_count_ = min(len(cand_alias_joblib), parallel_test_count)
            joblib.Parallel(n_jobs=parallel_count_, require='sharedmem')\
                    (joblib.delayed(tool_parallel_test_exec)(ttoolalias) \
                        for ttoolalias in cand_alias_joblib)
        if len(cand_alias_for) > 0:
            for tpos, ttoolalias in enumerate(cand_alias_for):
                found_a_failure, test_error = \
                                        tool_parallel_test_exec(ttoolalias)
                if stop_on_failure and found_a_failure:
                    # @Checkpoint: Checkpointing for remaining tools
                    for rem_tool in list(testcases_by_tool.keys())[tpos + 1:]:
                        checkpoint_handler.do_checkpoint(\
                                func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=rem_tool, \
                                opt_payload=meta_test_failedverdicts_outlog)
                    break

        if stop_on_failure:
            # Make sure the non-executed tests have the uncertain value (None)
            if len(meta_test_failedverdicts_outlog[0]) < len(meta_testcases):
                for meta_testcase in set(meta_testcases) - \
                                    set(meta_test_failedverdicts_outlog[0]):
                    meta_test_failedverdicts_outlog[0][meta_testcase] = \
                            common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT
                    meta_test_failedverdicts_outlog[1][meta_testcase] = \
                            common_matrices.OutputLogData.\
                                                    UNCERTAIN_TEST_OUTLOGDATA

        ERROR_HANDLER.assert_true(len(meta_test_failedverdicts_outlog[0]) == \
                                                        len(meta_testcases), \
                    "mismatch between number of tests and reported verdicts:"
                    + " Tests without verdict are {};".format(\
                               set(meta_testcases) - \
                                    set(meta_test_failedverdicts_outlog[0])) \
                    + " Test not in testlist are {}.".format(\
                               set(meta_test_failedverdicts_outlog[0]) - \
                                    set(meta_testcases)), \
                                                                     __file__)

        # For fdupes
        if len(self.tests_duplicates_map) > 0:
            meta_testcases = meta_testcases_backup
            for i in (0, 1):
                for mtest in dups_remove_meta_testcases:
                    # add to results
                    meta_test_failedverdicts_outlog[i][mtest] = copy.deepcopy(\
                                        meta_test_failedverdicts_outlog[i]\
                                            [self.tests_duplicates_map[mtest]])
                for mtest in dup_toadd_test:
                    # remove from results
                    del meta_test_failedverdicts_outlog[i][mtest]

        if fault_test_execution_matrix_file is not None:
            # Load or Create the matrix
            fault_test_execution_matrix = common_matrices.ExecutionMatrix( \
                                filename=fault_test_execution_matrix_file, \
                                            non_key_col_list=meta_testcases)
            ERROR_HANDLER.assert_true(fault_test_execution_matrix.is_empty(), \
                                "matrix must be empty. Filename is:"
                                " "+fault_test_execution_matrix_file, __file__)
            failverdict2val = {
                common_mix.GlobalConstants.FAIL_TEST_VERDICT: \
                        fault_test_execution_matrix.getActiveCellDefaultVal(),
                common_mix.GlobalConstants.PASS_TEST_VERDICT: \
                            fault_test_execution_matrix.getInactiveCellVal(),
                common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT: \
                    fault_test_execution_matrix.getUncertainCellDefaultVal(),
            }
            cells_dict = {}
            for meta_testcase in meta_test_failedverdicts_outlog[0]:
                cells_dict[meta_testcase] = failverdict2val[\
                            meta_test_failedverdicts_outlog[0][meta_testcase]]

            fault_test_execution_matrix.add_row_by_key(self.FAULT_MATRIX_KEY, \
                                                cells_dict, serialize=True)

        if fault_test_execution_execoutput_file is not None:
            # Load or Create the data object
            fault_test_execution_execoutput = common_matrices.OutputLogData( \
                                filename=fault_test_execution_execoutput_file)
            ERROR_HANDLER.assert_true(\
                            fault_test_execution_execoutput.is_empty(), \
                                        "outlog data must be empty", __file__)
            fault_test_execution_execoutput.add_data(\
                                    {self.PROGRAM_EXECOUTPUT_KEY: \
                                         meta_test_failedverdicts_outlog[1]}, \
                                                                serialize=True)

        # @Checkpoint: Finished
        detailed_exectime = {}
        for ttoolalias in testcases_by_tool.keys():
            tt = self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            detailed_exectime[ttoolalias] = (\
                        tt.get_checkpointer().get_execution_time(),\
                        tt.get_checkpointer().get_detailed_execution_time())

        checkpoint_handler.set_finished( \
                                    detailed_exectime_obj=detailed_exectime)

        if finish_destroy_checkpointer:
            checkpoint_handler.destroy()

        return meta_test_failedverdicts_outlog
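
A hedged usage sketch of the meta-level runtests above; the meta test tool instance (meta_test_tool) and the list of meta test case names are assumptions for illustration. The call returns the same [verdicts, outlogs] pair that is kept as the checkpoint payload:

    # meta_test_tool and meta_testcase_list are assumed to exist in the caller
    verdicts, outlogs = meta_test_tool.runtests(
        meta_testcases=meta_testcase_list,
        stop_on_failure=False,
        parallel_test_count=None,      # None lets the tool pick the maximum
        with_output_summary=True,
    )
    failing = [t for t, v in verdicts.items()
               if v == common_mix.GlobalConstants.FAIL_TEST_VERDICT]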
Code example #9
File: matrices.py Project: thierry-tct/muteria
    def update_with_other_matrix(self, other_matrix, \
                                override_existing=False, allow_missing=False, \
                                ask_confirmation_with_exist_missing=False, \
                                serialize=False):
        """ Update this matrix using the other matrix
        :param other_matrix: The matrix to use to update this matrix
        :param override_existing: (bool) decide whether an existing cell
                        value (row, column) of this matrix should be
                        overridden by the update.
                        Note that if this is disabled, no cell (row, col)
                        of this matrix may also exist in other_matrix, or
                        this will fail.
        :param allow_missing: (bool) decide whether missing cells are allowed
                        when merging (cells that belong to only one matrix
                        while there are other cells on the same row or
                        column that belong to both matrices).
                        Note that if this is disabled, there must not be
                        any such cell during merging or this will fail.
        :param serialize: (bool) decides whether to serialize the updated
                        matrix (this matrix), after update, into its file.
        :return: nothing

        Example:
        >>> nc = ['a', 'b', 'c']
        >>> mat = ExecutionMatrix(non_key_col_list=nc)
        >>> mat.add_row_by_key('k', [1, 2, 3])
        >>> mat.add_row_by_key('r', [4, 0, 1])
        >>> nc_o = ['b', 'e']
        >>> mat_other = ExecutionMatrix(non_key_col_list=nc_o)
        >>> mat_other.add_row_by_key('h', [200, 100])
        >>> mat_other.add_row_by_key('r', [10, 11])
        >>> mat.update_with_other_matrix(mat_other, override_existing=True, \
                                                            allow_missing=True)
        >>> list(mat.get_keys())
        ['k', 'r', 'h']
        >>> list(mat.get_nonkey_colname_list())
        ['a', 'b', 'c', 'e']
        >>> k_v_d = mat._get_key_values_dict()
        >>> uncert = mat.getUncertainCellDefaultVal()
        >>> k_v_d['k'] == {'a': 1, 'b': 2, 'c': 3, 'e': uncert}
        True
        >>> k_v_d['r'] == {'a': 4, 'b': 10, 'c': 1, 'e': 11}
        True
        >>> k_v_d['h'] == {'a': uncert, 'b': 200, 'c': uncert, 'e': 100}
        True
        """
        #other_matrix_df = other_matrix.to_pandas_df()
        # Check values overlap
        row_existing = set(self.get_keys()) & set(other_matrix.get_keys())
        col_existing = set(self.get_nonkey_colname_list()) & \
                            set(other_matrix.get_nonkey_colname_list())
        if len(row_existing) > 0 and len(col_existing) > 0:
            if ask_confirmation_with_exist_missing and override_existing:
                override_existing = common_mix.confirm_execution(\
                                            "Some cells are existing, "
                                            "do you confirm their override?")
                ERROR_HANDLER.assert_true(override_existing, \
                            "Override_existing not set but there is overlap", \
                                                                    __file__)            

        # Check missing
        if len(row_existing) < len(other_matrix.get_keys()):
            #- Some rows will be added
            if len(col_existing) != len(self.get_nonkey_colname_list()) or \
                            len(col_existing) != \
                                len(other_matrix.get_nonkey_colname_list()):
                if ask_confirmation_with_exist_missing and allow_missing:
                    allow_missing = common_mix.confirm_execution(\
                                            "Some values are missing, "
                                            "do you confirm their presence?")
                ERROR_HANDLER.assert_true(allow_missing, \
                                "allow_missing disable but there are missing",\
                                                                    __file__)

        # Actual update
        ## 1. Create columns that are not in others
        col_to_add = set(other_matrix.get_nonkey_colname_list()) - \
                                            set(self.get_nonkey_colname_list())
        for col in col_to_add:
            self.non_key_col_list.append(col)
            self.dataframe[col] = [self.getUncertainCellDefaultVal()] * \
                                                        len(self.get_keys())
        
        ## 2. Update or insert rows
        extra_cols = set(self.get_nonkey_colname_list()) - \
                                    set(other_matrix.get_nonkey_colname_list())
        missing_extracol_vals = {e_c: self.getUncertainCellDefaultVal() \
                                                        for e_c in extra_cols}
        ### Insert
        new_rows = set(other_matrix.get_keys()) - set(self.get_keys())
        k_v_dict = other_matrix._get_key_values_dict(new_rows)
        for key, values in list(k_v_dict.items()):
            values.update(missing_extracol_vals)
            self.add_row_by_key(key, values, serialize=False)
        ### Update
        k_v_dict = other_matrix._get_key_values_dict(row_existing)
        for key, values in list(k_v_dict.items()):
            #for col in values:
            self.update_cells(key, values)

        if serialize:
            self.serialize()
Code example #10
    def _runtests(self, testcases, exe_path_map, env_vars, \
                                stop_on_failure=False, per_test_timeout=None, \
                                use_recorded_timeout_times=None, \
                                recalculate_execution_times=False, \
                                with_outlog_hash=True, parallel_count=1):
        '''
        Execute the list of test cases with the given executable and 
        say, for each test case, whether it failed.
        Note: Re-implement this if the tool implements faster ways to
        execute multiple test cases.

        :param testcases: list of test cases to execute
        :param exe_path_map: string representing the file system path to 
                        the executable to execute with the tests
        :param env_vars: dict of environment variables to set before
                        executing each test ({<variable>: <value>})
        :param stop_on_failure: decide whether to stop the test execution once
                        a test fails
        :returns: pair of:
                - dict of testcase and their failed verdict.
                 {<test case name>: <True if failed, False if passed, 
                    UNCERTAIN_TEST_VERDICT if uncertain>}
                 If stop_on_failure is True, only return the tests that have 
                 been executed until the failure
                - test execution output log hash data object or None
        '''
        # @Checkpoint: create a checkpoint handler (for time)
        checkpoint_handler = CheckPointHandler(self.get_checkpointer())
        if checkpoint_handler.is_finished():
            logging.warning("{} {} {}".format( \
                            "The function 'runtests' is finished according", \
                            "to checkpoint, but called again. None returned", \
                            "\nPlease Confirm reexecution..."))
            if common_mix.confirm_execution("{} {}".format( \
                                "Function 'runtests' is already", \
                                "finished, do yo want to restart?")):
                checkpoint_handler.restart()
                logging.info("Restarting the finished 'runtests'")
            else:
                ERROR_HANDLER.error_exit(err_string="{} {} {}".format( \
                        "Execution halted. Cannot continue because no value", \
                        " can be returned. Check the results of the", \
                        "finished execution"), call_location=__file__)

        if per_test_timeout is None:
            per_test_timeout = {tc: None for tc in testcases}
            if use_recorded_timeout_times is not None:
                ERROR_HANDLER.assert_true(use_recorded_timeout_times > 0, \
                                        "use_recorded_timeout_times must be "
                                        "positive if not None", __file__)
                per_test_timeout.update({x: (y * use_recorded_timeout_times) \
                                for x, y in self.test_execution_time.items()})
        else:
            ERROR_HANDLER.assert_true(use_recorded_timeout_times is None, \
                                "use_recorded_timeout_times must not be set "
                                "when per_test_timeout is set", __file__)

        # Prepare the exes
        self._prepare_executable(exe_path_map, env_vars, \
                                            collect_output=with_outlog_hash)
        self._set_env_vars(env_vars)

        test_failed_verdicts = {} 
        test_outlog_hash = {} 
        for testcase in testcases:
            start_time = time.time()
            test_failed, execoutlog_hash = \
                        self._oracle_execute_a_test(testcase, exe_path_map, \
                                        env_vars, \
                                        timeout=per_test_timeout[testcase], \
                                        with_outlog_hash=with_outlog_hash)
            
            # Record the execution time when recalculating
            if recalculate_execution_times:
                self.test_execution_time[testcase] = \
                                            1 + int(time.time() - start_time)

            test_failed_verdicts[testcase] = test_failed
            test_outlog_hash[testcase] = execoutlog_hash
            if stop_on_failure and test_failed != \
                                common_mix.GlobalConstants.PASS_TEST_VERDICT:
                break
        
        if recalculate_execution_times:
            common_fs.dumpJSON(self.test_execution_time, \
                            self.test_execution_time_storage_file, pretty=True)

        # Restore back the exes
        self._restore_env_vars()
        self._restore_default_executable(exe_path_map, env_vars, \
                                            collect_output=with_outlog_hash)

        if stop_on_failure:
            # Make sure the non-executed tests have the uncertain value (None)
            if len(test_failed_verdicts) < len(testcases):
                for testcase in set(testcases) - set(test_failed_verdicts):
                    test_failed_verdicts[testcase] = \
                            common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT
                    test_outlog_hash[testcase] = common_matrices.\
                                        OutputLogData.UNCERTAIN_TEST_OUTLOGDATA

        # @Checkpoint: Finished (for time)
        checkpoint_handler.set_finished(None)

        if not with_outlog_hash:
            test_outlog_hash = None

        return test_failed_verdicts, test_outlog_hash
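
A small worked sketch of the per-test timeout derivation above, with made-up numbers (the dict below stands in for self.test_execution_time and the test names are hypothetical):

    test_execution_time = {"test_x": 7}      # seconds recorded in a previous run
    use_recorded_timeout_times = 2
    per_test_timeout = {tc: None for tc in ["test_x", "test_y"]}
    per_test_timeout.update({x: (y * use_recorded_timeout_times)
                             for x, y in test_execution_time.items()})
    # per_test_timeout == {"test_x": 14, "test_y": None}   (None means no timeout)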
Code example #11
    def _setup_repository(self):
        # Make sure the repo dir exists
        ERROR_HANDLER.assert_true(os.path.isdir(self.repository_rootdir), \
                        "given repository dir is not existing: {}". format( \
                            self.repository_rootdir), __file__)

        # make sure the repo dir is a git repo
        # if no, ask the user whether to initialize and initialize or abort
        # if yes or user accepted initialize, get the git object for the repo
        try:
            repo = git_repo(self.repository_rootdir)
            gitobj = repo.git
        except git_exc.InvalidGitRepositoryError:
            make_it_git = common_mix.confirm_execution("{} {} {} {}".format(\
                                    "The given repository directory is not", \
                                    "a git repository, this must be a git", \
                                    "repository to proceed.\n Do you want to",\
                                    "set is as git repository?"))
            if make_it_git:
                repo = git_repo.init(self.repository_rootdir)
                gitobj = repo.git
            else:
                ERROR_HANDLER.error_exit("{} {}".format(\
                            "Must make the repository as git repository,", \
                            "then re-run"), __file__)

        # Check whether the repo is already managed by muteria
        ## if using branch
        if self.delete_created_on_revert_as_initial:
            if self.test_branch_name not in self._get_branches_list(\
                                                self.repository_rootdir):
                ## Not managed, create branch
                self._make_testing_branch(self.repository_rootdir, \
                                                        self.test_branch_name)

            # checkout
            gitobj.checkout(self.test_branch_name)

        # Actual Check whether the repo is already managed by muteria
        # There must be a directory DEFAULT_MUTERIA_REPO_META_FOLDER
        src_in_prev = set()
        if os.path.isdir(self.muteria_metadir):
            ## Managed
            prev_src_list, prev_use_branch = \
                            common_fs.loadJSON(self.muteria_metadir_info_file)
            ERROR_HANDLER.assert_true(prev_use_branch == \
                        self.delete_created_on_revert_as_initial, \
                        "{} {} {} {} {}".format(\
                            "unmatching repo backup type.", \
                            "previously, use branch was", \
                            prev_use_branch, ", now it is",\
                            self.delete_created_on_revert_as_initial), \
                                                                    __file__)
            src_in_prev = set(prev_src_list) & set(self.source_files_list)
            prev_src_list = set(prev_src_list) - set(self.source_files_list)
            remain_prev_src_set = {src for src in prev_src_list \
                                    if os.path.isfile(self.repo_abs_path(src))}
            # make sure that all prev_src are in initial state
            untracked_diff_files = self._get_untracked_and_diffed(repo)
            prev_untracked_diff = remain_prev_src_set & untracked_diff_files
            if len(prev_untracked_diff) > 0:
                bypass = common_mix.confirm_execution(\
                            "{} {} {} {} {}".format(
                                "the following files were previously used as",\
                                "src files by muteria and are now untracked:",\
                                prev_untracked_diff, \
                                "\nDo you want to handle it or bypass it?",\
                                "Choose yes to bypass: "******"{} {}".format(\
                                "Do you want to automatically restore the",\
                                "The untracked previous source and continue?"))
                    if revert_them:
                        for src in prev_untracked_diff:
                            self.revert_repository_file(src, gitobj=gitobj)
                    else:
                        ERROR_HANDLER.error_exit(\
                                "Handle it manually and restart the execution")

            # update the info_file
            if set(prev_src_list) != set(self.source_files_list):
                common_fs.dumpJSON([self.source_files_list, \
                                    self.delete_created_on_revert_as_initial],\
                                                self.muteria_metadir_info_file)
        else:
            ## Not managed
            os.mkdir(self.muteria_metadir)
            common_fs.dumpJSON([self.source_files_list, \
                                    self.delete_created_on_revert_as_initial],\
                                                self.muteria_metadir_info_file)

        # Make sure all source files of interest are tracked
        untracked_diff_files = self._get_untracked_and_diffed(repo)
        untracked_diff_src_in_prev = untracked_diff_files & src_in_prev
        if len(untracked_diff_src_in_prev) > 0:
            if common_mix.confirm_execution(\
                            "{} {} {} {} {}".format("The following source",\
                                        "files of interest are untracked", \
                    "and will be reverted (previous execution unfinished):", \
                                        src_in_prev, \
                                        "do you want to revert them?")):
                for src in src_in_prev:
                    self.revert_repository_file(src, gitobj=gitobj)
            else:
                ERROR_HANDLER.error_exit("{} {}".format(\
                                    "Handle untracked source files manually", \
                                                "then restart the execution"))

        untracked_diff_files = self._get_untracked_and_diffed(repo)
        untracked_diff_src_files = set(self.source_files_list) & \
                                                        untracked_diff_files
        if len(untracked_diff_src_files) > 0:
            if common_mix.confirm_execution(\
                                "{} {} {} {}".format("The following source",\
                                        "files of interest are untracked:", \
                                        untracked_diff_src_files, \
                                        "do you want to track them?")):
                repo.index.add(list(untracked_diff_src_files))
            else:
                ERROR_HANDLER.error_exit("{} {}".format(\
                                    "Handle untracked source files manually", \
                                                "then restart the execution"))
Code example #12
File: meta_testcasetool.py Project: Marvinmw/muteria
    def runtests(self, meta_testcases=None, exe_path_map=None, env_vars=None, \
                        stop_on_failure=False, \
                        per_test_timeout=None, \
                        use_recorded_timeout_times=None, \
                        recalculate_execution_times=False, \
                        fault_test_execution_matrix_file=None, \
                        fault_test_execution_execoutput_file=None, \
                        with_outlog_hash=True, \
                        test_prioritization_module=None, \
                        parallel_test_count=1, \
                        parallel_test_scheduler=None, \
                        restart_checkpointer=False,
                        finish_destroy_checkpointer=True):
        '''
        Execute the list of test cases with the given executable and 
        say, for each test case, whether it failed

        :param meta_testcases: list of test cases to execute
        :param exe_path_map: string representing the file system path to 
                        the executable to execute with the tests
        :param env_vars: dict of environment variables to set before
                        executing each test ({<variable>: <value>})
        :param stop_on_failure: decide whether to stop the test 
                        execution once a test fails
        :param fault_test_execution_matrix_file: Optional matrix file 
                        to store the tests' pass fail execution data
        :param fault_test_execution_execoutput_file: Optional output log file 
                        to store the tests' execution actual output (hashed)
        :param with_outlog_hash: decide whether to return outlog hash 
        :param test_prioritization_module: Specify the test prioritization
                        module. 
                        (TODO: Implement support)
        :param parallel_test_count: Specify the number of parallel test
                        executions. Must be an integer >= 1
        :param parallel_test_scheduler: Specify the function that will
                        handle parallel test scheduling by tool, using
                        the test execution optimizer. 
                        (TODO: Implement support)

        :type restart_checkpointer: bool
        :param restart_checkpointer: Decide whether to discard the checkpoint
                        and restart anew.

        :type finish_destroy_checkpointer: bool
        :param finish_destroy_checkpointer: Decide whether to automatically 
                        destroy the checkpointer when done or not
                        Useful if the caller has a checkpointer to update. 

        :returns: dict of testcase and their failed verdict.
                 {<test case name>: <True if failed, False if passed,
                    UNCERTAIN_TEST_VERDICT if uncertain>}
                 If stop_on_failure is True, only return the tests that 
                 have been executed until the failure
        '''

        # FIXME: Make sure that the support is implemented for
        # parallelism and test prioritization. Remove the code below
        # once supported:
        ERROR_HANDLER.assert_true(test_prioritization_module is None, \
                        "Must implement test prioritization support here", \
                                                                    __file__)
        ERROR_HANDLER.assert_true(parallel_test_count <= 1, \
                    "Must implement parallel tests execution support here", \
                                                                    __file__)
        ERROR_HANDLER.assert_true(parallel_test_scheduler is None, \
                    "Must implement parallel tests execution support here", \
                                                                    __file__)
        #~FIXMEnd

        # Check arguments Validity
        if exe_path_map is None:
            exe_path_map = self._get_default_exe_path_map()

        ERROR_HANDLER.assert_true(parallel_test_count > 0, \
                    "invalid parallel test execution count: {}. {}".format( \
                                    parallel_test_count, "must be >= 1"))

        # @Checkpoint: create a checkpoint handler
        cp_func_name = "runtests"
        cp_task_id = 1
        checkpoint_handler = \
                CheckPointHandler(self.get_checkpoint_state_object())
        if restart_checkpointer:
            checkpoint_handler.restart()
        if checkpoint_handler.is_finished():
            logging.warning("%s %s" %("The function 'runtests' is finished", \
                "according to checkpoint, but called again. None returned"))
            if common_mix.confirm_execution("%s %s" % ( \
                                        "Function 'runtests' is already", \
                                        "finished, do you want to restart?")):
                checkpoint_handler.restart()
                logging.info("Restarting the finished 'runtests'")
            else:
                ERROR_HANDLER.error_exit(err_string="%s %s %s" % (\
                        "Execution halted. Cannot continue because no value", \
                        " can be returned. Check the results of the", \
                        "finished execution"), call_location=__file__)

        # @Checkpoint: Get the saved payload (data kept for each tool)
        # pair list of testfailed verdict and execution output
        meta_test_failedverdicts_outlog = \
                                    checkpoint_handler.get_optional_payload()
        if meta_test_failedverdicts_outlog is None:
            meta_test_failedverdicts_outlog = [{}, {}]

        # Make sure the tests are unique
        ERROR_HANDLER.assert_true(len(meta_testcases) == \
                                                len(set(meta_testcases)), \
                                        "not all tests are unique", __file__)

        testcases_by_tool = {}
        for meta_testcase in meta_testcases:
            ttoolalias, testcase = \
                            DriversUtils.reverse_meta_element(meta_testcase)
            if ttoolalias not in testcases_by_tool:
                testcases_by_tool[ttoolalias] = []
            testcases_by_tool[ttoolalias].append(testcase)

        found_a_failure = False
        for tpos, ttoolalias in enumerate(testcases_by_tool.keys()):
            # @Checkpoint: Check whether already executed
            if not checkpoint_handler.is_to_execute(func_name=cp_func_name, \
                                                taskid=cp_task_id, \
                                                tool=ttoolalias):
                continue

            # Actual execution
            ttool = \
                self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            test_failed_verdicts, test_execoutput = ttool.runtests( \
                                            testcases_by_tool[ttoolalias], \
                                            exe_path_map, env_vars, \
                                            stop_on_failure, \
                                            per_test_timeout=per_test_timeout,
                                            use_recorded_timeout_times=\
                                                use_recorded_timeout_times, \
                                            recalculate_execution_times=\
                                                recalculate_execution_times, \
                                            with_outlog_hash=with_outlog_hash)
            for testcase in test_failed_verdicts:
                meta_testcase =  \
                        DriversUtils.make_meta_element(testcase, ttoolalias)
                meta_test_failedverdicts_outlog[0][meta_testcase] = \
                                                test_failed_verdicts[testcase]
                meta_test_failedverdicts_outlog[1][meta_testcase] = \
                                                    test_execoutput[testcase]
                if test_failed_verdicts[testcase] == \
                                common_mix.GlobalConstants.FAIL_TEST_VERDICT:
                    found_a_failure = True

            # @Checkpoint: Checkpointing
            checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=ttoolalias, \
                                opt_payload=meta_test_failedverdicts_outlog)

            if stop_on_failure and found_a_failure:
                # @Checkpoint: Checkpointing for remaining tools
                for rem_tool in list(testcases_by_tool.keys())[tpos + 1:]:
                    checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=rem_tool, \
                                opt_payload=meta_test_failedverdicts_outlog)
                break

        if stop_on_failure:
            # Make sure the non-executed tests have the uncertain verdict (None)
            if len(meta_test_failedverdicts_outlog[0]) < len(meta_testcases):
                for meta_testcase in set(meta_testcases) - \
                                    set(meta_test_failedverdicts_outlog[0]):
                    meta_test_failedverdicts_outlog[0][meta_testcase] = \
                            common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT
                    meta_test_failedverdicts_outlog[1][meta_testcase] = \
                            common_matrices.OutputLogData.\
                                                    UNCERTAIN_TEST_OUTLOGDATA

        ERROR_HANDLER.assert_true(len(meta_test_failedverdicts_outlog[0]) == \
                                                        len(meta_testcases), \
                            "Not all tests have a verdict reported", __file__)

        if fault_test_execution_matrix_file is not None:
            # Load or Create the matrix
            fault_test_execution_matrix = common_matrices.ExecutionMatrix( \
                                filename=fault_test_execution_matrix_file, \
                                            non_key_col_list=meta_testcases)
            ERROR_HANDLER.assert_true(fault_test_execution_matrix.is_empty(), \
                                            "matrix must be empty", __file__)
            failverdict2val = {
                common_mix.GlobalConstants.FAIL_TEST_VERDICT: \
                        fault_test_execution_matrix.getActiveCellDefaultVal(),
                common_mix.GlobalConstants.PASS_TEST_VERDICT: \
                            fault_test_execution_matrix.getInactiveCellVal(),
                common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT: \
                    fault_test_execution_matrix.getUncertainCellDefaultVal(),
            }
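            # Translate each test's verdict into the matrix's active/inactive/
            # uncertain cell value, then record the results as a single row
            # keyed by FAULT_MATRIX_KEY (i.e. which tests exposed the fault).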
            cells_dict = {}
            for meta_testcase in meta_test_failedverdicts_outlog[0]:
                cells_dict[meta_testcase] = failverdict2val[\
                            meta_test_failedverdicts_outlog[0][meta_testcase]]

            fault_test_execution_matrix.add_row_by_key(self.FAULT_MATRIX_KEY, \
                                                cells_dict, serialize=True)

        if fault_test_execution_execoutput_file is None:
            meta_test_failedverdicts_outlog[1] = None
        else:
            # Load or Create the data object
            fault_test_execution_execoutput = common_matrices.OutputLogData( \
                                filename=fault_test_execution_execoutput_file)
            ERROR_HANDLER.assert_true(\
                            fault_test_execution_execoutput.is_empty(), \
                                        "outlog data must be empty", __file__)
            fault_test_execution_execoutput.add_data(\
                                    {self.PROGRAM_EXECOUTPUT_KEY: \
                                         meta_test_failedverdicts_outlog[1]}, \
                                                                serialize=True)

        # @Checkpoint: Finished
        detailed_exectime = {}
        for ttoolalias in testcases_by_tool.keys():
            tt = self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            detailed_exectime[ttoolalias] = (\
                        tt.get_checkpointer().get_execution_time(),\
                        tt.get_checkpointer().get_detailed_execution_time())

        checkpoint_handler.set_finished( \
                                    detailed_exectime_obj=detailed_exectime)

        if finish_destroy_checkpointer:
            checkpoint_handler.destroy()

        return meta_test_failedverdicts_outlog
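The example above gates the restart of an already finished 'runtests' on common_mix.confirm_execution, which asks the operator a yes/no question on the console. The minimal sketch below illustrates that prompt pattern in isolation; the helper name ask_yes_no and the prompt text are assumptions for illustration, not muteria's actual implementation.

# Illustrative sketch only (not muteria's implementation): a console y/n
# prompt in the spirit of common_mix.confirm_execution, usable to gate a
# checkpoint restart as done above. `ask_yes_no` is a hypothetical name.
def ask_yes_no(question):
    while True:
        answer = input("{} [y/n] ".format(question)).strip().lower()
        if answer in ('y', 'yes'):
            return True
        if answer in ('n', 'no'):
            return False
        # Anything else: ask again.

if __name__ == '__main__':
    if ask_yes_no("'runtests' is already finished, do you want to restart?"):
        print("restarting ...")
    else:
        print("keeping the previous results")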
Code example #13
    def _runtests(self, testcases, exe_path_map, env_vars, \
                                stop_on_failure=False, per_test_timeout=None, \
                                use_recorded_timeout_times=None, \
                                recalculate_execution_times=False, \
                                with_output_summary=True, hash_outlog=True, \
                                parallel_count=1):
        '''
        Execute the list of test cases with the given executable and 
        say, for each test case, whether it failed.
        Note: Re-implement this if the tool provides a faster way to execute
        multiple test cases at once.

        :param testcases: list of test cases to execute
        :param exe_path_map: map giving, for each executable, the file system
                        path to use when executing the tests
        :param env_vars: dict of environment variables to set before
                        executing each test ({<variable>: <value>})
        :param stop_on_failure: decide whether to stop the test execution once
                        a test fails
        :returns: a pair of:
                - dict of testcase and their failed verdict.
                 {<test case name>: <True if failed, False if passed, 
                    UNCERTAIN_TEST_VERDICT if uncertain>}
                 If stop_on_failure is True, only return the tests that have 
                 been executed until the failure
                - test execution output log hash data object or None
        '''
        # @Checkpoint: create a checkpoint handler (for time)
        checkpoint_handler = CheckPointHandler(self.get_checkpointer())
        if checkpoint_handler.is_finished():
            logging.warning("{} {} {}".format( \
                            "The function 'runtests' is finished according", \
                            "to checkpoint, but called again. None returned", \
                            "\nPlease Confirm reexecution..."))
            if common_mix.confirm_execution("{} {}".format( \
                                "Function 'runtests' is already", \
                                "finished, do yo want to restart?")):
                checkpoint_handler.restart()
                logging.info("Restarting the finished 'runtests'")
            else:
                ERROR_HANDLER.error_exit(err_string="{} {} {}".format( \
                        "Execution halted. Cannot continue because no value", \
                        " can be returned. Check the results of the", \
                        "finished execution"), call_location=__file__)

        if per_test_timeout is None:
            per_test_timeout = {tc: None for tc in testcases}
            if use_recorded_timeout_times is not None:
                ERROR_HANDLER.assert_true(use_recorded_timeout_times > 0, \
                                        "use_recorded_timeout_times must be "
                                        "positive if not None", __file__)
                per_test_timeout.update({x: (y * use_recorded_timeout_times) \
                                for x, y in self.test_execution_time.items()})
        else:
            ERROR_HANDLER.assert_true(use_recorded_timeout_times is None, \
                                "use_recorded_timeout_times must not be set "
                                "when per_test_timeout is set", __file__)

        # Prepare the exes
        self._prepare_executable(exe_path_map, env_vars, \
                                            collect_output=with_output_summary)
        self._set_env_vars(env_vars)

        test_failed_verdicts = {} 
        test_outlog_hash = {} 
        processbar = tqdm.tqdm(testcases, leave=False, dynamic_ncols=True) 

        # Per-test execution body (also used as the parallel work unit)
        def test_exec_iteration(testcase):
            processbar.set_description("Running Test {} (x{})".format(\
                                                  testcase, parallel_count))
            start_time = time.time()
            test_failed, execoutlog_hash = \
                        self._oracle_execute_a_test(testcase, exe_path_map, \
                                        env_vars, \
                                        timeout=per_test_timeout[testcase], \
                                    with_output_summary=with_output_summary, \
                                        hash_outlog=hash_outlog)
            
            #if testcase.endswith('.ktest'):  # DBG - fix hang
            #    logging.debug("KTEST {} is done".format(testcase))

            # Record exec time if not existing
            with self.shared_loc:
                if recalculate_execution_times:
                    self.test_execution_time[testcase] = \
                                     max(1, int(time.time() - start_time)) \
                                 * self.config.RECORDED_TEST_TIMEOUT_FACTOR

                test_failed_verdicts[testcase] = test_failed
                test_outlog_hash[testcase] = execoutlog_hash
            return test_failed
        #~ def test_exec_iteration()

        if self.can_run_tests_in_parallel() and parallel_count is not None \
                                                    and parallel_count > 1:
            parallel_count = min(len(testcases), parallel_count)
            joblib.Parallel(n_jobs=parallel_count, require='sharedmem')\
                            (joblib.delayed(test_exec_iteration)(testcase) \
                                                for testcase in processbar)
        else:
            parallel_count = 1 # to be printed in progress
            for testcase in processbar: 
                test_failed = test_exec_iteration(testcase)
                if stop_on_failure and test_failed != \
                                common_mix.GlobalConstants.PASS_TEST_VERDICT:
                    break
        
        if recalculate_execution_times:
            common_fs.dumpJSON(self.test_execution_time, \
                            self.test_execution_time_storage_file, pretty=True)

        # Restore back the exes
        self._restore_env_vars()
        self._restore_default_executable(exe_path_map, env_vars, \
                                            collect_output=with_output_summary)

        if stop_on_failure:
            # Make sure the non-executed tests have the uncertain verdict (None)
            if len(test_failed_verdicts) < len(testcases):
                for testcase in set(testcases) - set(test_failed_verdicts):
                    test_failed_verdicts[testcase] = \
                            common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT
                    test_outlog_hash[testcase] = common_matrices.\
                                        OutputLogData.UNCERTAIN_TEST_OUTLOGDATA

        # @Checkpoint: Finished (for time)
        checkpoint_handler.set_finished(None)

        if not with_output_summary:
            test_outlog_hash = None

        ERROR_HANDLER.assert_true(len(testcases) == len(test_failed_verdicts),\
                  "Mismatch between testcases and test_failed_verdict (BUG)",\
                                                                     __file__)

        return test_failed_verdicts, test_outlog_hash
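When the tool supports it, the example above dispatches test_exec_iteration through joblib with require='sharedmem', so worker threads can fill the shared verdict and output-log dictionaries under self.shared_loc. The stand-alone sketch below mirrors that pattern on dummy data; fake_run_test, the lock name, and the test case names are assumptions for illustration only.

# Illustrative sketch (assumed names): joblib 'sharedmem' parallelism with a
# lock-protected shared dict, mirroring the structure of test_exec_iteration.
import threading

import joblib
import tqdm

results = {}                    # shared by all workers (thread backend)
lock = threading.Lock()         # protects concurrent writes to `results`

def fake_run_test(testcase):
    # Stand-in for _oracle_execute_a_test: pretend every test passes.
    failed = False
    with lock:
        results[testcase] = failed
    return failed

testcases = ["test_{}".format(i) for i in range(8)]
progress = tqdm.tqdm(testcases, leave=False, dynamic_ncols=True)

# require='sharedmem' forces a thread-based backend so `results` is shared.
joblib.Parallel(n_jobs=4, require='sharedmem')(
        joblib.delayed(fake_run_test)(tc) for tc in progress)

assert len(results) == len(testcases)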