Example 1
    def cross_folder_fdupes(custom_bin, folders, folder2alias):
        dup_list, invalid = KTestTestFormat.ktest_fdupes(*folders, \
                                custom_replay_tool_binary_dir=custom_bin)
        ERROR_HANDLER.assert_true(len(invalid) == 0, \
                            "there should be no invalid here", __file__)

        kepttest2duptest_map = {}
        test2keptdup = {}
        # get fdupes with metatests
        # TODO: check that each result of relpath is really a test
        for dup_tuple in dup_list:
            k_dir = KTestTestFormat.get_dir(dup_tuple[0], folders)
            key = os.path.relpath(dup_tuple[0], k_dir)
            key = DriversUtils.make_meta_element(key, \
                                                    folder2alias[k_dir])
            kepttest2duptest_map[key] = []
            for dp in dup_tuple[1:]:
                v_dir = KTestTestFormat.get_dir(dp, folders)
                val = os.path.relpath(dp, v_dir)
                val = DriversUtils.make_meta_element(val, \
                                                    folder2alias[v_dir])
                kepttest2duptest_map[key].append(val)
                test2keptdup[val] = key

        return kepttest2duptest_map, test2keptdup
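
A minimal, standalone sketch (not part of the library) of the kept-test to duplicate-tests mapping that cross_folder_fdupes builds. It assumes a hypothetical "alias:name" meta-element convention; the real DriversUtils separator may differ.

    import os

    # Hypothetical naming helper standing in for DriversUtils.make_meta_element.
    def make_meta(name, alias):
        return "{}:{}".format(alias, name)

    # Each tuple lists test files with identical content; the first entry is kept.
    dup_list = [("/out/a/t1.ktest", "/out/b/t7.ktest", "/out/b/t9.ktest")]
    folder2alias = {"/out/a": "klee_a", "/out/b": "klee_b"}

    kept2dups, dup2kept = {}, {}
    for dup_tuple in dup_list:
        k_dir = os.path.dirname(dup_tuple[0])
        key = make_meta(os.path.relpath(dup_tuple[0], k_dir), folder2alias[k_dir])
        kept2dups[key] = []
        for dp in dup_tuple[1:]:
            v_dir = os.path.dirname(dp)
            val = make_meta(os.path.relpath(dp, v_dir), folder2alias[v_dir])
            kept2dups[key].append(val)
            dup2kept[val] = key

    # kept2dups == {'klee_a:t1.ktest': ['klee_b:t7.ktest', 'klee_b:t9.ktest']}
    # dup2kept  == {'klee_b:t7.ktest': 'klee_a:t1.ktest',
    #               'klee_b:t9.ktest': 'klee_a:t1.ktest'}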
Example 2
    def _extract_coverage_data_of_a_test(self, enabled_criteria, \
                                    test_execution_verdict, result_dir_tmp):
        ''' read json files and extract data
            return: the dict of criteria with covering count
        '''
        in_file = os.path.join(result_dir_tmp, self.cov_data_filename)
        cov_dat_obj = common_fs.loadJSON(in_file)
        cov_dat_obj = {TestCriteria[v]: cov_dat_obj[v] for v in cov_dat_obj}

        ERROR_HANDLER.assert_true(set(cov_dat_obj) == set(enabled_criteria), \
                                    "mismatching criteria enabled", __file__)

        res = {c: {} for c in enabled_criteria}

        for c in cov_dat_obj:
            for filename, filedat in list(cov_dat_obj[c].items()):
                if filedat is not None:
                    for id_, cov in list(filedat.items()):
                        ident = DriversUtils.make_meta_element(\
                                                            str(id_), filename)
                        res[c][ident] = cov

        # delete cov file
        os.remove(in_file)

        return res
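
A rough sketch, with made-up data, of the flattening this function performs: per-criterion, per-file coverage dictionaries become a single dictionary keyed by a hypothetical "filename:element_id" meta element (the real separator may differ).

    # Illustrative shape of the loaded JSON object; names and counts are made up.
    cov_dat_obj = {
        "STATEMENT_COVERAGE": {
            "src/a.c": {"12": 3, "13": 0},
            "src/b.c": None,             # a file with no data is skipped
        },
    }

    res = {}
    for criterion, by_file in cov_dat_obj.items():
        res[criterion] = {}
        for filename, filedat in by_file.items():
            if filedat is None:
                continue
            for id_, cov in filedat.items():
                res[criterion]["{}:{}".format(filename, id_)] = cov

    # res == {'STATEMENT_COVERAGE': {'src/a.c:12': 3, 'src/a.c:13': 0}}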
Example 3
    def reset(self, toolalias, test_objective_list, test_list, **kwargs):
        """ Reset the optimizer
        """
        self.test_objective_ordered_list = copy.deepcopy(test_objective_list)
        self.pointer = 0
        self.test_objective_to_test_execution_optimizer = {
            to: TestExecutionOptimizer(self.config, self.explorer) \
            for to in self.test_objective_ordered_list
        }

        # get the test list per test objectives, based on the matrix
        opt_by_criterion = self._get_optimizing_criterion()
        matrix_file = self.explorer.get_existing_file_pathname(\
                                explorer.TMP_CRITERIA_MATRIX[opt_by_criterion])
        test_list_per_test_objective = \
                            self._get_test_objective_tests_from_matrix(\
                                                    matrix_file=matrix_file)

        # Update test list
        for to, teo in self.test_objective_to_test_execution_optimizer.items():
            alias_to = DriversUtils.make_meta_element(to, toolalias)
            ERROR_HANDLER.assert_true(\
                                    alias_to in test_list_per_test_objective, \
                                    "Bug: test objective missing("+str(to)+')')
            teo.reset(None, test_list_per_test_objective[alias_to], \
                                                            disable_reset=True)
Example 4
    def reset(self, toolalias, test_objective_list, test_list, **kwargs):
        """ Reset the optimizer
        """
        # Make sure that all objectives in test_objective_list are in dictobj
        # and get test_of_testobjective
        test_of_to = {}
        for to in test_objective_list:
            meta_to = DriversUtils.make_meta_element(to, toolalias)
            ERROR_HANDLER.assert_true(meta_to in self.dictobj, \
                                "meta test objective {} not in self.dictobj"\
                                                    .format(meta_to), __file__)
            test_of_to[to] = self.dictobj[meta_to]

            # If None as testlist, use all tests
            if test_of_to[to] is None:
                test_of_to[to] = test_list
            else:
                test_of_to[to] = list(set(test_list) & set(test_of_to[to]))

        self.test_objective_ordered_list = copy.deepcopy(test_objective_list)
        self.pointer = 0
        self.test_objective_to_test_execution_optimizer = {
            to: TestExecutionOptimizer(self.config, self.explorer) \
            for to in self.test_objective_ordered_list
        }
        for to, teo in self.test_objective_to_test_execution_optimizer.items():
            teo.reset(None, test_of_to[to], disable_reset=True)
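
A small standalone sketch of the test-list restriction above: an objective whose recorded test list is None falls back to the full test_list, while any other objective keeps only the tests also present in test_list (names are made up).

    test_list = ["t1", "t2", "t3"]
    # Hypothetical per-objective test lists; None means "all tests".
    dictobj = {"mutant_1": ["t2", "t5"], "mutant_2": None}

    test_of_to = {}
    for to, recorded in dictobj.items():
        if recorded is None:
            test_of_to[to] = list(test_list)
        else:
            test_of_to[to] = sorted(set(test_list) & set(recorded))

    # test_of_to == {'mutant_1': ['t2'], 'mutant_2': ['t1', 't2', 't3']}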
Example 5
 def _compute_testcases_info(self, candidate_tool_aliases=None):
     meta_testcase_info_obj = TestcasesInfoObject()
     if candidate_tool_aliases is None:
         candidate_tool_aliases = self.testcases_configured_tools.keys()
     for ttoolalias in candidate_tool_aliases:
         ttool = \
             self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
         tool_testcase_info = ttool.get_testcase_info_object()
         old2new_tests = {}
         for t_test in tool_testcase_info.get_tests_list():
             meta_t_key = DriversUtils.make_meta_element(t_test, ttoolalias)
             old2new_tests[t_test] = meta_t_key
         meta_testcase_info_obj.update_using(ttoolalias, old2new_tests, \
                                                         tool_testcase_info)
     return meta_testcase_info_obj
Example 6
 def after_command(self):
     if self.op_retval == common_mix.GlobalConstants.COMMAND_FAILURE:
         return common_mix.GlobalConstants.COMMAND_FAILURE
     m_regex = re.compile('({}|{}|{})'.format('klee_change', \
                                     'klee_get_true', 'klee_get_false'))
     matched_lines = set()
     for src in self.source_files_to_objects:
         with open(os.path.join(self.repository_rootdir, src), \
                                                 encoding='UTF-8') as f:
             for lnum, line in enumerate(f.readlines()):
                 if m_regex.search(line) is not None:
                     matched_lines.add(DriversUtils.make_meta_element(\
                                                     str(lnum+1), src))
     ret_lines = self.post_callback_args
     ret_lines.extend(matched_lines)
     return common_mix.GlobalConstants.COMMAND_SUCCESS
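
A self-contained sketch of the line-matching step, using an in-memory source string instead of repository files; matched 1-based line numbers are tagged with the source path using a hypothetical "src:lineno" convention.

    import re

    m_regex = re.compile('(klee_change|klee_get_true|klee_get_false)')

    src = "prog.c"
    source_text = (
        "int x = klee_change(old_val, new_val);\n"
        "int y = 0;\n"
        "if (klee_get_true()) { y = 1; }\n"
    )

    matched_lines = set()
    for lnum, line in enumerate(source_text.splitlines()):
        if m_regex.search(line) is not None:
            matched_lines.add("{}:{}".format(src, lnum + 1))

    # matched_lines == {'prog.c:1', 'prog.c:3'}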
Example 7
        def tool_parallel_test_exec(ttoolalias):
            # Actual execution
            found_a_failure = False
            # Whether the execution was unsuccessful
            test_error = False
            ttool = \
                self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            test_failed_verdicts, test_execoutput = ttool.runtests( \
                                testcases_by_tool[ttoolalias], \
                                exe_path_map, env_vars, \
                                stop_on_failure, \
                                per_test_timeout=per_test_timeout,
                                use_recorded_timeout_times=\
                                    use_recorded_timeout_times, \
                                recalculate_execution_times=\
                                    recalculate_execution_times, \
                                with_output_summary=\
                                            with_output_summary, \
                                hash_outlog=hash_outlog, \
                                parallel_count=\
                                    parallel_test_count_by_tool[ttoolalias])
            with shared_loc:
                for testcase in test_failed_verdicts:
                    meta_testcase = DriversUtils.make_meta_element(\
                                                        testcase, ttoolalias)
                    meta_test_failedverdicts_outlog[0][meta_testcase] = \
                                                test_failed_verdicts[testcase]
                    meta_test_failedverdicts_outlog[1][meta_testcase] = \
                                                    test_execoutput[testcase]
                    if not found_a_failure \
                                and test_failed_verdicts[testcase] == \
                                common_mix.GlobalConstants.FAIL_TEST_VERDICT:
                        found_a_failure = True
                    if not test_error \
                                and test_failed_verdicts[testcase] == \
                            common_mix.GlobalConstants.TEST_EXECUTION_ERROR:
                        test_error = True

                # @Checkpoint: Checkpointing
                checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=ttoolalias, \
                                opt_payload=meta_test_failedverdicts_outlog)
            return found_a_failure, test_error
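
A minimal sketch, with stand-in constants and made-up verdicts, of how the per-tool results are folded into the shared meta dictionaries and the failure/error flags are raised.

    FAIL, PASS, ERROR = "FAIL", "PASS", "ERROR"   # stand-ins for the framework constants

    ttoolalias = "devtests"
    test_failed_verdicts = {"t1": PASS, "t2": FAIL}
    test_execoutput = {"t1": "<outlog-hash-1>", "t2": "<outlog-hash-2>"}

    meta_verdicts, meta_outlogs = {}, {}
    found_a_failure = False
    test_error = False
    for testcase, verdict in test_failed_verdicts.items():
        meta_testcase = "{}:{}".format(ttoolalias, testcase)   # hypothetical convention
        meta_verdicts[meta_testcase] = verdict
        meta_outlogs[meta_testcase] = test_execoutput[testcase]
        found_a_failure = found_a_failure or verdict == FAIL
        test_error = test_error or verdict == ERROR

    # meta_verdicts == {'devtests:t1': 'PASS', 'devtests:t2': 'FAIL'}
    # found_a_failure is True, test_error is False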
Example 8
    def _compute_criterion_info(self, criterion, candidate_tool_aliases=None):
        if criterion not in CriteriaToInfoObject:
            return None

        meta_criterion_info_obj = CriteriaToInfoObject[criterion]()
        if candidate_tool_aliases is None:
            candidate_tool_aliases = []
            for _, config in list(self.tools_config_by_criterion_dict.items()):
                candidate_tool_aliases.append(config.get_tool_config_alias())
        for ctoolalias in candidate_tool_aliases:
            ctool = self.criteria_configured_tools[ctoolalias]\
                                                            [self.TOOL_OBJ_KEY]
            tool_element_info = ctool.get_criterion_info_object(criterion)
            old2new_tests = {}
            for c_elem in tool_element_info.get_elements_list():
                meta_c_key = DriversUtils.make_meta_element(c_elem, ctoolalias)
                old2new_tests[c_elem] = meta_c_key
            meta_criterion_info_obj.update_using(ctoolalias, old2new_tests, \
                                                            tool_element_info)
        return meta_criterion_info_obj
Example 9
    def _runtest_separate_criterion_program (self, criterion, testcases, \
                                    matrix,
                                    executionoutput, \
                                    criteria_element_list, \
                                    cover_criteria_elements_once=False, \
                                    prioritization_module=None, \
                                    test_parallel_count=1,
                                    serialize_period=5,
                                    checkpoint_handler=None, \
                                    cp_calling_func_name=None, \
                                    cp_calling_done_task_id=None, \
                                    cp_calling_tool=None):
        '''
        Note: Here the temporary matrix is used as a checkpoint
                (serialized with the frequency set by 'serialize_period').
            The checkpointer is mainly used for the execution time
            (TODO: support parallelism: per test outdata)
        '''
        # FIXME: Support parallelism, then remove the code
        # below:
        ERROR_HANDLER.assert_true(test_parallel_count <= 1, \
                    "FIXME: Must first implement support for parallel mutation")
        #~ FIXMEnd

        # @Checkpoint: validate
        if checkpoint_handler is not None:
            ERROR_HANDLER.assert_true(cp_calling_func_name is not None)
            ERROR_HANDLER.assert_true(cp_calling_done_task_id is not None)
            ERROR_HANDLER.assert_true(cp_calling_tool is not None)
            cp_data = checkpoint_handler.get_optional_payload()
            if cp_data is None:
                cp_data = [{}, {}]
        else:
            cp_data = [{}, {}]

        failverdict_to_val_map = {
                    common_mix.GlobalConstants.FAIL_TEST_VERDICT: \
                                            matrix.getActiveCellDefaultVal(),
                    common_mix.GlobalConstants.PASS_TEST_VERDICT: \
                                            matrix.getInactiveCellVal(),
                    common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT: \
                                            matrix.getUncertainCellDefaultVal()
        }

        assert serialize_period >= 1, \
                            "Serialize period must be an integer in [1,inf["

        # matrix based checkpoint
        completed_elems = set(cp_data[0])

        if criteria_element_list is None:
            criteria_element_list = self.get_criterion_info_object(criterion).\
                                                            get_elements_list()

        ERROR_HANDLER.assert_true(prioritization_module is not None,
                                  "prioritization module must be passed")

        timeout_times = \
                    self.config.SEPARATED_TEST_EXECUTION_EXTRA_TIMEOUT_TIMES

        # in case the test list is empty, do nothing
        if len(testcases) > 0:
            # main loop for elements execution
            num_elems = len(criteria_element_list)
            pos = -1 + len(completed_elems)
            ## prepare the optimizer
            prioritization_module.reset(self.config.get_tool_config_alias(), \
                                            criteria_element_list, testcases)
            while prioritization_module.has_next_test_objective():
                #pos += 1 # Done below in the case where it was not completed
                element = prioritization_module.get_next_test_objective()

                # @Checkpointing: check if already executed
                if element in completed_elems:
                    continue
                else:
                    pos += 1

                logging.debug("# Executing {} element {} ({}/{}) ...".format( \
                                    criterion.get_str(), \
                                    DriversUtils.make_meta_element(element, \
                                        self.config.get_tool_config_alias()), \
                                    pos, num_elems))

                # execute element with the given testcases
                element_executable_path = \
                                self._get_criterion_element_executable_path(\
                                                            criterion, element)
                execution_environment_vars = \
                                self._get_criterion_element_environment_vars(\
                                                            criterion, element)
                # run optimizer with all tests targeting the test objective
                may_cov_tests = prioritization_module\
                                        .get_test_execution_optimizer(element)\
                                        .select_tests(100, is_proportion=True)

                cannot_cov_tests = set(testcases) - set(may_cov_tests)

                fail_verdicts, exec_outs_by_tests = \
                                    self.meta_test_generation_obj.runtests(\
                                        meta_testcases=may_cov_tests, \
                                        exe_path_map=element_executable_path, \
                                        env_vars=execution_environment_vars, \
                                        stop_on_failure=\
                                                cover_criteria_elements_once, \
                                        use_recorded_timeout_times=\
                                                            timeout_times, \
                                        with_output_summary=(executionoutput \
                                                                is not None), \
                                        parallel_test_count=None, \
                                        restart_checkpointer=True)
                prioritization_module.feedback(element, fail_verdicts)

                fail_verdicts.update({\
                            v: common_mix.GlobalConstants.PASS_TEST_VERDICT \
                                                for v in cannot_cov_tests})
                # put in row format for matrix
                matrix_row_key = element
                matrix_row_values = \
                                {tc:failverdict_to_val_map[fail_verdicts[tc]] \
                                                    for tc in fail_verdicts}
                serialize_on = (pos % serialize_period == 0)
                cp_data[0][matrix_row_key] = matrix_row_values

                if executionoutput is not None:
                    cp_data[1][element] = exec_outs_by_tests

                # @Checkpointing: for time
                if serialize_on and checkpoint_handler is not None:
                    checkpoint_handler.do_checkpoint( \
                                            func_name=cp_calling_func_name, \
                                            taskid=cp_calling_done_task_id, \
                                            tool=cp_calling_tool, \
                                            opt_payload=cp_data)

        # Write the execution data into the matrix
        for matrix_row_key, matrix_row_values in list(cp_data[0].items()):
            matrix.add_row_by_key(matrix_row_key, matrix_row_values, \
                                                            serialize=False)

        # TODO: Make the following two instructions atomic
        # final serialization (in case #Muts not multiple of serialize_period)
        matrix.serialize()

        # Write the execution output data
        if executionoutput is not None:
            if len(cp_data[1]) > 0:
                executionoutput.add_data(cp_data[1], serialize=True)
            else:
                executionoutput.serialize()
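
A compact sketch of the verdict-to-matrix-cell translation and the periodic checkpointing used above; cell values and verdict constants are made-up stand-ins for the matrix defaults and framework constants.

    FAIL, PASS, UNCERTAIN = "FAIL", "PASS", "UNCERTAIN"

    # Made-up cell values; the real ones come from the matrix object.
    failverdict_to_val = {FAIL: 1, PASS: 0, UNCERTAIN: -1}

    fail_verdicts = {"t1": FAIL, "t2": PASS}
    cannot_cov_tests = {"t3"}              # tests the optimizer never selected
    fail_verdicts.update({t: PASS for t in cannot_cov_tests})

    matrix_row_values = {tc: failverdict_to_val[v] for tc, v in fail_verdicts.items()}
    # matrix_row_values == {'t1': 1, 't2': 0, 't3': 0}

    # Periodic checkpointing: serialize every 'serialize_period' completed elements.
    serialize_period = 5
    for pos in range(3):
        serialize_on = (pos % serialize_period == 0)
        # the checkpoint handler would be invoked only when serialize_on is True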
Example 10
    def _call_generation_run(self, runtool, args):
        # Delete any klee-out-*
        for d in os.listdir(self.tests_working_dir):
            if d.startswith('klee-out-'):
                shutil.rmtree(os.path.join(self.tests_working_dir, d))

        call_shadow_wrapper_file = os.path.join(self.tests_working_dir, \
                                                                "shadow_wrap")

        devtest_toolalias = self.parent_meta_tool.get_devtest_toolalias()
        ERROR_HANDLER.assert_true(devtest_toolalias is not None, \
                        "devtest must be used when using shadow_se", __file__)

        #test_list = list(self.code_builds_factory.repository_manager\
        #                                               .get_dev_tests_list())
        test_list = []
        for meta_test in self.parent_meta_tool.get_testcase_info_object(\
                               candidate_tool_aliases=[devtest_toolalias])\
                                                            .get_tests_list():
            toolalias, test = DriversUtils.reverse_meta_element(meta_test)
            ERROR_HANDLER.assert_true(toolalias == devtest_toolalias, \
                           "BUG in above get_testcase_info_object", __file__)
            test_list.append(test)

        # Get list of klee_change, klee_get_true/false locations.
        klee_change_stmts = []

        get_lines_callback_obj = self.GetLinesCallbackObject()
        get_lines_callback_obj.set_pre_callback_args(self.code_builds_factory\
                                    .repository_manager.revert_src_list_files)
        get_lines_callback_obj.set_post_callback_args(klee_change_stmts)

        pre_ret, post_ret = self.code_builds_factory.repository_manager\
                                    .custom_read_access(get_lines_callback_obj)
        ERROR_HANDLER.assert_true(pre_ret == \
                                common_mix.GlobalConstants.COMMAND_SUCCESS,\
                                                    "pre failed", __file__)
        ERROR_HANDLER.assert_true(post_ret == \
                                common_mix.GlobalConstants.COMMAND_SUCCESS,\
                                                    "post failed", __file__)

        ERROR_HANDLER.assert_true(len(klee_change_stmts) > 0, \
                        "No klee_change statement in the sources", __file__)

        # Filter only tests that cover those locations,
        # if there is a stmt coverage matrix
        stmt_cov_mat_file = self.head_explorer.get_file_pathname(\
                            fd_structure.CRITERIA_MATRIX[criteria.TestCriteria\
                                                        .STATEMENT_COVERAGE])
        cov_tests = None
        if os.path.isfile(stmt_cov_mat_file):
            stmt_cov_mat = common_matrices.ExecutionMatrix(\
                                                    filename=stmt_cov_mat_file)
            # due to possible wrapper test splitting we update test_list here
            tmp_test_list = []
            for mt in stmt_cov_mat.get_nonkey_colname_list():
                alias, t = DriversUtils.reverse_meta_element(mt)
                if alias == devtest_toolalias:
                    tmp_test_list.append(t)
            test_list = tmp_test_list

            meta_stmts = list(stmt_cov_mat.get_keys())
            tool_aliases = set()
            for meta_stmt in meta_stmts:
                alias, stmt = DriversUtils.reverse_meta_element(meta_stmt)
                tool_aliases.add(alias)
            klee_change_meta_stmts = []
            for alias in tool_aliases:
                klee_change_meta_stmts += [\
                                    DriversUtils.make_meta_element(e, alias) \
                                                    for e in klee_change_stmts]
            klee_change_meta_stmts = list(set(meta_stmts) & \
                                                set(klee_change_meta_stmts))

            cov_tests = set()
            if len(klee_change_meta_stmts) > 0:
                for _, t in stmt_cov_mat.query_active_columns_of_rows(\
                                row_key_list=klee_change_meta_stmts).items():
                    cov_tests |= set(t)
            else:
                logging.warning('No test covers the patch (SHADOW)!')
            #    ERROR_HANDLER.assert_true(len(klee_change_meta_stmts) > 0, \
            #                            "No test covers the patch", __file__)

        # tests will be generated in the same dir that has the input .bc file
        os.mkdir(self.tests_storage_dir)

        # obtain candidate tests
        cand_testpair_list = []
        for test in test_list:
            meta_test = DriversUtils.make_meta_element(test, devtest_toolalias)
            if cov_tests is not None and meta_test not in cov_tests:
                continue
            cand_testpair_list.append((test, meta_test))

        # Adjust the max-time in args
        ## locate max-time
        per_test_hard_timeout = None
        per_test_timeout = None
        if len(cand_testpair_list) > 0:
            cur_max_time = float(self.get_value_in_arglist(args, 'max-time'))
            if self.driver_config.get_gen_timeout_is_per_test():
                per_test_timeout = cur_max_time
            else:
                per_test_timeout = max(60, \
                                       cur_max_time / len(cand_testpair_list))
            self.set_value_in_arglist(args, 'max-time', str(per_test_timeout))

            # give time to dump remaining states
            per_test_hard_timeout = per_test_timeout + \
                                self.config.TEST_GEN_TIMEOUT_FRAMEWORK_GRACE

        #per_test_hard_timeout = 300 #DBG
        # Set the wrapper
        with open(call_shadow_wrapper_file, 'w') as wf:
            wf.write('#! /bin/bash\n\n')
            wf.write('set -u\n')
            wf.write('set -o pipefail\n\n')
            wf.write('ulimit -s unlimited\n')

            # timeout the shadow execution (some tests create daemons which
            # are not killed by the test timeout). ALSO MAKE SURE TO
            # DEACTIVATE THE TIMEOUT IN THE TEST SCRIPT
            kill_after = 30
            wf.write('time_out_cmd="/usr/bin/timeout --kill-after={}s {}"\n'.\
                                     format(kill_after, per_test_hard_timeout))
            # add the kill-after delay plus time for the timeout to act
            per_test_hard_timeout += kill_after + 60

            #wf.write(' '.join(['exec', runtool] + args + ['"${@:1}"']) + '\n')
            wf.write('\nstdindata="{}/klee-last/{}"\n'.format(\
                                                    self.tests_working_dir, \
                                        KTestTestFormat.STDIN_KTEST_DATA_FILE))
            wf.write('tmpstdindata="{}/{}"\n\n'.format(self.tests_working_dir,\
                                        KTestTestFormat.STDIN_KTEST_DATA_FILE))
            wf.write('if [ -t 0 ] # check if stdin does not exist\n')
            wf.write('then\n')
            wf.write(' '.join(['\t(', '$time_out_cmd', runtool] + args + \
                                    ['"${@:1}"', ') ; EXIT_CODE=$?', '\n']))
            wf.write('\t/usr/bin/touch $tmpstdindata\n')

            wf.write('else\n')
            wf.write('\t(/bin/cat - > $tmpstdindata ) || EXIT_CODE=1\n')
            wf.write(' '.join(['\t(', '/bin/cat $tmpstdindata | ', \
                            '$time_out_cmd', runtool] + args + \
                                ['"${@:1}"', ') ; EXIT_CODE=$?', '\n']))
            wf.write('fi\n\n')
            wf.write('/bin/mv $tmpstdindata $stdindata || EXIT_CODE=2\n')
            wf.write('\n# Provoke "unbound variable" if KLEE fails\n')
            wf.write('# Preserve the KLEE exit code\n')
            wf.write('exit $EXIT_CODE\n')
        os.chmod(call_shadow_wrapper_file, 0o775)

        # run test
        exes, _ = self.code_builds_factory.repository_manager\
                                                .get_relative_exe_path_map()
        ERROR_HANDLER.assert_true(len(exes) == 1, \
                                            "Must have a single exe", __file__)
        exe_path_map = {e: call_shadow_wrapper_file for e in exes}
        env_vars = {}
        self._dir_chmod777(self.tests_storage_dir)
        for test, meta_test in cand_testpair_list:
            self.parent_meta_tool.execute_testcase(meta_test, exe_path_map, \
                                    env_vars, timeout=per_test_hard_timeout,\
                                                    with_output_summary=False)

            #logging.debug("DBG: Just executed test '{}'".format(meta_test))
            #input(">>>> ") #DBG

            # copy the klee out
            test_out = os.path.join(self.tests_storage_dir, \
                                          self.get_sorage_name_of_test(test))
            os.mkdir(test_out)
            for d in glob.glob(self.tests_working_dir + "/klee-out-*"):
                # make sure we can do anything with it
                self._dir_chmod777(d)
                if not self.keep_first_test:
                    first_test = os.path.join(d, 'test000001.ktest')
                    if os.path.isfile(first_test):
                        shutil.move(first_test, first_test + '.disable')
                shutil.move(d, test_out)
            ERROR_HANDLER.assert_true(len(list(os.listdir(test_out))) > 0, \
                                "Shadow generated no test for tescase: "+test,\
                                                                    __file__)
            if os.path.islink(os.path.join(self.tests_working_dir, \
                                                                'klee-last')):
                os.unlink(os.path.join(self.tests_working_dir, 'klee-last'))

        # store klee_change locs
        common_fs.dumpJSON(klee_change_stmts, self.klee_change_locs_list_file)
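
A short sketch of the per-test timeout adjustment done above: the overall 'max-time' budget is either used per test or split across the candidate tests with a 60-second floor, and a grace period is added for the hard timeout (numbers are illustrative).

    cur_max_time = 600.0                   # example overall budget, in seconds
    num_candidate_tests = 25
    gen_timeout_is_per_test = False
    GRACE = 30                             # stand-in for TEST_GEN_TIMEOUT_FRAMEWORK_GRACE

    if gen_timeout_is_per_test:
        per_test_timeout = cur_max_time
    else:
        per_test_timeout = max(60, cur_max_time / num_candidate_tests)

    per_test_hard_timeout = per_test_timeout + GRACE
    # per_test_timeout == 60 (600 / 25 == 24 is below the 60-second floor)
    # per_test_hard_timeout == 90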
Example 11
    def _extract_coverage_data_of_a_test(self, enabled_criteria, \
                                    test_execution_verdict, result_dir_tmp):
        ''' read json files and extract data
            return: the dict of criteria with covering count
            # TODO: Restrict to returning coverage of specified headers files
        '''
        gcov_list = common_fs.loadJSON(os.path.join(result_dir_tmp,\
                                                self.gcov_files_list_filename))
        
        res = {c: {} for c in enabled_criteria}

        func_cov = None
        branch_cov = None
        statement_cov = {}
        if TestCriteria.FUNCTION_COVERAGE in enabled_criteria:
            func_cov = res[TestCriteria.FUNCTION_COVERAGE]
        if TestCriteria.BRANCH_COVERAGE in enabled_criteria:
            branch_cov = res[TestCriteria.BRANCH_COVERAGE]
        if TestCriteria.STATEMENT_COVERAGE in enabled_criteria:
            statement_cov = res[TestCriteria.STATEMENT_COVERAGE]

        # Sources of interest
        _, src_map = self.code_builds_factory.repository_manager.\
                                                    get_relative_exe_path_map()

        for gcov_file in gcov_list:
            with open(gcov_file) as fp:
                last_line = None
                src_file = None
                for raw_line in fp:
                    line = raw_line.strip()
                    col_split = [v.strip() for v in line.split(':')]

                    if len(col_split) > 2 and col_split[1] == '0':
                        # preamble
                        if col_split[2] == "Source":
                            src_file = col_split[3]
                            if src_file not in src_map:
                                # src file not among the considered sources
                                break
                    elif line.startswith("function "):
                        # match function
                        parts = line.split()
                        ident = DriversUtils.make_meta_element(parts[1], \
                                                                    src_file)
                        func_cov[ident] = int(parts[3])
                    elif line.startswith("branch "):
                        # match branch
                        parts = line.split()
                        ident = DriversUtils.make_meta_element(parts[1], \
                                                                    last_line)
                        branch_cov[ident] = int(parts[3])

                    elif len(col_split) > 2 and \
                                            re.match(r"^\d+$", col_split[1]):
                        # match line
                        if col_split[0] == '-':
                            continue
                        last_line = DriversUtils.make_meta_element(\
                                                        col_split[1], src_file)
                        if col_split[0] in ('#####', '====='):
                            exec_count = 0
                        else:
                            exec_count = \
                                    int(re.findall(r'^\d+', col_split[0])[0])
                        statement_cov[last_line] = exec_count

        # delete gcov files
        for gcov_f in self._get_gcov_list():
            os.remove(gcov_f)

        return res
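
For reference, a standalone run of the same parsing logic over a made-up excerpt in a layout close to gcov's annotated text output ('count:lineno:source' columns plus 'function ...' and 'branch ...' summary lines); the "alias:name" keys are a hypothetical convention.

    import re

    gcov_lines = [
        "        -:    0:Source:src/a.c",
        "function main called 1 returned 100% blocks executed 80%",
        "        1:    5:int main(void) {",
        "branch  0 taken 1",
        "    #####:    9:  dead_code();",
    ]

    func_cov, branch_cov, statement_cov = {}, {}, {}
    last_line, src_file = None, None
    for raw_line in gcov_lines:
        line = raw_line.strip()
        col_split = [v.strip() for v in line.split(':')]
        if len(col_split) > 2 and col_split[1] == '0':
            if col_split[2] == "Source":
                src_file = col_split[3]
        elif line.startswith("function "):
            parts = line.split()
            func_cov["{}:{}".format(src_file, parts[1])] = int(parts[3])
        elif line.startswith("branch "):
            parts = line.split()
            branch_cov["{}:{}".format(last_line, parts[1])] = int(parts[3])
        elif len(col_split) > 2 and re.match(r"^\d+$", col_split[1]):
            if col_split[0] == '-':
                continue
            last_line = "{}:{}".format(src_file, col_split[1])
            if col_split[0] in ('#####', '====='):
                exec_count = 0
            else:
                exec_count = int(re.findall(r'^\d+', col_split[0])[0])
            statement_cov[last_line] = exec_count

    # func_cov      == {'src/a.c:main': 1}
    # branch_cov    == {'src/a.c:5:0': 1}
    # statement_cov == {'src/a.c:5': 1, 'src/a.c:9': 0}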
Example 12
    def runtests(self, meta_testcases=None, exe_path_map=None, env_vars=None, \
                        stop_on_failure=False, \
                        per_test_timeout=None, \
                        use_recorded_timeout_times=None, \
                        recalculate_execution_times=False, \
                        fault_test_execution_matrix_file=None, \
                        fault_test_execution_execoutput_file=None, \
                        with_outlog_hash=True, \
                        test_prioritization_module=None, \
                        parallel_test_count=1, \
                        parallel_test_scheduler=None, \
                        restart_checkpointer=False,
                        finish_destroy_checkpointer=True):
        '''
        Execute the list of test cases with the given executable and 
        say, for each test case, whether it failed

        :param meta_testcases: list of test cases to execute
        :param exe_path_map: string representing the file system path to 
                        the executable to execute with the tests
        :param env_vars: dict of environment variables to set before
                        executing each test ({<variable>: <value>})
        :param stop_on_failure: decide whether to stop the test 
                        execution once a test fails
        :param fault_test_execution_matrix_file: Optional matrix file 
                        to store the tests' pass/fail execution data
        :param fault_test_execution_execoutput_file: Optional output log file 
                        to store the tests' execution actual output (hashed)
        :param with_outlog_hash: decide whether to return outlog hash 
        :param test_prioritization_module: Specify the test prioritization
                        module. 
                        (TODO: Implement support)
        :param parallel_test_count: Specify the number of parallel test
                        executions. Must be an integer >= 1
        :param parallel_test_scheduler: Specify the function that will
                        handle parallel test scheduling by tool, using
                        the test execution optimizer. 
                        (TODO: Implement support)

        :type restart_checkpointer: bool
        :param restart_checkpointer: Decide whether to discard checkpoint
                        and restart anew.

        :type finish_destroy_checkpointer: bool
        :param finish_destroy_checkpointer: Decide whether to automatically 
                        destroy the checkpointer when done or not.
                        Useful if the caller has a checkpointer to update.

        :returns: dict of testcases and their failure verdicts.
                 {<test case name>: <True if failed, False if passed,
                    UNCERTAIN_TEST_VERDICT if uncertain>}
                 If stop_on_failure is True, only return the tests that 
                 have been executed until the failure
        '''

        # FIXME: Make sure that the support is implemented for
        # parallelism and test prioritization. Remove the code below
        # once supported:
        ERROR_HANDLER.assert_true(test_prioritization_module is None, \
                        "Must implement test prioritization support here", \
                                                                    __file__)
        ERROR_HANDLER.assert_true(parallel_test_count <= 1, \
                    "Must implement parallel tests execution support here", \
                                                                    __file__)
        ERROR_HANDLER.assert_true(parallel_test_scheduler is None, \
                    "Must implement parallel tests execution support here", \
                                                                    __file__)
        #~FIXMEnd

        # Check arguments Validity
        if exe_path_map is None:
            exe_path_map = self._get_default_exe_path_map()

        ERROR_HANDLER.assert_true(parallel_test_count > 0, \
                    "invalid parallel test execution count: {}. {}".format( \
                                    parallel_test_count, "must be >= 1"))

        # @Checkpoint: create a checkpoint handler
        cp_func_name = "runtests"
        cp_task_id = 1
        checkpoint_handler = \
                CheckPointHandler(self.get_checkpoint_state_object())
        if restart_checkpointer:
            checkpoint_handler.restart()
        if checkpoint_handler.is_finished():
            logging.warning("%s %s" %("The function 'runtests' is finished", \
                "according to checkpoint, but called again. None returned"))
            if common_mix.confirm_execution("%s %s" % ( \
                                        "Function 'runtests' is already", \
                                        "finished, do you want to restart?")):
                checkpoint_handler.restart()
                logging.info("Restarting the finished 'runtests'")
            else:
                ERROR_HANDLER.error_exit(err_string="%s %s %s" % (\
                        "Execution halted. Cannot continue because no value", \
                        " can be returned. Check the results of the", \
                        "finished execution"), call_location=__file__)

        # @Checkpoint: Get the saved payload (data kept for each tool)
        # pair list of testfailed verdict and execution output
        meta_test_failedverdicts_outlog = \
                                    checkpoint_handler.get_optional_payload()
        if meta_test_failedverdicts_outlog is None:
            meta_test_failedverdicts_outlog = [{}, {}]

        # Make sure the tests are unique
        ERROR_HANDLER.assert_true(len(meta_testcases) == \
                                                len(set(meta_testcases)), \
                                        "not all tests are unique", __file__)

        testcases_by_tool = {}
        for meta_testcase in meta_testcases:
            ttoolalias, testcase = \
                            DriversUtils.reverse_meta_element(meta_testcase)
            if ttoolalias not in testcases_by_tool:
                testcases_by_tool[ttoolalias] = []
            testcases_by_tool[ttoolalias].append(testcase)

        found_a_failure = False
        for tpos, ttoolalias in enumerate(testcases_by_tool.keys()):
            # @Checkpoint: Check whether already executed
            if not checkpoint_handler.is_to_execute(func_name=cp_func_name, \
                                                taskid=cp_task_id, \
                                                tool=ttoolalias):
                continue

            # Actual execution
            ttool = \
                self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            test_failed_verdicts, test_execoutput = ttool.runtests( \
                                            testcases_by_tool[ttoolalias], \
                                            exe_path_map, env_vars, \
                                            stop_on_failure, \
                                            per_test_timeout=per_test_timeout,
                                            use_recorded_timeout_times=\
                                                use_recorded_timeout_times, \
                                            recalculate_execution_times=\
                                                recalculate_execution_times, \
                                            with_outlog_hash=with_outlog_hash)
            for testcase in test_failed_verdicts:
                meta_testcase =  \
                        DriversUtils.make_meta_element(testcase, ttoolalias)
                meta_test_failedverdicts_outlog[0][meta_testcase] = \
                                                test_failed_verdicts[testcase]
                meta_test_failedverdicts_outlog[1][meta_testcase] = \
                                                    test_execoutput[testcase]
                if test_failed_verdicts[testcase] == \
                                common_mix.GlobalConstants.FAIL_TEST_VERDICT:
                    found_a_failure = True

            # @Checkpoint: Checkpointing
            checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=ttoolalias, \
                                opt_payload=meta_test_failedverdicts_outlog)

            if stop_on_failure and found_a_failure:
                # @Checkpoint: Checkpointing for remaining tools
                for rem_tool in list(testcases_by_tool.keys())[tpos + 1:]:
                    checkpoint_handler.do_checkpoint(func_name=cp_func_name, \
                                taskid=cp_task_id, \
                                tool=rem_tool, \
                                opt_payload=meta_test_failedverdicts_outlog)
                break

        if stop_on_failure:
            # Make sure the non-executed tests have the uncertain value (None)
            if len(meta_test_failedverdicts_outlog[0]) < len(meta_testcases):
                for meta_testcase in set(meta_testcases) - \
                                    set(meta_test_failedverdicts_outlog[0]):
                    meta_test_failedverdicts_outlog[0][meta_testcase] = \
                            common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT
                    meta_test_failedverdicts_outlog[1][meta_testcase] = \
                            common_matrices.OutputLogData.\
                                                    UNCERTAIN_TEST_OUTLOGDATA

        ERROR_HANDLER.assert_true(len(meta_test_failedverdicts_outlog[0]) == \
                                                        len(meta_testcases), \
                            "Not all tests have a verdict reported", __file__)

        if fault_test_execution_matrix_file is not None:
            # Load or Create the matrix
            fault_test_execution_matrix = common_matrices.ExecutionMatrix( \
                                filename=fault_test_execution_matrix_file, \
                                            non_key_col_list=meta_testcases)
            ERROR_HANDLER.assert_true(fault_test_execution_matrix.is_empty(), \
                                            "matrix must be empty", __file__)
            failverdict2val = {
                common_mix.GlobalConstants.FAIL_TEST_VERDICT: \
                        fault_test_execution_matrix.getActiveCellDefaultVal(),
                common_mix.GlobalConstants.PASS_TEST_VERDICT: \
                            fault_test_execution_matrix.getInactiveCellVal(),
                common_mix.GlobalConstants.UNCERTAIN_TEST_VERDICT: \
                    fault_test_execution_matrix.getUncertainCellDefaultVal(),
            }
            cells_dict = {}
            for meta_testcase in meta_test_failedverdicts_outlog[0]:
                cells_dict[meta_testcase] = failverdict2val[\
                            meta_test_failedverdicts_outlog[0][meta_testcase]]

            fault_test_execution_matrix.add_row_by_key(self.FAULT_MATRIX_KEY, \
                                                cells_dict, serialize=True)

        if fault_test_execution_execoutput_file is None:
            meta_test_failedverdicts_outlog[1] = None
        else:
            # Load or Create the data object
            fault_test_execution_execoutput = common_matrices.OutputLogData( \
                                filename=fault_test_execution_execoutput_file)
            ERROR_HANDLER.assert_true(\
                            fault_test_execution_execoutput.is_empty(), \
                                        "outlog data must be empty", __file__)
            fault_test_execution_execoutput.add_data(\
                                    {self.PROGRAM_EXECOUTPUT_KEY: \
                                         meta_test_failedverdicts_outlog[1]}, \
                                                                serialize=True)

        # @Checkpoint: Finished
        detailed_exectime = {}
        for ttoolalias in testcases_by_tool.keys():
            tt = self.testcases_configured_tools[ttoolalias][self.TOOL_OBJ_KEY]
            detailed_exectime[ttoolalias] = (\
                        tt.get_checkpointer().get_execution_time(),\
                        tt.get_checkpointer().get_detailed_execution_time())

        checkpoint_handler.set_finished( \
                                    detailed_exectime_obj=detailed_exectime)

        if finish_destroy_checkpointer:
            checkpoint_handler.destroy()

        return meta_test_failedverdicts_outlog
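
A minimal sketch of the grouping step near the top of this function, assuming the hypothetical "alias:testname" convention and str.split in place of DriversUtils.reverse_meta_element.

    meta_testcases = ["devtests:t1", "klee:test000001", "devtests:t2"]

    testcases_by_tool = {}
    for meta_testcase in meta_testcases:
        ttoolalias, testcase = meta_testcase.split(":", 1)
        testcases_by_tool.setdefault(ttoolalias, []).append(testcase)

    # testcases_by_tool == {'devtests': ['t1', 't2'], 'klee': ['test000001']}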
Example 13
    def runtests_criteria_coverage (self, testcases, criterion_to_matrix, \
                                    criterion_to_executionoutput=None,
                                    criteria_element_list_by_criteria=None, \
                                    re_instrument_code=False, \
                                    cover_criteria_elements_once=False,
                                    prioritization_module_by_criteria=None,
                                    parallel_count=1, \
                                    parallel_criteria_test_scheduler=None,\
                                    restart_checkpointer=False, \
                                    finish_destroy_checkpointer=True):
        ''' 
        Executes the instrumented executable code with the testcases and
        returns the different code coverage matrices.

        :param testcases: list of testcases to execute

        :param criterion_to_matrix: dict of <criterion, Matrix file 
                        where to store coverage>. 
        :param criterion_to_executionoutput: dict of <criterion, execoutput 
                        file where to store coverage>. 
        
        :param criteria_element_list_by_criteria: dictionary representing the
                        list of criteria elements (stmts, branches, mutants)
                        to consider in the test execution matrices.
                        Key is the criterion and the value the list of elements

        :param re_instrument_code: Decide whether to instrument code before 
                        running the tests. (Example: when instrumentation was
                        not explicitly called beforehand. This is True by default)

        :param cover_criteria_elements_once: Specify whether covering each
                        criterion element once is enough, meaning that we stop
                        analysing a criterion element once a test covers it.
                        The remaining tests' covering verdicts will be UNKNOWN.

        :param prioritization_module_by_criteria: dict of prioritization module
                        by criteria. None means no prioritization used.

        :type parallel_count:
        :param parallel_count:

        :type parallel_criteria_test_scheduler:
        :param parallel_criteria_test_scheduler: scheduler that organizes 
                        parallelism across criteria tools.
                        (TODO: Implement support)

        :type restart_checkpointer:
        :param restart_checkpointer:

        :type finish_destroy_checkpointer:
        :param finish_destroy_checkpointer:
        '''

        # FIXME: Make sure that the support is implemented for
        # parallelism and test prioritization. Remove the code below
        # once supported:
        ERROR_HANDLER.assert_true(parallel_count <= 1, \
                    "Must implement parallel execution support here", \
                                                                    __file__)
        ERROR_HANDLER.assert_true(parallel_criteria_test_scheduler is None, \
            "Must implement parallel codes tests execution support here", \
                                                                    __file__)
        #~FIXMEnd

        # Check arguments Validity
        ERROR_HANDLER.assert_true(parallel_count > 0, \
                    "invalid parallel  execution count: {}. {}".format( \
                                    parallel_count, "must be >= 1"))

        # @Checkpoint: create a checkpoint handler
        cp_func_name = "runtests_criteria_coverage"
        cp_task_id = 1
        checkpoint_handler = CheckPointHandler( \
                                            self.get_checkpoint_state_object())
        if restart_checkpointer:
            checkpoint_handler.restart()
        if checkpoint_handler.is_finished():
            return

        ERROR_HANDLER.assert_true(len(criterion_to_matrix) > 0, \
                                        "no criterion is enabled", __file__)

        ERROR_HANDLER.assert_true(len(set(criterion_to_matrix) - \
                            set(self.tools_config_by_criterion_dict)) == 0, \
                    "Passed matrices output are more than tool specified", \
                                                                    __file__)

        if criterion_to_executionoutput is not None:
            ERROR_HANDLER.assert_true(set(criterion_to_matrix) == \
                                        set(criterion_to_executionoutput), \
                            "criteria mismatch between matrix and output", \
                                                                    __file__)

        tool2criteria = self._get_tool2criteria(criterion_to_matrix.keys())

        matrices_dir_tmp = os.path.join(self.criteria_working_dir, \
                                                            "codecov_dir.tmp")
        if os.path.isdir(matrices_dir_tmp):
            if restart_checkpointer:
                shutil.rmtree(matrices_dir_tmp)
                os.mkdir(matrices_dir_tmp)
        else:
            os.mkdir(matrices_dir_tmp)

        if criteria_element_list_by_criteria is None:
            criteria_element_list_by_criteria = \
                                        {c: None for c in criterion_to_matrix}

        # get criteria elements by tools
        criteria_elem_list_by_tool = {}
        for criterion in criteria_element_list_by_criteria:
            if criteria_element_list_by_criteria[criterion] is None:
                for t_conf in self.tools_config_by_criterion_dict[criterion]:
                    toolalias = t_conf.get_tool_config_alias()
                    if toolalias not in criteria_elem_list_by_tool:
                        criteria_elem_list_by_tool[toolalias] = {}
                    criteria_elem_list_by_tool[toolalias][criterion] = None
                continue

            ERROR_HANDLER.assert_true(\
                    len(criteria_element_list_by_criteria[criterion]) != 0, \
                    "Empty criteria element list for criterion "\
                                            +criterion.get_str(), __file__)
            for crit_elem in criteria_element_list_by_criteria[criterion]:
                toolalias, elem = DriversUtils.reverse_meta_element(crit_elem)
                if toolalias not in criteria_elem_list_by_tool:
                    criteria_elem_list_by_tool[toolalias] = {}
                if criterion not in criteria_elem_list_by_tool[toolalias]:
                    criteria_elem_list_by_tool[toolalias][criterion] = []
                criteria_elem_list_by_tool[toolalias][criterion].append(elem)

            ERROR_HANDLER.assert_true(len(set(criteria_elem_list_by_tool) - \
                                set(self.criteria_configured_tools)) == 0, \
                                "some tools in data not registered", __file__)

        crit2tool2matrixfile = {cv: {} for cv in criterion_to_matrix}
        crit2tool2outhashfile = {cv: {} for cv in criterion_to_executionoutput}
        for ctoolalias in tool2criteria:
            _criteria2matrix = {}
            _criteria2outhash = {}
            for criterion in tool2criteria[ctoolalias]:
                _criteria2matrix[criterion] = os.path.join(matrices_dir_tmp, \
                                                criterion.get_field_value()
                                                                + '-'
                                                                + ctoolalias
                                                                + '.csv')
                if criterion_to_executionoutput is None or \
                            criterion_to_executionoutput[criterion] is None:
                    _criteria2outhash[criterion] = None
                else:
                    _criteria2outhash[criterion] = \
                                            os.path.join(matrices_dir_tmp, \
                                                criterion.get_field_value()
                                                        + '-'
                                                        + ctoolalias
                                                        + '.outloghash.json')
                crit2tool2matrixfile[criterion][ctoolalias] = \
                                                    _criteria2matrix[criterion]
                crit2tool2outhashfile[criterion][ctoolalias] = \
                                                _criteria2outhash[criterion]

            # @Checkpoint: Check whether already executed
            if checkpoint_handler.is_to_execute( \
                                        func_name=cp_func_name, \
                                        taskid=cp_task_id, \
                                        tool=ctoolalias):
                for criterion in _criteria2matrix:
                    _criteria2matrix[criterion] = \
                                        common_matrices.ExecutionMatrix( \
                                        filename=_criteria2matrix[criterion], \
                                        non_key_col_list=testcases)
                    if _criteria2outhash[criterion] is not None:
                        _criteria2outhash[criterion] = \
                                        common_matrices.OutputLogData( \
                                        filename=_criteria2outhash[criterion])
                # Actual execution
                ctool = self.criteria_configured_tools[ctoolalias][\
                                                            self.TOOL_OBJ_KEY]
                ctool.runtests_criteria_coverage(testcases, \
                                criteria_element_list_by_criteria=\
                                        criteria_elem_list_by_tool[ctoolalias],\
                                criterion_to_matrix=_criteria2matrix, \
                                criterion_to_executionoutput=\
                                                            _criteria2outhash,\
                                re_instrument_code=re_instrument_code, \
                                cover_criteria_elements_once=\
                                                cover_criteria_elements_once, \
                                prioritization_module_by_criteria=\
                                            prioritization_module_by_criteria)

                # Checkpointing
                checkpoint_handler.do_checkpoint( \
                                        func_name=cp_func_name, \
                                        taskid=cp_task_id, \
                                        tool=ctoolalias)

        # Aggregate the matrices and out hashes
        ## Create result matrices and out hashes
        result_matrices = {}
        result_outloghashes = {}
        for criterion in criterion_to_matrix:
            result_matrices[criterion] = common_matrices.ExecutionMatrix( \
                                filename=criterion_to_matrix[criterion], \
                                non_key_col_list=testcases)
            if criterion_to_executionoutput[criterion] is None:
                result_outloghashes[criterion] = None
            else:
                result_outloghashes[criterion] = \
                            common_matrices.OutputLogData(filename=\
                                    criterion_to_executionoutput[criterion])
                ERROR_HANDLER.assert_true(\
                            crit2tool2outhashfile[criterion] is not None,
                            "Bug: log enabled but hidden from tool", __file__)
        ## Actual aggregate
        logging.debug("saving results ...")
        for criterion in result_matrices:
            result_matrix = result_matrices[criterion]
            result_outloghash = result_outloghashes[criterion]
            for mtoolalias in crit2tool2matrixfile[criterion]:
                tool_matrix = common_matrices.ExecutionMatrix(\
                        filename=crit2tool2matrixfile[criterion][mtoolalias])

                # Check columns
                ERROR_HANDLER.assert_true(tool_matrix.get_key_colname() == \
                                            result_matrix.get_key_colname(), \
                                    "mismatch on key column name", __file__)
                ERROR_HANDLER.assert_true( \
                                set(tool_matrix.get_nonkey_colname_list()) == \
                                set(result_matrix.get_nonkey_colname_list()), \
                                "mismatch on non key column names", __file__)

                # bring in the data
                key2nonkeydict = tool_matrix.to_pandas_df().\
                        set_index(tool_matrix.get_key_colname(), drop=True).\
                                                to_dict(orient="index")
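                # key2nonkeydict maps each tool-level objective key to its
                # row of per-test values: {objective: {test name: value}}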

                for c_key in key2nonkeydict:
                    meta_c_key = DriversUtils.make_meta_element(\
                                                        str(c_key), mtoolalias)
                    result_matrix.add_row_by_key(meta_c_key,
                                                 key2nonkeydict[c_key],
                                                 serialize=False)

                # out log hash
                if crit2tool2outhashfile[criterion] is not None:
                    tool_outloghash = common_matrices.OutputLogData(\
                            filename=\
                                crit2tool2outhashfile[criterion][mtoolalias])
                    for objective, objective_data in \
                                tool_outloghash.get_zip_objective_and_data():
                        meta_objective = DriversUtils.make_meta_element(\
                                                    str(objective), mtoolalias)
                        result_outloghash.add_data(
                                            {meta_objective: objective_data}, \
                                            serialize=False)

            # @Checkpoint: Check whether already executed
            if checkpoint_handler.is_to_execute( \
                                        func_name=cp_func_name, \
                                        taskid=cp_task_id + 1,
                                        tool=criterion.get_str()):
                # Serialized the computed matrix
                result_matrix.serialize()
                if result_outloghash is not None:
                    result_outloghash.serialize()
            # @Checkpoint: Checkpointing
            checkpoint_handler.do_checkpoint( \
                                    func_name=cp_func_name, \
                                    taskid=cp_task_id + 1,
                                    tool=criterion.get_str())

        # Delete the temporary tool matrices directory
        if os.path.isdir(matrices_dir_tmp):
            shutil.rmtree(matrices_dir_tmp)

        # @Checkpoint: Finished
        detailed_exectime = {}
        for ctoolalias in tool2criteria:
            ct = self.criteria_configured_tools[ctoolalias][self.TOOL_OBJ_KEY]
            detailed_exectime[ctoolalias] = (\
                        ct.get_checkpointer().get_execution_time(),\
                        ct.get_checkpointer().get_detailed_execution_time())

        checkpoint_handler.set_finished(\
                                    detailed_exectime_obj=detailed_exectime)

        if finish_destroy_checkpointer:
            checkpoint_handler.destroy()
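
A minimal sketch of the key-prefixing idea used by the aggregation loop above, with plain dicts standing in for the ExecutionMatrix API; the separator, helper names, and tool aliases below are illustrative assumptions, not the actual DriversUtils implementation.

def make_meta_key(key, tool_alias, sep=":"):
    # Hypothetical meta-key format; the real format is defined by
    # DriversUtils.make_meta_element.
    return "{}{}{}".format(tool_alias, sep, key)

def merge_tool_rows(tool_alias_to_rows):
    """Merge per-tool {key: row} dicts into one dict keyed by meta keys."""
    merged = {}
    for tool_alias, rows in tool_alias_to_rows.items():
        for key, row in rows.items():
            merged[make_meta_key(key, tool_alias)] = row
    return merged

# Two tools reporting the same element name no longer collide:
merged = merge_tool_rows({
    "tool_a": {"main.c:12": {"test1": 1, "test2": 0}},
    "tool_b": {"main.c:12": {"test1": 0, "test2": 1}},
})
assert set(merged) == {"tool_a:main.c:12", "tool_b:main.c:12"}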
Example n. 14
    def _extract_coverage_data_of_a_test(self, enabled_criteria, \
                                    test_execution_verdict, result_dir_tmp):
        ''' Read the gcov files (listed in a JSON file list) and extract
            coverage data.
            return: the dict of criteria with covering count
            # TODO: Restrict to returning coverage of specified header files
        '''
        gcov_list = common_fs.loadJSON(os.path.join(result_dir_tmp,\
                                                self.gcov_files_list_filename))

        res = {c: {} for c in enabled_criteria}

        func_cov = None
        branch_cov = None
        statement_cov = {}
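        # statement_cov starts as a throwaway dict (reassigned below when the
        # criterion is enabled) so that line entries, which gcov always
        # emits, can be recorded either way.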
        if TestCriteria.FUNCTION_COVERAGE in enabled_criteria:
            func_cov = res[TestCriteria.FUNCTION_COVERAGE]
        if TestCriteria.BRANCH_COVERAGE in enabled_criteria:
            branch_cov = res[TestCriteria.BRANCH_COVERAGE]
        if TestCriteria.STATEMENT_COVERAGE in enabled_criteria:
            statement_cov = res[TestCriteria.STATEMENT_COVERAGE]

        # Sources of interest
        _, src_map = self.code_builds_factory.repository_manager.\
                                                    get_relative_exe_path_map()

        #logging.debug("gcov_list: {}".format(gcov_list))

        for gcov_file in gcov_list:
            with open(gcov_file) as fp:
                last_line = None
                src_file = None
                for raw_line in fp:
                    line = raw_line.strip()
                    col_split = [v.strip() for v in line.split(':')]

                    if len(col_split) > 2 and col_split[1] == '0':
                        # preamble
                        if col_split[2] == "Source":
                            src_file = os.path.normpath(col_split[3])
                            if src_file not in src_map:
                                s_cand = None
                                for s in src_map.keys():
                                    if s.endswith(os.sep + src_file):
                                        ERROR_HANDLER.assert_true(s_cand is None,\
                                              "multiple candidate, maybe same "+\
                                              "source name in different dirs "+\
                                              "for source {}".format(src_file), \
                                                                        __file__)
                                        s_cand = s
                                if s_cand is None:
                                    # The src file is not represented in
                                    # src_map (neither equal nor relatively
                                    # equal, e.g. when the build happens
                                    # outside the root dir), so it is not
                                    # considered
                                    break
                                else:
                                    # ensure compatibility in matrices
                                    src_file = s_cand
                    elif line.startswith("function "):
                        # match function
                        parts = line.split()
                        ident = DriversUtils.make_meta_element(parts[1], \
                                                                    src_file)
                        func_cov[ident] = int(parts[3])
                    elif line.startswith("branch "):
                        # match branch
                        parts = line.split()
                        ident = DriversUtils.make_meta_element(parts[1], \
                                                                    last_line)
                        if parts[2:4] == ['never', 'executed']:
                            branch_cov[ident] = 0
                        else:
                            branch_cov[ident] = int(parts[3])

                    elif len(col_split) > 2 and \
                                            re.match(r"^\d+$", col_split[1]):
                        # match line
                        if col_split[0] == '-':
                            continue
                        last_line = DriversUtils.make_meta_element(\
                                                        col_split[1], src_file)
                        if col_split[0] in ('#####', '====='):
                            exec_count = 0
                        else:
                            exec_count = \
                                    int(re.findall(r'^\d+', col_split[0])[0])
                        statement_cov[last_line] = exec_count

        # delete gcov files
        for gcov_f in self._get_gcov_list():
            os.remove(gcov_f)

        #logging.debug("gcov res is: {}".format(res))

        return res
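
For context on the parsing above: gcov's textual report formats each source line as "exec_count:line_no:source text", with '#####' or '=====' marking lines that were never executed, '-' marking non-executable lines, and line number 0 reserved for preamble entries such as the Source path. Below is a stand-alone sketch of just the statement-coverage part, assuming that same text format (the function name and simplified return shape are illustrative, not part of the driver's API).

import re

def parse_gcov_statement_counts(gcov_text):
    """Minimal sketch: map source line numbers to execution counts from the
    text of a .gcov report (statement coverage only)."""
    counts = {}
    for raw_line in gcov_text.splitlines():
        cols = [v.strip() for v in raw_line.split(':')]
        # Skip preamble (line number 0), summary lines and anything malformed
        if len(cols) < 3 or not re.match(r"^\d+$", cols[1]) or cols[1] == '0':
            continue
        if cols[0] == '-':
            continue    # non-executable line
        if cols[0] in ('#####', '====='):
            counts[int(cols[1])] = 0    # never executed
        else:
            counts[int(cols[1])] = int(re.findall(r'^\d+', cols[0])[0])
    return counts

# Example fragment of a .gcov report:
sample = ("        -:    0:Source:main.c\n"
          "        3:    5:    int x = 1;\n"
          "    #####:    6:    unreachable();\n")
assert parse_gcov_statement_counts(sample) == {5: 3, 6: 0}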