Beispiel #1
0
    def run(data, skip_on_failure=False):
        """Run cppcheck on the given source files and score its output.

        :param data: Dict providing "source_files", "lines_of_code" and
            "use_cpp".
        :param skip_on_failure: If True, a failing cppcheck run yields a zero
            score instead of re-raising the exception.
        :return: 1. List containing the cppcheck score.
                 2. Output log.
                 3. Boolean success.
        """
        source_files = data["source_files"]
        lines_of_code = data["lines_of_code"]
        cpp = data["use_cpp"]

        language = 'c++' if cpp else 'c'
        # TODO: find out the purpose of the --template=cppcheck1' which broke the output
        cppcheck_call = [
            TOOLS.CPPCHECK.exe_name, '--enable=all', '--force',
            '--language=' + language, "-v"
        ]

        output = ""
        # Passing every file in one call can exceed the OS argument-length
        # limit ("OSError: [Errno 7] Argument list too long: 'cppcheck'"),
        # so the file list is processed in chunks.
        chunk_size = 1000

        try:
            argument_chunks = util.split_in_chunks(source_files, chunk_size)
            for chunk in argument_chunks:
                # Bug fix: the previous code aliased cppcheck_call instead of
                # copying it, then invoked the (ever-growing) base list — so
                # each chunk re-analyzed all files of the earlier chunks.
                temp_call = cppcheck_call[:]
                temp_call.extend(chunk)
                output += subprocess.check_output(temp_call,
                                                  universal_newlines=True,
                                                  stderr=subprocess.STDOUT,
                                                  encoding='utf-8',
                                                  errors='ignore') + "\n"
            warning_lines = CppcheckTool.get_warning_lines(output)
            cppcheck_output = output_classes.CppcheckOutput(warning_lines)
        except subprocess.CalledProcessError as error:
            print("cppcheck failed!")
            print(
                strings.COMPILATION_CRASHED.format(error.returncode,
                                                   error.output))
            if not skip_on_failure:
                raise
            return [0], "", False
        except Exception:  # catch the rest and exclude the analysis tool from the score
            if not skip_on_failure:
                raise
            return [0], "", False

        weighted_cppcheck_rate, temp = cppcheck_output.get_information(
            lines_of_code)
        util.write_into_file_list(strings.RESULTS_FILENAME_CPPCHECK,
                                  warning_lines)

        score = scoring.calculate_cppcheck_score_absolute(
            weighted_cppcheck_rate)

        log = strings.RUN_CPPCHECK_HEADER + "\n"
        log += temp
        log += strings.DETAILLED_RESULTS_WRITTEN_INTO.format(
            strings.RESULTS_FILENAME_CPPCHECK) + "\n"
        log += scoring.get_score_string(score, 'Cppcheck') + "\n"

        return [score], log, True
Beispiel #2
0
    def get_information(self):
        """Assemble the Lizard result log and compute the three sub-scores.

        Reads self.average_cyclomatic_complexity, self.warning_count,
        self.function_count and self.unique_rate.

        :return: 1. Cyclomatic complexity score.
                 2. Lizard warning score.
                 3. Unique (code duplication) score.
                 4. The assembled log string.
        """
        parts = []

        acc = self.average_cyclomatic_complexity
        parts.append('Average cyclomatic complexity: {}'.format(acc))
        ccn_score = scoring.calculate_cyclomatic_complexity_score_absolute(acc)
        parts.append(
            scoring.get_score_string(ccn_score, 'Cyclomatic complexity'))

        rate_of_warnings = self.warning_count / self.function_count
        parts.append(
            'Lizard warning rate (~= rate of functions that are too complex): '
            + strings.RATE_COUNT_TOTAL.format(rate_of_warnings,
                                              self.warning_count,
                                              self.function_count))
        lizard_warning_score = scoring.calculate_lizard_warning_score_absolute(
            rate_of_warnings)
        parts.append(
            scoring.get_score_string(lizard_warning_score, 'Lizard warning'))

        parts.append('Unique code rate: {}'.format(self.unique_rate))
        duplication_score = scoring.calculate_unique_score_absolute(
            self.unique_rate)
        parts.append(
            scoring.get_score_string(duplication_score,
                                     'Unique (code duplication)'))

        # Each original line ended with "\n", so join and append a trailing one.
        log = "\n".join(parts) + "\n"
        return ccn_score, lizard_warning_score, duplication_score, log
Beispiel #3
0
    def run(data, skip_on_failure=False):
        """Score how much unit-test code the project has relative to all code.

        Test files are detected via util.is_testfile(); the rate is the share
        of test LOC in the overall LOC.

        :param data: Dict providing "lines_of_code" and "source_files".
        :param skip_on_failure: Unused here; kept for a uniform tool interface.
        :return: 1. List containing the test count score.
                 2. Output log.
                 3. Boolean success.
        """
        loc = data["lines_of_code"]
        source_files = data["source_files"]

        source_files_wo_tests = [
            x for x in source_files if not util.is_testfile(x)
        ]
        loc_wo_tests = util.count_lines_of_code(source_files_wo_tests)

        # Guard against an empty project: loc == 0 previously raised
        # ZeroDivisionError. Treat it as "no test code".
        rate = (loc - loc_wo_tests) / loc if loc else 0.0
        score = scoring.calculate_testcount_score_absolute(rate)

        log = " --- TEST COUNT --- \n"
        log += strings.LINES_OF_PURE_CODE_ARE.format(loc) + "\n"
        log += "Amount of unit test LOC compared to overall LOC: {} ({}/{})\n".format(
            rate, (loc - loc_wo_tests), loc)
        log += scoring.get_score_string(score, TestCountTool.name()) + "\n"

        return [score], log, True
Beispiel #4
0
    def run(data, skip_on_failure=False):
        """Run KWStyle on every source file and score the warning rate.

        :param data: Dict providing "source_files" and "lines_of_code".
        :param skip_on_failure: If True, unexpected failures yield a zero
            score instead of re-raising.
        :return: 1. List containing the KWStyle score.
                 2. Output log.
                 3. Boolean success.
        """
        source_files = data["source_files"]
        lines_of_code = data["lines_of_code"]

        softwipe_directory = os.path.dirname(os.path.realpath(__file__))
        kwstyle_xml = os.path.join(softwipe_directory, 'KWStyle.xml')
        base_call = [TOOLS.KWSTYLE.exe_name, '-v', '-xml', kwstyle_xml]

        # KWStyle only works properly when specifying just one single input
        # file, so it is invoked once per file and the outputs are collected.
        output = ''
        for source_file in source_files:
            per_file_call = base_call + [source_file]
            try:
                output += subprocess.check_output(per_file_call,
                                                  universal_newlines=True,
                                                  stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as error:
                # Same as with the lizard call: KWStyle exits with status 1 by
                # default, so swallow the exception and keep its output.
                output += error.output
            except Exception:  # anything else excludes the tool from the score
                if not skip_on_failure:
                    raise
                return [0], "", False

        warning_count = KWStyleTool.get_warning_count(output)
        warning_rate = warning_count / lines_of_code

        util.write_into_file_string(strings.RESULTS_FILENAME_KWSTYLE, output)

        score = scoring.calculate_kwstyle_score_absolute(warning_rate)

        log = strings.RUN_KWSTYLE_HEADER + "\n"
        log += strings.RESULT_KWSTYLE_WARNING_RATE.format(
            warning_rate, warning_count, lines_of_code) + "\n"
        log += strings.DETAILLED_RESULTS_WRITTEN_INTO.format(
            strings.RESULTS_FILENAME_KWSTYLE) + "\n"
        log += scoring.get_score_string(score, 'KWStyle') + "\n"

        return [score], log, True
Beispiel #5
0
    def run(data, skip_on_failure=False):
        """Count assert statements in the sources and score the assertion rate.

        :param data: Dict providing "source_files", "lines_of_code" and
            "custom_asserts".
        :param skip_on_failure: Unused here; kept for a uniform tool interface.
        :return: 1. List containing the assertion score.
                 2. Output log.
                 3. Boolean success.
        """
        source_files = data["source_files"]
        lines_of_code = data["lines_of_code"]
        custom_asserts = data["custom_asserts"]

        assert_count = 0

        for path in source_files:
            # latin-1 decodes any byte sequence, so odd bytes in a source file
            # cannot crash the count. The with-statement closes the file even
            # if is_assert() raises (the old open()/close() pair leaked the
            # handle on an exception).
            with open(path, 'r', encoding='latin-1') as file:
                for line in file:
                    if AssertionTool.is_assert(line, custom_asserts):
                        assert_count += 1

        assertion_rate = assert_count / lines_of_code

        detailled_result_string = strings.RESULT_ASSERTION_RATE_DETAILED.format(
            count=assert_count,
            loc=lines_of_code,
            rate=assertion_rate,
            percentage=100 * assertion_rate)

        util.write_into_file_string(strings.RESULTS_FILENAME_ASSERTION_CHECK,
                                    detailled_result_string)

        score = scoring.calculate_assertion_score_absolute(assertion_rate)

        log = strings.RUN_ASSERTION_CHECK_HEADER + "\n"
        log += strings.RESULT_ASSERTION_RATE.format(
            assertion_rate, assert_count, lines_of_code) + "\n"
        log += strings.DETAILLED_RESULTS_WRITTEN_INTO.format(
            strings.RESULTS_FILENAME_ASSERTION_CHECK) + "\n"
        log += scoring.get_score_string(score, 'Assertion') + "\n"

        return [score], log, True
Beispiel #6
0
    def run(data, skip_on_failure=False):
        """Re-compile the target and run its executable under Valgrind.

        :param data: Dict providing "program_dir_abs", "use_cmake",
            "executefile" and "lines_of_code".
        :param skip_on_failure: Unused here; kept for a uniform tool interface.
        :return: 1. List containing the Valgrind score (empty list on failure).
                 2. Output log.
                 3. Boolean success.
        """
        program_dir_abs = data["program_dir_abs"] + "/softwipe_build/" if data[
            "use_cmake"] else data["program_dir_abs"]
        executefile = data["executefile"][0]
        lines_of_code = data["lines_of_code"]
        CompileTool.run(data)

        print(executefile)
        print("-------- RERUNNING COMPILATION FOR VALGRIND --------")
        print(strings.RUN_EXECUTION_WITH_SANITIZERS_HEADER)

        # --error-exitcode makes valgrind exit with 123 iff it found errors,
        # which distinguishes valgrind findings from ordinary program failures.
        command = ["valgrind", "--error-exitcode=123"]
        output = ""

        print(program_dir_abs)

        if executefile and os.path.isfile(executefile):
            # The execute file's first line holds the command line to run
            # (assumption from the code below — TODO confirm file format).
            with open(executefile, 'r') as file:
                lines = file.readlines()

            command_line = os.path.join(program_dir_abs, lines[0])
            command.extend(command_line.split())

        # Execute and get stderr, which contains the output of the sanitizers
        print(command)

        try:
            output = subprocess.check_output(command,
                                             cwd=program_dir_abs,
                                             universal_newlines=True,
                                             stderr=subprocess.STDOUT)
        except FileNotFoundError as e1:
            print(e1)
            # NOTE(review): command[1] is the --error-exitcode flag, not the
            # missing file; reporting the executable path would be clearer —
            # left unchanged to preserve the message's current content.
            print(strings.EXECUTION_FILE_NOT_FOUND.format(command[1]))
            return [], "", False
        except subprocess.CalledProcessError as error:
            if error.returncode == 123:
                # Bug fix: exit code 123 means valgrind DID find errors. Keep
                # its output — previously this branch did `pass` and left
                # `output` empty, so the warning count was always 0 exactly
                # when valgrind reported problems.
                output = error.output
            else:
                print(error.output)
                raise

        weighted_warnings = ValgrindTool.get_weighted_warning_count(output)
        warning_rate = weighted_warnings / lines_of_code
        warning_log = ValgrindTool.get_warning_log(output)
        score = scoring.calculate_valgrind_score_absolute(warning_rate)

        util.write_into_file_string(strings.RESULTS_FILENAME_VALGRIND, output)

        log = strings.RUN_VALGRIND_ANALYSIS_HEADER + "\n"
        log += warning_log + "\n"
        # TODO: make and print filename to user
        log += "Weighted Valgrind warning rate: {} ({}/{})".format(
            warning_rate, weighted_warnings, lines_of_code) + "\n"
        log += scoring.get_score_string(score, ValgrindTool.name()) + "\n"
        log += strings.DETAILLED_RESULTS_WRITTEN_INTO.format(
            strings.RESULTS_FILENAME_VALGRIND)

        return [score], log, True
Beispiel #7
0
    def run(data, skip_on_failure=False):
        """Build the target under Infer's capture step and run "infer analyze".

        :param data: Dict providing "program_dir_abs", "lines_of_code",
            "use_cmake", "use_make" and "excluded_paths".
        :param skip_on_failure: If True, a failing Infer run yields a zero
            score instead of re-raising.
        :return: 1. List containing the Infer score.
                 2. Output log.
                 3. Boolean success.
        """
        program_dir_abs = data["program_dir_abs"]
        lines_of_code = data["lines_of_code"]
        use_cmake = data["use_cmake"]
        use_make = data["use_make"]
        excluded_paths = data["excluded_paths"]

        compiled_ok = False
        if use_cmake:
            program_dir_abs += "/" + strings.INFER_BUILD_DIR_NAME
            compiled_ok = InferTool.compile_with_cmake(program_dir_abs,
                                                       excluded_paths)
        elif use_make:
            compiled_ok = InferTool.compile_with_make(program_dir_abs,
                                                      excluded_paths)

        if not compiled_ok:
            return [0], "", False

        # TODO: maybe fix the error handling differently (not by the --keep-going flag)
        analyze_call = [TOOLS.INFER.exe_name, "analyze", "--keep-going"]

        try:
            subprocess.check_output(analyze_call,
                                    cwd=program_dir_abs,
                                    universal_newlines=True,
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as error:
            print(
                strings.COMPILATION_CRASHED.format(error.returncode,
                                                   error.output))
            template = "An exception of type {0} occurred. Arguments:\n{1!r}"
            print(template.format(type(error).__name__, error.args))
            if not skip_on_failure:
                raise
            return [0], "", False
        except Exception:  # catch the rest and exclude the analysis tool from the score
            if not skip_on_failure:
                raise
            return [0], "", False

        infer_out_path = util.find_file(
            program_dir_abs,
            strings.INFER_OUTPUT_FILE_NAME,
            directory=strings.INFER_OUTPUT_DIR_NAME)
        if infer_out_path == "":
            return [0], "Could not find {}".format(
                strings.INFER_OUTPUT_FILE_NAME), False

        file_out, warnings, warning_num = InferTool.get_warnings_from_output(
            infer_out_path)
        util.write_into_file_string(strings.RESULTS_FILENAME_INFER, file_out)

        infer_warning_rate = warning_num / lines_of_code
        score = scoring.calculate_infer_score_absolute(infer_warning_rate)

        log_parts = [
            strings.RUN_INFER_ANALYSIS_HEADER,
            warnings,
            # TODO: make and print filename to user
            "Weighted Infer warning rate: {} ({}/{})".format(
                infer_warning_rate, warning_num, lines_of_code),
            scoring.get_score_string(score, 'Infer'),
        ]
        log = "\n".join(log_parts) + "\n"

        return [score], log, True
Beispiel #8
0
    def run(data, skip_on_failure=False, num_tries=5):
        """Runs clang-tidy.

        :param data: Dict providing "program_dir_abs" (the absolute path to
            the root directory of the target program), "source_files" (the
            list of source files to analyze), "lines_of_code" (the lines of
            pure code count) and "use_cpp" (True if C++, False if C).
        :param skip_on_failure: If True, unexpected failures yield a zero
            score instead of re-raising.
        :param num_tries: The amount of times clang-tidy should be rerun if
            it runs into internal problems.
        :return: 1. The clang-tidy score.
                 2. output log
                 3. boolean success
        """
        # NOTE: the docstring used to sit below these assignments as a plain
        # (no-op) string expression; it is now a real docstring.
        program_dir_abs = data["program_dir_abs"]
        source_files = data["source_files"]
        lines_of_code = data["lines_of_code"]
        cpp = data["use_cpp"]

        clang_tidy_call = [TOOLS.CLANG_TIDY.exe_name]
        clang_tidy_call.extend(source_files)

        # Create checks list
        clang_tidy_checks = strings.CLANG_TIDY_CHECKS_CPP if cpp else strings.CLANG_TIDY_CHECKS_C
        clang_tidy_call.append('-checks=' + clang_tidy_checks)
        clang_tidy_call.extend(['-p', program_dir_abs])

        try:
            output = subprocess.check_output(clang_tidy_call,
                                             universal_newlines=True,
                                             stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as error:
            output = error.output

            if num_tries < 0:  # make clang-tidy return a failure status if it has no tries left
                return [0], "", False

            if error.returncode == -11:  # clang-tidy seems to run into segfaults sometimes, so rerun it if that happens
                # Bug fix: propagate skip_on_failure so a retried run keeps
                # the caller's failure-handling behavior (it was previously
                # reset to the default False).
                return ClangTidyTool.run(data,
                                         skip_on_failure=skip_on_failure,
                                         num_tries=num_tries - 1)
            # clang-tidy can exit with exit code 1 if there is no compilation database, which might be the case when
            # compiling with just clang. Thus, ignore the exception here.
        except Exception:  # catch the rest and exclude the analysis tool from the score
            if not skip_on_failure:
                raise
            return [0], "", False

        warning_lines = ClangTidyTool.get_warning_lines(output)
        weighted_warning_count = ClangTidyTool.get_weighted_warning_count(
            warning_lines)
        warning_rate = weighted_warning_count / lines_of_code

        beautified_warning_lines = ClangTidyTool.beatify_warning_lines(
            warning_lines)
        util.write_into_file_list(strings.RESULTS_FILENAME_CLANG_TIDY,
                                  beautified_warning_lines)

        score = scoring.calculate_clang_tidy_score_absolute(warning_rate)

        log = strings.RUN_CLANG_TIDY_HEADER + "\n"
        log += strings.RESULT_WEIGHTED_CLANG_TIDY_WARNING_RATE.format(
            warning_rate, weighted_warning_count, lines_of_code) + "\n"
        log += strings.DETAILLED_RESULTS_WRITTEN_INTO.format(
            strings.RESULTS_FILENAME_CLANG_TIDY) + "\n"
        log += scoring.get_score_string(score, 'Clang-tidy') + "\n"

        return [score], log, True