Пример #1
0
def check_assert_usage(source_files, lines_of_code):
    """
    Check how many assertions are used in the code.
    :param source_files: The list of files to count assertions in.
    :param lines_of_code: The total lines of code.
    :return: The assertion score.
    """
    print(strings.RUN_ASSERTION_CHECK_HEADER)
    assert_count = 0

    for file in source_files:
        # "with" guarantees the handle is closed even if reading or the
        # assertion check raises (the original leaked the handle on error).
        # latin-1 decodes any byte sequence, so unknown encodings cannot crash the read.
        with open(file, 'r', encoding='latin-1') as f:
            for line in f:
                if assertion_used_in_code_line(line):
                    assert_count += 1

    assertion_rate = assert_count / lines_of_code

    detailled_result_string = strings.RESULT_ASSERTION_RATE_DETAILED.format(count=assert_count, loc=lines_of_code,
                                                                            rate=assertion_rate,
                                                                            percentage=100*assertion_rate)
    print(strings.RESULT_ASSERTION_RATE.format(assertion_rate, assert_count, lines_of_code))
    util.write_into_file_string(strings.RESULTS_FILENAME_ASSERTION_CHECK, detailled_result_string)

    score = scoring.calculate_assertion_score(assertion_rate)
    scoring.print_score(score, 'Assertion')
    return score
Пример #2
0
def run_kwstyle(source_files, lines_of_code):
    """
    Runs KWStyle.
    :param source_files: The list of source files to analyze.
    :param lines_of_code: The lines of pure code count.
    :return: The KWStyle score.
    """
    print(strings.RUN_KWSTYLE_HEADER)

    tool_dir = os.path.dirname(os.path.realpath(__file__))
    xml_config = os.path.join(tool_dir, 'KWStyle.xml')
    base_call = [TOOLS.KWSTYLE.exe_name, '-v', '-xml', xml_config]

    collected_output = ''
    # KWStyle only behaves correctly when given a single input file per
    # invocation, so run it once per source file and concatenate the outputs.
    for current_file in source_files:
        call = list(base_call)
        call.append(current_file)
        try:
            collected_output += subprocess.check_output(call, universal_newlines=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as error:
            # Same as with the lizard call: KWStyle exits with status 1 by
            # default, so ignore the status but keep whatever it printed.
            collected_output += error.output

    warning_count = get_kwstyle_warning_count_from_kwstyle_output(collected_output)
    warning_rate = warning_count / lines_of_code

    print(strings.RESULT_KWSTYLE_WARNING_RATE.format(warning_rate, warning_count, lines_of_code))
    util.write_into_file_string(strings.RESULTS_FILENAME_KWSTYLE, collected_output)

    score = scoring.calculate_kwstyle_score(warning_rate)
    scoring.print_score(score, 'KWStyle')
    return score
Пример #3
0
def run_lizard(source_files):
    """
    Runs Lizard.
    :param source_files: The list of source files to analyze.
    :return: The cyclomatic complexity score, warning score, and unique score
    """
    # NOTE We deliberately parse lizard's printed output instead of using it as a
    # python module ("import lizard"), at least for now: the module is not well
    # documented, so extracting _all_ the information through it is hard, and
    # calling the executable lets us detect its presence via shutil.which,
    # consistent with how we check for the other tools.
    print(strings.RUN_LIZARD_HEADER)

    call = [TOOLS.LIZARD.exe_name, '-Eduplicate', '-l', 'cpp', *source_files]

    try:
        printed_output = subprocess.check_output(call, universal_newlines=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as error:
        # Lizard exits with code 1 whenever warnings were generated; swallow
        # the exception so the tool doesn't crash, but keep the output.
        printed_output = error.output

    lizard_output = get_lizard_output_object_from_lizard_printed_output(printed_output)
    # print_information_and_return_scores() also prints the scores as a side effect
    cyclomatic_complexity_score, warning_score, unique_score = lizard_output.print_information_and_return_scores()
    util.write_into_file_string(strings.RESULTS_FILENAME_LIZARD, printed_output)

    return cyclomatic_complexity_score, warning_score, unique_score
Пример #4
0
def run_execution(program_dir_abs, executefile, cmake, lines_of_code):
    """
    Execute the program and parse the output of the clang sanitizers.
    :param program_dir_abs: The absolute path to the root directory of the target program.
    :param executefile: The executefile that contains a command line for executing the program.
    :param cmake: Whether CMake has been used for compilation or not.
    :param lines_of_code: The lines of pure code count.
    :return The weighted sum of sanitizer errors.
    """
    print(strings.RUN_EXECUTION_WITH_SANITIZERS_HEADER)

    command, command2 = build_command(program_dir_abs, executefile, cmake)
    # Keep executing after the first ASan error so we count all of them.
    os.environ['ASAN_OPTIONS'] = 'halt_on_error=0'

    def run_and_capture_stderr(cmd):
        # The sanitizer reports go to stderr; stdout is irrelevant here.
        return subprocess.run(cmd, universal_newlines=True, stdout=subprocess.DEVNULL,
                              stderr=subprocess.PIPE).stderr

    try:
        output = run_and_capture_stderr(command)
    except FileNotFoundError as primary_error:
        if command2 is None:
            print(primary_error)
            print(strings.EXECUTION_FILE_NOT_FOUND.format(command[0]))
            raise ExecutionFailedException
        try:
            output = run_and_capture_stderr(command2)
        except FileNotFoundError as fallback_error:
            print(primary_error)
            print(strings.EXECUTION_FILE_NOT_FOUND.format(command[0]))
            print(fallback_error)
            print(strings.EXECUTION_FILE_NOT_FOUND.format(command2[0]))
            raise ExecutionFailedException

    asan_error_count, ubsan_error_count = get_sanitizer_error_count_from_sanitizer_output(output)
    asan_error_rate = asan_error_count / lines_of_code
    ubsan_error_rate = ubsan_error_count / lines_of_code

    print(strings.RESULT_ASAN_ERROR_RATE.format(asan_error_rate, asan_error_count, lines_of_code))
    print(strings.RESULT_UBSAN_ERROR_RATE.format(ubsan_error_rate, ubsan_error_count, lines_of_code))
    util.write_into_file_string(strings.RESULTS_FILENAME_SANITIZERS, output)

    weighted_error_count = 3 * asan_error_count + 3 * ubsan_error_count
    return weighted_error_count
Пример #5
0
    def run(data, skip_on_failure=False):
        """
        Run KWStyle on the given source files and score the warning rate.
        :param data: Dict providing "source_files" and "lines_of_code".
        :param skip_on_failure: If True, unexpected errors exclude the tool
                                from the score instead of propagating.
        :return: ([score], log string, success flag)
        """
        source_files = data["source_files"]
        lines_of_code = data["lines_of_code"]

        tool_dir = os.path.dirname(os.path.realpath(__file__))
        xml_config = os.path.join(tool_dir, 'KWStyle.xml')
        base_call = [TOOLS.KWSTYLE.exe_name, '-v', '-xml', xml_config]

        output = ''
        # KWStyle only behaves correctly with a single input file per call,
        # so invoke it once per source file and concatenate all outputs.
        for current_file in source_files:
            call = list(base_call)
            call.append(current_file)
            try:
                output += subprocess.check_output(call,
                                                  universal_newlines=True,
                                                  stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as error:
                # Same as with the lizard call: KWStyle exits with status 1
                # by default, so ignore it but keep the printed output.
                output += error.output
            except Exception:  # anything else: drop the tool from the score
                if not skip_on_failure:
                    raise
                return [0], "", False

        warning_count = KWStyleTool.get_warning_count(output)
        warning_rate = warning_count / lines_of_code

        util.write_into_file_string(strings.RESULTS_FILENAME_KWSTYLE, output)

        score = scoring.calculate_kwstyle_score_absolute(warning_rate)

        log = "\n".join([
            strings.RUN_KWSTYLE_HEADER,
            strings.RESULT_KWSTYLE_WARNING_RATE.format(warning_rate, warning_count, lines_of_code),
            strings.DETAILLED_RESULTS_WRITTEN_INTO.format(strings.RESULTS_FILENAME_KWSTYLE),
            scoring.get_score_string(score, 'KWStyle'),
        ]) + "\n"

        return [score], log, True
Пример #6
0
    def compile_with_make(program_dir_abs, excluded_paths):
        """
        Compile the program under infer via make so infer can analyze it later.
        :param program_dir_abs: The absolute path to the root directory of the target program.
        :param excluded_paths: A list containing files to be excluded.
        :return: True if the compilation was successful,
                 False if the compilation was not successful
        """
        exclude_args = InferTool.prepare_exclude_arguments(
            program_dir_abs, excluded_paths)
        capture_call = [TOOLS.INFER.exe_name, "capture", *exclude_args, "--", "make"]

        # Not every makefile provides a "clean" target; if it is missing, just move on.
        try:
            subprocess.check_output(["make", "clean"],
                                    cwd=program_dir_abs,
                                    universal_newlines=True,
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError:
            pass

        try:
            subprocess.check_output(capture_call,
                                    cwd=program_dir_abs,
                                    universal_newlines=True,
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as error:
            crash_message = strings.INFER_COMPILATION_CRASHED
            util.write_into_file_string(
                strings.ERROR_FILENAME_INFER_COMPILATION,
                crash_message.format(error.returncode, error.output))
            print(crash_message.format(
                error.returncode,
                strings.ERROR_LOG_WRITTEN_INTO.format(
                    strings.ERROR_FILENAME_INFER_COMPILATION)))
            print()
            return False
        return True
Пример #7
0
    def compile_with_cmake(program_dir_abs, excluded_paths):
        """
        Compile the program with infer using cmake to allow infer to analyze it later.
        :param program_dir_abs: The absolute path to the root directory of the target program.
        :param excluded_paths: A list containing files to be excluded.
        :return: True if the compilation was successful
                 False if the compilation was not successful
        """
        build_path = util.create_build_directory(program_dir_abs,
                                                 build_dir_name="infer_build")
        util.clear_directory(build_path)

        # Use the configured executable name (consistent with compile_with_make)
        # instead of a hard-coded "infer" literal.
        infer_call_compile = [TOOLS.INFER.exe_name, "compile", "--", "cmake", ".."]
        infer_call_capture = [TOOLS.INFER.exe_name, "capture"]
        infer_call_capture.extend(
            InferTool.prepare_exclude_arguments(program_dir_abs,
                                                excluded_paths))
        infer_call_capture.extend(["--", "make"])

        try:
            subprocess.check_output(infer_call_compile,
                                    cwd=build_path,
                                    universal_newlines=True,
                                    stderr=subprocess.STDOUT)
            subprocess.check_output(infer_call_capture,
                                    cwd=build_path,
                                    universal_newlines=True,
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            # Persist the full crash output, but only show the user where the log went.
            util.write_into_file_string(
                strings.ERROR_FILENAME_INFER_COMPILATION,
                strings.INFER_COMPILATION_CRASHED.format(
                    e.returncode, e.output))
            print(
                strings.INFER_COMPILATION_CRASHED.format(
                    e.returncode,
                    strings.ERROR_LOG_WRITTEN_INTO.format(
                        strings.ERROR_FILENAME_INFER_COMPILATION)))
            print()
            return False
        return True
Пример #8
0
    def run(data, skip_on_failure=False):
        """
        Runs Lizard.
        :param data: Dict providing "source_files", the list of source files to analyze.
        :param skip_on_failure: If True, unexpected errors exclude the tool from the
                                score instead of propagating.
        :return: ([cyclomatic complexity score, warning score, unique score],
                 log string, success flag)
        """
        # Fix: the original placed this docstring AFTER the first statement,
        # which makes it a dead string expression, not a docstring.
        source_files = data["source_files"]
        # NOTE Although lizard can be imported as a python module it is actually easier to parse its output
        # (for now at least - this might of course change). This is because the module is not well documented so it's
        # hard to find out how exactly one can get _all_ information using it. Plus, this way we can check if it is
        # installed using shutil.which --> consistent with how we check for the other tools.

        lizard_call = [TOOLS.LIZARD.exe_name, '-Eduplicate', '-l', 'cpp']
        lizard_call.extend(source_files)

        try:
            output = subprocess.check_output(lizard_call,
                                             universal_newlines=True,
                                             stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as error:  # If warnings are generated, Lizard exits with exit code 1
            # Basically, this catches the exception and ignores it such that this tool doesn't crash
            # while still keeping the output of the command
            output = error.output
        except Exception:  # catch the rest and exclude the analysis tool from the score
            if not skip_on_failure:
                raise
            return [0, 0, 0], "", False

        lizard_output = LizardTool.filter_output(output)
        cyclomatic_complexity_score, warning_score, unique_score, information_log = \
            lizard_output.get_information()
        util.write_into_file_string(strings.RESULTS_FILENAME_LIZARD, output)

        log = strings.RUN_LIZARD_HEADER + "\n"
        log += strings.DETAILLED_RESULTS_WRITTEN_INTO.format(
            strings.RESULTS_FILENAME_LIZARD) + "\n"
        log += information_log

        return [cyclomatic_complexity_score, warning_score,
                unique_score], log, True
Пример #9
0
    def run(data, skip_on_failure=False):
        """
        Count assertions in the source files and score the assertion rate.
        :param data: Dict providing "source_files", "lines_of_code" and "custom_asserts".
        :param skip_on_failure: Unused here; kept for interface compatibility with the other tools.
        :return: ([score], log string, True)
        """
        source_files = data["source_files"]
        lines_of_code = data["lines_of_code"]
        custom_asserts = data["custom_asserts"]

        assert_count = 0

        for path in source_files:
            # "with" guarantees the handle is closed even if is_assert raises
            # (the original leaked the handle on error).
            # latin-1 decodes any byte sequence, so unknown encodings cannot crash the read.
            with open(path, 'r', encoding='latin-1') as file:
                for line in file:
                    if AssertionTool.is_assert(line, custom_asserts):
                        assert_count += 1

        assertion_rate = assert_count / lines_of_code

        detailled_result_string = strings.RESULT_ASSERTION_RATE_DETAILED.format(
            count=assert_count,
            loc=lines_of_code,
            rate=assertion_rate,
            percentage=100 * assertion_rate)

        util.write_into_file_string(strings.RESULTS_FILENAME_ASSERTION_CHECK,
                                    detailled_result_string)

        score = scoring.calculate_assertion_score_absolute(assertion_rate)

        log = strings.RUN_ASSERTION_CHECK_HEADER + "\n"
        log += strings.RESULT_ASSERTION_RATE.format(
            assertion_rate, assert_count, lines_of_code) + "\n"
        log += strings.DETAILLED_RESULTS_WRITTEN_INTO.format(
            strings.RESULTS_FILENAME_ASSERTION_CHECK) + "\n"
        log += scoring.get_score_string(score, 'Assertion') + "\n"

        return [score], log, True
Пример #10
0
    def run(data, skip_on_failure=False):
        """
        Run the target program under valgrind and score the weighted warning rate.
        :param data: Dict providing "program_dir_abs", "use_cmake", "executefile"
                     and "lines_of_code".
        :param skip_on_failure: Unused here; kept for interface compatibility with the other tools.
        :return: ([score], log string, success flag)
        """
        program_dir_abs = data["program_dir_abs"] + "/softwipe_build/" if data[
            "use_cmake"] else data["program_dir_abs"]
        executefile = data["executefile"][0]
        lines_of_code = data["lines_of_code"]
        # Recompile first so valgrind analyzes an up-to-date binary.
        CompileTool.run(data)

        print("-------- RERUNNING COMPILATION FOR VALGRIND --------")
        print(strings.RUN_EXECUTION_WITH_SANITIZERS_HEADER)

        # Exit code 123 lets us tell "valgrind found errors" apart from a
        # genuine program failure below.
        command = ["valgrind", "--error-exitcode=123"]
        output = ""

        if executefile and os.path.isfile(executefile):
            # The first line of the execute file holds the command line to run.
            with open(executefile, 'r') as file:
                lines = file.readlines()

            command_line = os.path.join(program_dir_abs, lines[0])
            command.extend(command_line.split())

        try:
            output = subprocess.check_output(command,
                                             cwd=program_dir_abs,
                                             universal_newlines=True,
                                             stderr=subprocess.STDOUT)
        except FileNotFoundError as e1:
            print(e1)
            print(strings.EXECUTION_FILE_NOT_FOUND.format(command[1]))
            return [], "", False
        except subprocess.CalledProcessError as error:
            if error.returncode == 123:
                # Fix: valgrind reported errors; the original discarded
                # error.output here, leaving output == "" so every warning
                # was lost. Keep the output so it can be counted.
                output = error.output
            else:
                print(error.output)
                raise

        weighted_warnings = ValgrindTool.get_weighted_warning_count(output)
        warning_rate = weighted_warnings / lines_of_code
        warning_log = ValgrindTool.get_warning_log(output)
        score = scoring.calculate_valgrind_score_absolute(warning_rate)

        util.write_into_file_string(strings.RESULTS_FILENAME_VALGRIND, output)

        log = strings.RUN_VALGRIND_ANALYSIS_HEADER + "\n"
        log += warning_log + "\n"
        # TODO: make and print filename to user
        log += "Weighted Valgrind warning rate: {} ({}/{})".format(
            warning_rate, weighted_warnings, lines_of_code) + "\n"
        log += scoring.get_score_string(score, ValgrindTool.name()) + "\n"
        log += strings.DETAILLED_RESULTS_WRITTEN_INTO.format(
            strings.RESULTS_FILENAME_VALGRIND)

        return [score], log, True
Пример #11
0
    def run(data, skip_on_failure=False):
        """
        Compile the target with infer, run "infer analyze", and score the warning rate.
        :param data: Dict providing "program_dir_abs", "lines_of_code", "use_cmake",
                     "use_make" and "excluded_paths".
        :param skip_on_failure: If True, analysis failures exclude the tool from the
                                score instead of propagating.
        :return: ([score], log string, success flag)
        """
        program_dir_abs = data["program_dir_abs"]
        lines_of_code = data["lines_of_code"]
        use_cmake = data["use_cmake"]
        use_make = data["use_make"]
        excluded_paths = data["excluded_paths"]
        compilation_status = False

        if use_cmake:
            # cmake builds happen inside a dedicated infer build directory
            program_dir_abs += "/" + strings.INFER_BUILD_DIR_NAME
            compilation_status = InferTool.compile_with_cmake(
                program_dir_abs, excluded_paths)
        elif use_make:
            compilation_status = InferTool.compile_with_make(
                program_dir_abs, excluded_paths)

        if not compilation_status:
            return [0], "", False

        # TODO: maybe fix the error handling differently (not by the --keep-going flag)
        infer_analyze = [TOOLS.INFER.exe_name, "analyze", "--keep-going"]

        try:
            subprocess.check_output(infer_analyze,
                                    cwd=program_dir_abs,
                                    universal_newlines=True,
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as error:
            print(
                strings.COMPILATION_CRASHED.format(error.returncode,
                                                   error.output))
            template = "An exception of type {0} occurred. Arguments:\n{1!r}"
            message = template.format(type(error).__name__, error.args)
            print(message)
            if not skip_on_failure:
                raise
            return [0], "", False
        except Exception:  # catch the rest and exclude the analysis tool from the score
            if not skip_on_failure:
                raise
            return [0], "", False

        infer_out_path = util.find_file(
            program_dir_abs,
            strings.INFER_OUTPUT_FILE_NAME,
            directory=strings.INFER_OUTPUT_DIR_NAME)
        if infer_out_path == "":
            return [0], "Could not find {}".format(
                strings.INFER_OUTPUT_FILE_NAME), False

        file_out, warnings, warning_num = InferTool.get_warnings_from_output(
            infer_out_path)
        util.write_into_file_string(strings.RESULTS_FILENAME_INFER, file_out)

        infer_warning_rate = warning_num / lines_of_code
        score = scoring.calculate_infer_score_absolute(infer_warning_rate)

        log = strings.RUN_INFER_ANALYSIS_HEADER + "\n"
        log += warnings + "\n"
        # TODO: make and print filename to user
        log += "Weighted Infer warning rate: {} ({}/{})".format(
            infer_warning_rate, warning_num, lines_of_code) + "\n"
        log += scoring.get_score_string(score, 'Infer') + "\n"

        return [score], log, True