Example #1
    def run_tools(self, path_to_analyze: str, files_list: list,
                  output_dir: str):
        """
        'path_to_analyze' is used if 'files_list' is None, or if the tool
        needs the path to calculate the correct results.
        """

        outputs = {}

        if not files_list:
            self.files_to_analyze = list_of_files(path_to_analyze,
                                                  ACCEPTED_EXTENSIONS)
        else:
            self.files_to_analyze = files_list

        # Check extensions supported by tools
        filtered_files_per_tool = {}
        # Iterate over a copy: tools that have no supported files are removed
        # from self._enabled_tools inside the loop.
        current_enabled_tools = list(self._enabled_tools)
        for tool_name in current_enabled_tools:
            filtered_files = _filter_unsupported_files(
                self.files_to_analyze, self._tool_extensions.get(tool_name))

            log_debug("\t{} FILES_LIST:\n{}", tool_name, filtered_files)

            if not filtered_files:
                self._enabled_tools.remove(tool_name)
            else:
                filtered_files_per_tool[tool_name] = filtered_files

        # rust-code-analysis can read just a single json file
        rust_code_analysis_file = filtered_files_per_tool.get(
            "rust-code-analysis", None)
        if (rust_code_analysis_file and len(rust_code_analysis_file) != 1
                and os.path.isdir(rust_code_analysis_file[0])):
            self._enabled_tools.remove("rust-code-analysis")

        for name in self._enabled_tools:
            self._run_tool(name, filtered_files_per_tool[name], outputs,
                           output_dir)

        self.raw_output = outputs
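Usage sketch (not from the original sources): assuming the 'Tools' class and 'tools' module shown in Example #6, 'run_tools' can be driven either with a directory to scan or with an explicit file list. Paths below are placeholders.

# Minimal usage sketch; "./CC++_Tools", "./project_src" and "./out" are
# placeholder paths for this illustration.
import tools

t = tools.Tools("./CC++_Tools")
t.check_tools_existence()

# With files_list=None, run_tools() builds the list itself from the files
# with ACCEPTED_EXTENSIONS found under path_to_analyze.
t.run_tools("./project_src", None, "./out")
print(t.raw_output.keys())  # one entry per tool that actually produced output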
Example #2
def _analyze_path(
    tool: Tools,
    path,
    accepted_extensions,
    run_n_parse_funct,
    output_dir,
    output_list: list,
):  # raises FileNotFoundError if 'path' does not exist

    for f in os.listdir(path):
        ff = os.path.join(path, f)
        log_debug("\tpath: {}", f)
        if os.path.isdir(ff):  # If path is a DIR, recurse.
            log_debug("\t'_analyze_path': dir: {}", f)
            _analyze_path(
                tool,
                ff,
                accepted_extensions,
                run_n_parse_funct,
                output_dir,
                output_list,
            )

        elif os.path.isfile(ff):  # If path is a FILE, check its extension
            base_name = os.path.basename(f)
            extension = base_name[base_name.rfind(".") + 1:]
            if extension in accepted_extensions:
                log_debug("\t'_analyze_path': file: {}", f)
                parsed_result = run_n_parse_funct(ff, output_dir)
                output_list.append(parsed_result)
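Usage sketch (illustrative only): the 'run_n_parse' callback below is hypothetical and stands in for a tool-specific run-and-parse function; the 'tool' argument is only forwarded to the recursive calls, so None is enough for this illustration.

# Hypothetical callback: run a tool on a single file and return a parsed result.
def run_n_parse(file_path, output_dir):
    return {"file": file_path, "loc": 0}  # placeholder output

results = []
# Extensions are compared without the leading dot (see the slicing above).
_analyze_path(None, "./project_src", ["c", "cc", "cpp", "h", "hpp"],
              run_n_parse, "./out", results)
# 'results' now holds one parsed entry per accepted source file found.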
Example #3
def main() -> None:
    parser = argparse.ArgumentParser(
        description="This tool compares metrics produced by others software "
        "and saved as json files",
        epilog="The manual and the source code of this program can be found on"
        " GitHub at https://github.com/SoftengPoliTo/SoftwareMetrics",
    )

    # Optional args
    parser.add_argument(
        "-v",
        "--verbosity",
        action="store_true",
        help="Increase output verbosity, useful for debugging purposes",
    )

    parser.add_argument(
        "-g",
        "--globals",
        action="store_true",
        help="Compare json files global metrics",
    )

    parser.add_argument(
        "-f",
        "--files",
        action="store_true",
        help="Compare json files file metrics",
    )

    parser.add_argument(
        "-fu",
        "--functions",
        action="store_true",
        help="Compare json files function metrics",
    )

    # Args
    parser.add_argument(
        "-m",
        "--metrics",
        type=str,
        nargs="*",
        required=True,
        help="List of metrics to be compared",
    )

    parser.add_argument(
        "-i",
        "--inputs",
        metavar="FILE.json",
        type=str,
        nargs=2,
        required=True,
        help="Json files to compare",
    )

    args = parser.parse_args()

    # If the -v option is enabled, the program is in debug mode
    log_conf(args.verbosity)
    log_debug("\targs={}", vars(args))

    # Check the json files provided by the user
    def check_json_name(name):
        if not os.path.isfile(name):
            log_err("\t{} is not a valid file.", ExitCode.WRONG_FILES, name)

    check_json_name(args.inputs[0])
    check_json_name(args.inputs[1])

    # Check the metrics provided by the user
    metrics = check_metrics(args.metrics)

    # Run comparison
    run_comparison(
        args.inputs[0],
        args.inputs[1],
        metrics,
        args.globals,
        args.files,
        args.functions,
    )
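Illustrative invocation (assuming access to the 'parser' object built above); the file names are placeholders.

# How the parser above interprets a typical command line.
args = parser.parse_args(
    ["-g", "-m", "CC", "LOC", "-i", "results_a.json", "results_b.json"]
)
# args.globals   -> True
# args.metrics   -> ["CC", "LOC"]
# args.inputs    -> ["results_a.json", "results_b.json"]
# args.files, args.functions -> False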
Example #4
def cccc_output_reader(cccc_xml_directory_path: str):
    base_dir = os.path.realpath(cccc_xml_directory_path)
    per_function_res = []

    with open(os.path.join(cccc_xml_directory_path, "cccc.xml"),
              "r") as cccc_file:
        cccc_xml = minidom.parse(cccc_file)

    project = cccc_xml.getElementsByTagName("CCCC_Project")
    modules = (project[0].getElementsByTagName("oo_design")
               [0].getElementsByTagName("module"))
    for module in modules:
        module_name = module.getElementsByTagName(
            "name")[0].firstChild.nodeValue

        WMC = module.getElementsByTagName(
            "weighted_methods_per_class_unity")[0].getAttribute("value")
        DIT = module.getElementsByTagName(
            "depth_of_inheritance_tree")[0].getAttribute("value")
        NOC = module.getElementsByTagName(
            "number_of_children")[0].getAttribute("value")
        CBO = module.getElementsByTagName(
            "coupling_between_objects")[0].getAttribute("value")

        log_debug(
            "\tCCCC output reader. Reading path: {}",
            os.path.join(base_dir, module_name + ".xml"),
        )

        with open(os.path.join(base_dir, module_name + ".xml"),
                  "r") as module_file:
            module_xml = minidom.parse(module_file)

        CC_module = (module_xml.getElementsByTagName(
            "module_summary")[0].getElementsByTagName(
                "McCabes_cyclomatic_complexity")[0].getAttribute("value"))
        member_functions = module_xml.getElementsByTagName(
            "procedural_detail")[0].getElementsByTagName("member_function")

        list_of_member_functions: List[Dict[str, Any]] = []
        for member_function in member_functions:
            member_function_name = member_function.getElementsByTagName(
                "name")[0].firstChild.nodeValue

            file_in = None
            line_number = None
            definition_only = True
            for extent in member_function.getElementsByTagName("extent"):
                if (extent.getElementsByTagName("description")
                    [0].firstChild.nodeValue == "definition"):
                    definition_only = False
                    file_in = extent.getElementsByTagName(
                        "source_reference")[0].getAttribute("file")
                    line_number = extent.getElementsByTagName(
                        "source_reference")[0].getAttribute("line")
            if definition_only:
                # If it is not the implementation of the function, we skip it
                continue

            member_function_cc = member_function.getElementsByTagName(
                "McCabes_cyclomatic_complexity")[0].getAttribute("value")
            lines_of_code = member_function.getElementsByTagName(
                "lines_of_code")[0].getAttribute("value")
            lines_of_comment = member_function.getElementsByTagName(
                "lines_of_comment")[0].getAttribute("value")

            per_function_values = {
                "file": file_in,
                "line_number": line_number,
                "func_name": member_function_name,
                "functionCC": member_function_cc,
                "loc": lines_of_code,
                "cloc": lines_of_comment,
            }
            list_of_member_functions.append(per_function_values)

        per_module_metrics = {
            "CC": CC_module,
            "WMC": WMC,
            "DIT": DIT,
            "NOC": NOC,
            "CBO": CBO,
        }
        # {"filename": file_in, "func_name": func_name,
        # "line_number": line_number, "values": per_function_values}
        per_function_res.append({
            "module_name": module_name,
            "per_module_metrics": per_module_metrics,
            "functions": list_of_member_functions,
        })
    return per_function_res
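For reference, each entry of the returned list has roughly the shape below; the values are illustrative, and all metric values are strings because they come from XML attributes.

# Illustrative shape of one entry returned by cccc_output_reader().
[
    {
        "module_name": "MyClass",
        "per_module_metrics": {
            "CC": "12", "WMC": "7", "DIT": "1", "NOC": "0", "CBO": "3",
        },
        "functions": [
            {
                "file": "src/myclass.cpp",
                "line_number": "42",
                "func_name": "MyClass::do_work( int )",
                "functionCC": "4",
                "loc": "30",
                "cloc": "5",
            },
        ],
    },
]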
Example #5
def standardizer_cccc(data):
    # Support structures
    tmp_dict_files = {}
    tmp_dict_modules = {}

    # for d in data:
    #   for module in d:
    for module in data:
        # If there are no functions, the module represents a class which is not
        # defined in the files we analyzed.
        # Hence, all its stats are 0, and the other tools will not have those
        # entries, so we can omit it.

        # We could still put these in the 'global' section
        if len(module["functions"]) == 0:
            continue

        if module["module_name"] not in tmp_dict_modules:
            tmp_dict_modules[
                module["module_name"]
            ] = {  # It's going to be added in the 'global' section
                "class name": module["module_name"],
                "CC": module["per_module_metrics"]["CC"],
                "C&K": {
                    "WMC": module["per_module_metrics"]["WMC"],
                    "DIT": module["per_module_metrics"]["DIT"],
                    "NOC": module["per_module_metrics"]["NOC"],
                    "CBO": module["per_module_metrics"]["CBO"],
                },
            }

        for func in module["functions"]:
            if (func["file"]
                    not in tmp_dict_files):  # Create new per_file struct
                per_file = {"filename": func["file"], "functions": []}
                tmp_dict_files[func["file"]] = per_file
            else:
                per_file = tmp_dict_files[func["file"]]

            per_func = None
            for i in per_file["functions"]:
                if i["line number"] == func["line_number"]:
                    per_func = i
                    break

            if per_func is None:  # New function
                per_func = {
                    "function name": re.sub(r"\([^)]*\)", "",
                                            func["func_name"]),
                    "line number": func["line_number"],
                    "LOC": int(func["loc"]),  # LOC
                    "CLOC": int(func["cloc"]),
                    "CC": float(func["functionCC"]),
                    "class name": module[
                        "module_name"],  # The function is part of this module
                }
                per_file["functions"].append(per_func)

            else:
                log_debug(
                    "\t_standardizer_cccc() warning: same function found twice."
                    "\n\tanalyzed function:"
                    "\n\t{}\n"
                    "\talready present function:\n\t{}",
                    func,
                    per_func,
                )

    formatted_output = {"classes": [], "files": []}

    for module in tmp_dict_modules:
        # Do not add the per-module stats for the "anonymous" module
        if module != "anonymous":
            formatted_output["classes"].append(tmp_dict_modules[module])

    for file in tmp_dict_files.values():
        formatted_output["files"].append(file)

    return formatted_output
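Using the illustrative CCCC data from Example #4, the standardized structure returned above would look roughly like this; note that LOC/CLOC become int and CC becomes float, while the per-class metrics stay as strings.

# Illustrative shape of the value returned by standardizer_cccc().
{
    "classes": [
        {
            "class name": "MyClass",
            "CC": "12",
            "C&K": {"WMC": "7", "DIT": "1", "NOC": "0", "CBO": "3"},
        },
    ],
    "files": [
        {
            "filename": "src/myclass.cpp",
            "functions": [
                {
                    "function name": "MyClass::do_work",
                    "line number": "42",
                    "LOC": 30,
                    "CLOC": 5,
                    "CC": 4.0,
                    "class name": "MyClass",
                },
            ],
        },
    ],
}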
Example #6
def analyze(
    enabled_tools,
    test_mode,
    path_to_analyze=None,
    files_list=None,
    results_dir=".",
    tools_path="./CC++_Tools",
):
    if path_to_analyze is None and files_list is None:
        log_err(
            "\teither a path to analyze, or "
            "a list of files must be passed to function 'analyze'.",
            ExitCode.PROGRAMMING_ERROR,
        )

    if not os.path.isdir(results_dir):
        log_err(
            "\tthe results path ( {} ) does not exists.",
            ExitCode.TARGET_DIR_NOT_FOUND,
            results_dir,
        )

    t = tools.Tools(tools_path)

    if enabled_tools:
        available_tools = t.get_tools()
        correct_tools = check_tools_correctness(available_tools,
                                                enabled_tools)
        t.set_enabled_tools(correct_tools)

    t.check_tools_existence()

    # Checking for analyzable files.
    if path_to_analyze is not None and not tools.list_of_files(
            path_to_analyze, tools.ACCEPTED_EXTENSIONS):
        log_err(
            "\tthe given path does not contain any of the supported files.\n"
            "\tBe sure to pass the right folder to analyze.",
            ExitCode.NO_SUPPORTED_FILES_FOUND,
        )

    output_dir = results_dir
    if test_mode:
        output_name = os.path.basename(os.path.normpath(results_dir))
    else:
        # The output folder in which all the output data will be placed
        output_name = datetime.datetime.now().strftime(
            "results_%Y.%m.%d_%H.%M.%S")

        output_dir = os.path.join(output_dir, output_name)

        # In case of an already existing path, add trailing '_'
        while os.path.exists(output_dir):
            output_dir = output_dir + "_"
        os.mkdir(output_dir)

    log_debug("\tOK, in output dir: {}", output_dir)
    if path_to_analyze is not None:
        log_debug("\tpathToAnalyze: {}", path_to_analyze)
    else:
        log_debug("\tfiles_list: {}", files_list)
    log_debug("")

    # RUNNING THE EXTERNAL TOOLS
    t.run_tools(path_to_analyze, files_list, output_dir)

    log_debug(
        "\tRAW RESULTS:\n"
        "TOKEI:\n {} "
        "\n\nRUST-CODE-ANALYSIS:\n {}"
        "\n\nCCCC:\n"
        "{}"
        "\n\nMI:\n {}"
        "\n\nHALSTEAD:\n {}\n",
        t.get_tool_output("tokei"),
        t.get_tool_output("rust-code-analysis"),
        t.get_tool_output("cccc"),
        t.get_tool_output("mi"),
        t.get_tool_output("halstead"),
    )

    formatted_outputs = t.get_output(test_mode)

    json_outputs = {}
    if test_mode:
        for tool in t.get_enabled_tools():
            output_final_name = (tool + "_" +
                                 pathlib.Path(path_to_analyze).name + ".json")
            path = os.path.join(output_dir, output_final_name)
            json_output = save_output(formatted_outputs[tool], path)
            json_outputs[tool] = json_output
    else:
        path = os.path.join(output_dir, output_name + ".json")
        json_output = save_output(formatted_outputs["all"], path)
        json_outputs["all"] = json_output

    print("Results have been written in folder: '" + output_name + "'")

    return json_outputs
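A direct call might look like the sketch below; the paths are placeholders, and the tool names are taken from the debug output above.

# Minimal sketch of calling analyze() directly from Python.
json_outputs = analyze(
    enabled_tools=["tokei", "cccc"],
    test_mode=False,
    path_to_analyze="./project_src",
    results_dir="./results",
    tools_path="./CC++_Tools",
)
# In normal (non-test) mode the aggregated result is stored under "all".
print(json_outputs["all"])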
Example #7
def main():
    parser = argparse.ArgumentParser(
        description="A program to calculate various source code metrics, "
        "aggregating the results obtained from different tools.",
        epilog="The manual and the source code of this program can be found on"
        " GitHub at https://github.com/SoftengPoliTo/SoftwareMetrics",
    )

    # Optional args
    parser.add_argument(
        "-v",
        "--verbosity",
        action="store_true",
        help="Increase output verbosity, useful for debugging purposes",
    )

    parser.add_argument(
        "-tm",
        "--test-mode",
        action="store_true",
        help="Run the analyzer in test mode",
    )

    parser.add_argument(
        "-t",
        "--tools",
        type=str,
        nargs="+",
        help="List of tools to be executed",
    )

    # Args
    parser.add_argument(
        "results_dir",  # "-r", "--results",
        metavar="results dir",
        type=str,
        default=".",
        nargs="?",
        help="The directory in which to save the results",
    )

    # Input from paths OR compile_commands.json
    paths_or_json = parser.add_mutually_exclusive_group(required=True)
    # required=True means that exactly one of these options must be given

    paths_or_json.add_argument(
        "-p",
        "--path",  # "paths_to_analyze",
        type=str,
        help="The PATH (directory(ies) / file(s)) to analyze",
    )

    paths_or_json.add_argument(
        "-c",
        type=str,
        metavar="FILE.json",  # action="store",
        help="The path to the 'compile_commands.json' file",
    )

    args = parser.parse_args()

    log_conf(args.verbosity)

    log_debug("\targs={}", vars(args))
    log_debug("\tscript dir: {}",
              os.path.dirname(os.path.realpath(sys.argv[0])))

    files_list = None
    if args.c is not None:
        files_list = compile_commands_reader(args.c)

    analyze(
        args.tools,
        args.test_mode,
        path_to_analyze=args.path,
        files_list=files_list,
        tools_path=os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),
                                "tools"),
        results_dir=args.results_dir,
    )
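For illustration, this is how the parser defined above handles a typical command line (assuming access to the 'parser' object built in main()); exactly one of -p / -c must be given.

# Sketch of a typical invocation parsed by the argparse configuration above.
args = parser.parse_args(["-t", "tokei", "cccc", "-p", "./src", "results"])
# args.tools       -> ["tokei", "cccc"]
# args.path        -> "./src"
# args.c           -> None
# args.results_dir -> "results"
# args.test_mode, args.verbosity -> False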