def run_tool_rust_code_analysis(self, single_file: str, output_dir: str):
    try:
        args = [
            self.rust_code_analysis_path,
            "-m",
            "-O",
            "json",
            "--pr",
            "-p",
        ]
        args.append(single_file)
        results = subprocess.run(args, capture_output=True, check=True)
        input_name = pathlib.Path(single_file).name
        filename = "rca-json/" + input_name + ".json"
        with open(filename, "w") as f:
            f.write(results.stdout.decode("utf-8"))
        return results.stdout
    except subprocess.CalledProcessError as ex:
        log_err(
            "\trust-code-analysis exited with an error.\n{}\n{}\n",
            ExitCode.RUST_CODE_ANALYSIS_TOOL_ERR,
            ex.stdout,
            ex.stderr,
        )
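# A minimal standalone sketch of the same invocation, assuming the upstream
# "rust-code-analysis-cli" binary is on PATH (the binary name and the example source
# path are assumptions; the flags are copied from run_tool_rust_code_analysis above).
# It also creates the relative "rca-json/" directory the method writes into, which
# must exist before open() is called.
import pathlib
import shutil
import subprocess


def rca_metrics_sketch(source_file: str) -> bytes:
    binary = shutil.which("rust-code-analysis-cli")  # assumed binary name
    if binary is None:
        raise FileNotFoundError("rust-code-analysis-cli not found on PATH")
    pathlib.Path("rca-json").mkdir(exist_ok=True)  # directory expected by the method above
    result = subprocess.run(
        [binary, "-m", "-O", "json", "--pr", "-p", source_file],
        capture_output=True,
        check=True,
    )
    return result.stdout


# Example call: rca_metrics_sketch("src/main.c")  # "src/main.c" is illustrative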
def check_missing_field(condition: T.Any, field: str, filename: str) -> None:
    """Log an error if the given field is missing from the analyzed file."""
    if not condition:
        log_err(
            "\n\n{} does not have the '" + field + "' field",
            ExitCode.PROGRAMMING_ERROR,
            filename,
        )
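# Usage sketch for check_missing_field: guard a dictionary lookup on a parsed report
# before reading the key. Both the "measures" key and the file name are illustrative
# assumptions, not fields required by any specific tool output.
def _demo_check_missing_field(report: dict) -> None:
    check_missing_field("measures" in report, "measures", "example_report.json")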
def run_tool_mi(self, files_list: list):
    try:
        args = [self.mi_path, "-X"]
        args.extend(files_list)
        results = subprocess.run(args, capture_output=True, check=True)
        return results.stdout
    except subprocess.CalledProcessError as ex:
        log_err(
            "\tMaintainability Index Tool exited with an error.\n{}\n{}\n",
            ExitCode.MI_TOOL_ERR,
            ex.stdout,
            ex.stderr,
        )
def run_tool_tokei(self, files_list: list):
    try:
        args = [self.tokei_path, "-o", "json"]
        args.extend(files_list)
        results = subprocess.run(args, capture_output=True, check=True)
        return results.stdout
    except subprocess.CalledProcessError as ex:
        log_err(
            "\tTokei exited with an error.\n{}\n{}\n",
            ExitCode.TOKEI_TOOL_ERR,
            ex.stdout,
            ex.stderr,
        )
def run_tool_cccc(self, files_list: list, output_dir: str):
    try:
        output_subdir = self._output_subdir(output_dir)
        args = [self.cccc_path, "--outdir=" + output_subdir]
        args.extend(files_list)
        return subprocess.run(args, capture_output=True, check=True)
    except subprocess.CalledProcessError as ex:
        log_err(
            "\tCCCC exited with an error.\n{}\n{}\n",
            ExitCode.CCCC_TOOL_ERR,
            ex.stdout,
            ex.stderr,
        )
def _compare_two_metric(
    self,
    metric_first_file: T.Optional[int],
    metric_second_file: T.Optional[int],
    metric_as_string: str,
) -> None:
    if metric_first_file is None:
        log_err(
            "\n\n{} metric not found in {}",
            ExitCode.METRIC_NOT_FOUND,
            metric_as_string,
            self.first_json_filename,
        )
    if metric_second_file is None:
        log_err(
            "\n\n{} metric not found in {}",
            ExitCode.METRIC_NOT_FOUND,
            metric_as_string,
            self.second_json_filename,
        )
    if self._check_metrics_types(metric_first_file, metric_second_file):
        log_err(
            "\n\n{} metric is different\n\n"
            "{}: {}\n"
            "{}: {}",
            ExitCode.DIFFERENT_METRIC_VALUE,
            metric_as_string,
            self.first_json_filename,
            metric_first_file,
            self.second_json_filename,
            metric_second_file,
        )
def compile_commands_reader(json_file: str) -> list:
    if not os.path.isfile(json_file):
        log_err(
            "\t'{}' is not a valid file. Check the path you have provided.",
            ExitCode.COMPILE_COMMAND_FILE_ERROR,
            json_file,
        )
    base_name = os.path.basename(json_file)
    if base_name[base_name.rfind(".") + 1:] != "json":
        log_err(
            "\t'{}' is not a JSON file.",
            ExitCode.COMPILE_COMMAND_FILE_ERROR,
            json_file,
        )
    with open(json_file, "r") as json_fp:
        c_commands = json.load(json_fp)
    files = []
    try:
        for i in c_commands:
            files.append(os.path.join(i["directory"], i["file"]))
    except KeyError:
        log_err(
            "\t'{}' is not a valid \"compile_commands\" file.",
            ExitCode.COMPILE_COMMAND_FILE_ERROR,
            json_file,
        )
    # Deduplicate while preserving the original order of the entries.
    return list(dict.fromkeys(files).keys())
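# Usage sketch for compile_commands_reader, assuming it is called from the module
# where it is defined: write a minimal compile_commands.json containing a duplicate
# entry to a temporary directory and check that the duplicate is dropped while the
# order is preserved. The directory and file names are illustrative assumptions.
import json
import os
import tempfile


def _demo_compile_commands_reader() -> None:
    entries = [
        {"directory": "/tmp/proj", "file": "main.c", "command": "cc -c main.c"},
        {"directory": "/tmp/proj", "file": "util.c", "command": "cc -c util.c"},
        {"directory": "/tmp/proj", "file": "main.c", "command": "cc -c main.c"},
    ]
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "compile_commands.json")
        with open(path, "w") as fp:
            json.dump(entries, fp)
        files = compile_commands_reader(path)
        assert files == ["/tmp/proj/main.c", "/tmp/proj/util.c"]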
def compute_metrics(directory: str, filename: str) -> None:
    input_file = INPUT_DIR / directory / filename
    output_file = RESULTS_DIR / directory
    ret_value = run_subprocess(
        "./analyzer.py",
        "-tm",
        "-t",
        "rust-code-analysis",
        "-p",
        input_file,
        output_file,
    )
    if ret_value.returncode != 0:
        log_err(
            "\tAn error occurred computing the metrics for {}",
            ExitCode.PROGRAMMING_ERROR,
            input_file,
        )
        sys.exit(1)
def run_tool_halstead(self, path_to_analyze: str):
    try:
        results = subprocess.run(
            [
                "/usr/bin/java",
                "-Duser.country=US",
                "-Duser.language=en",
                "-jar",
                self.halstead_path,
                path_to_analyze,
            ],
            capture_output=True,
            check=True,
        )
        return results.stdout
    except subprocess.CalledProcessError as ex:
        log_err(
            "\tHalstead Metric Tool exited with an error.\n{}\n{}\n",
            ExitCode.HALSTEAD_TOOL_ERR,
            ex.stdout,
            ex.stderr,
        )
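# Portability sketch: the call above hard-codes "/usr/bin/java". An alternative (not
# what the tool currently does) is to resolve the JVM from PATH with the
# standard-library shutil.which and fail early with a clear message.
import shutil


def find_java_sketch() -> str:
    java = shutil.which("java")
    if java is None:
        raise FileNotFoundError("no 'java' executable found on PATH")
    return java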
def check_tools_existence(self):
    if not os.path.isdir(self.baseDir):
        log_err(
            "\tThe directory containing the tools ({}) does not exist.",
            ExitCode.TOOLS_DIR_NOT_FOUND,
            self.baseDir,
        )
    tool_path = {
        "tokei": self.tokei_tool.tokei_path,
        "rust-code-analysis": self.rca_tool.rust_code_analysis_path,
        "cccc": self.cccc_tool.cccc_path,
        "mi": self.mi_tool.mi_path,
        "halstead": self.halstead_tool.halstead_path,
    }
    for name in self._enabled_tools:
        if not os.path.isfile(tool_path.get(name)):
            log_err(
                "\tOne or more tools are missing.\n"
                "Check the directory containing the tools ({}).",
                ExitCode.TOOLS_NOT_FOUND,
                self.baseDir,
            )
def check_json_name(name):
    if not os.path.isfile(name):
        log_err("\t{} is not a valid file.", ExitCode.WRONG_FILES, name)
def analyze(
    enabled_tools,
    test_mode,
    path_to_analyze=None,
    files_list=None,
    results_dir=".",
    tools_path="./CC++_Tools",
):
    if path_to_analyze is None and files_list is None:
        log_err(
            "\teither a path to analyze or "
            "a list of files must be passed to function 'analyze'.",
            ExitCode.PROGRAMMING_ERROR,
        )

    if not os.path.isdir(results_dir):
        log_err(
            "\tthe results path ({}) does not exist.",
            ExitCode.TARGET_DIR_NOT_FOUND,
            results_dir,
        )

    t = tools.Tools(tools_path)

    if enabled_tools:
        tool = t.get_tools()
        correct_tools = check_tools_correctness(tool, enabled_tools)
        t.set_enabled_tools(correct_tools)

    t.check_tools_existence()

    # Checking for analyzable files.
    if path_to_analyze is not None and not tools.list_of_files(
            path_to_analyze, tools.ACCEPTED_EXTENSIONS):
        log_err(
            "\tthe given path does not contain any of the supported files.\n"
            "\tBe sure to pass the right folder to analyze.",
            ExitCode.NO_SUPPORTED_FILES_FOUND,
        )

    output_dir = results_dir
    if test_mode:
        output_name = os.path.basename(os.path.normpath(results_dir))
    else:
        # The output folder in which all the output data will be placed
        output_name = datetime.datetime.now().strftime(
            "results_%Y.%m.%d_%H.%M.%S")
        output_dir = os.path.join(output_dir, output_name)

        # In case of an already existing path, add a trailing '_'
        while os.path.exists(output_dir):
            output_dir = output_dir + "_"

        os.mkdir(output_dir)

    log_debug("\tOK, in output dir: {}", output_dir)

    if path_to_analyze is not None:
        log_debug("\tpathToAnalyze: {}", path_to_analyze)
    else:
        log_debug("\tfiles_list: {}", files_list)
    log_debug("")

    # RUNNING THE EXTERNAL TOOLS
    t.run_tools(path_to_analyze, files_list, output_dir)

    log_debug(
        "\tRAW RESULTS:\n"
        "TOKEI:\n {} "
        "\n\nRUST-CODE-ANALYSIS:\n {}"
        "\n\nCCCC:\n"
        "{}"
        "\n\nMI:\n {}"
        "\n\nHALSTEAD:\n {}\n",
        t.get_tool_output("tokei"),
        t.get_tool_output("rust-code-analysis"),
        t.get_tool_output("cccc"),
        t.get_tool_output("mi"),
        t.get_tool_output("halstead"),
    )

    formatted_outputs = t.get_output(test_mode)

    json_outputs = {}
    if test_mode:
        for tool in t.get_enabled_tools():
            output_final_name = (tool + "_" +
                                 pathlib.Path(path_to_analyze).name + ".json")
            path = os.path.join(output_dir, output_final_name)
            json_output = save_output(formatted_outputs[tool], path)
            json_outputs[tool] = json_output
    else:
        path = os.path.join(output_dir, output_name + ".json")
        json_output = save_output(formatted_outputs["all"], path)
        json_outputs["all"] = json_output

    print("Results have been written in folder: '" + output_name + "'")
    return json_outputs
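# Usage sketch for analyze(): run the enabled tools over a C/C++ tree and collect
# the formatted JSON outputs. The project path below is an illustrative assumption;
# the tool names match the keys used in check_tools_existence, and the call only
# works if the external tools are installed under tools_path.
if __name__ == "__main__":
    outputs = analyze(
        enabled_tools=["tokei", "rust-code-analysis", "cccc", "mi", "halstead"],
        test_mode=False,
        path_to_analyze="path/to/project",  # assumption: replace with a real source tree
        results_dir=".",
        tools_path="./CC++_Tools",
    )
    print(sorted(outputs))  # non-test mode produces a single "all" entry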