def run(runner, the_config):
    """Main common runner for serial and parallel"""
    compdb = compdb_parser.load_compdb(the_config.compdb)
    the_summary = summary.get_summary()
    if compdb:
        the_summary = runner(the_config, compdb)
    else:
        logging.error("Could not load compdb")
        sys.exit("ERROR: Loading of compdb failed")
    return the_summary
def extract_violation(token, rule_id, message):
    location = token.location
    if not location.file:
        logging.warning("Missing source location for '%s', skipping", token.kind)
        return None
    if utils.shall_ignore_based_on_file_name(location.file.name):
        logging.debug("Ignoring violation for id '%s' from file '%s'", rule_id, location.file.name)
        summary.get_summary().add_skipped_filename(utils.only_filename(location.file.name))
        return None
    violation = violations.Violation(
        rule_id,
        message,
        location.file.name,
        location.line,
        location.column,
    )
    summary.get_summary().add_analyzed_filename(location.file.name)
    present_violation(violation)
    return violation
def create_translation_unit(source_file, args=None, directory=None):
    """Use the clang parser to parse the source file with the given args.

    Returns the translation unit on success, None on failure.
    """
    index = cindex.Index.create()
    # Avoid a mutable default argument; fall back to an empty list of extra args
    args = DEFAULT_ARGUMENTS + (args or [])
    try:
        args = clean_args(args)
        args = absolute_path_include(args, directory)
        logging.debug("Parsing '%s' with args '%s'", utils.only_filename(source_file), args)
        translation_unit = index.parse(source_file, args=args)
        diagnostics.log_diagnostics_info_summary(translation_unit)
        return translation_unit
    except cindex.TranslationUnitLoadError as the_exception:
        logging.error(the_exception)
        logging.error("Failed to parse '%s'", utils.only_filename(source_file))
        logging.debug(the_exception, exc_info=True)
        summary.get_summary().add_failed_translation_units(source_file)
        return None
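

# Illustrative only: a minimal sketch of calling create_translation_unit for a single
# source file. The file name, include path, and language flag are made-up example
# values, not taken from the project.
def _example_parse_one_file():
    translation_unit = create_translation_unit(
        "src/main.cpp",
        args=["-Iinclude", "-std=c++17"],
        directory="/path/to/project",
    )
    if translation_unit is None:
        logging.error("Parsing failed, see the summary for details")
    return translation_unit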
def apply_checkers_for_translation_unit(translation_unit, the_config):
    """For each translation unit, apply the checkers"""
    # TODO Do not differentiate between tu and token based checkers
    violations_per_tu = []
    if translation_unit:
        logging.info(colored("Applying checkers for '%s'", "magenta"),
                     utils.only_filename(translation_unit.spelling))
        summary.get_summary().add_analyzed_translation_unit(translation_unit.spelling)
        # Apply the checkers
        for the_checker in the_config.active_checkers:
            violations_per_tu += checker.apply_checker(translation_unit, the_checker)
        # Always apply the clang warning checker
        clang_warnings = clang_warning_checker.check_for_clang_warnings(translation_unit)
        logging.critical(
            colored("Translation Unit '%s' has %d violation(s) and %d clang warning(s)", "red"),
            utils.only_filename(translation_unit.spelling),
            len(violations_per_tu),
            len(clang_warnings),
        )
        diags = diagnostics.get_diagnostics_by_severity_one_tu(translation_unit)
        summary.get_summary().add_number_of_diagnostics(translation_unit.spelling, diags)
        summary.get_summary().add_number_of_violations(
            translation_unit.spelling, (len(violations_per_tu), len(clang_warnings))
        )
    else:
        logging.warning("Skipping invalid translation unit")
    return violations_per_tu
def parse_commands(commands, list_of_files=None):
    """Parse commands and return a list of translation units"""
    translation_units = []
    number_of_skipped_files = 0
    for command in commands:
        # Check if we want to parse the translation unit based on the file name pattern
        if list_of_files and not is_included_in_files_filter(command, list_of_files):
            summary.get_summary().add_skipped_commands(command.filename)
            continue
        if utils.shall_ignore_based_on_file_name(command.filename):
            logging.debug("Skipping external file '%s'", command.filename)
            number_of_skipped_files += 1
            summary.get_summary().add_skipped_filename(command.filename)
            continue
        translation_unit = parse_single_command(command)
        translation_units.append(translation_unit)
    if number_of_skipped_files > 0:
        logging.info("Skipped %d file(s)", number_of_skipped_files)
    return translation_units
def apply_checker(translation_unit, checker):
    # TODO Most checkers traverse the tu again and again, this can be sped up
    violations = []
    log_progress_for_checker(translation_unit, checker.__module__)
    # Only check non-external translation units
    if utils.shall_ignore_based_on_file_name(translation_unit.spelling):
        logging.debug("Ignoring translation unit '%s'", utils.only_filename(translation_unit.spelling))
        summary.get_summary().add_ignored_translation_unit(utils.only_filename(translation_unit.spelling))
        return []
    # Decide, based on the name of the checker function, on which level the check shall be applied
    if checker.__name__ == "token_based_rule":
        for token in translation_unit.cursor.walk_preorder():
            violation = checker(token)
            if violation:
                violations.append(violation)
    elif checker.__name__ == "translation_unit_based_rule":
        violations = checker(translation_unit)
    # TODO Consider returning diagnostics
    return violations
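

# Illustrative only: a minimal sketch of a checker that apply_checker would dispatch
# per cursor. The function name "token_based_rule" matches the dispatch above; the
# rule id "EX-001" and its message are made-up example values, not rules from the
# project's rule set.
def token_based_rule(token):
    """Flag goto statements (example rule for illustration only)."""
    if token.kind == cindex.CursorKind.GOTO_STMT:
        return extract_violation(token, "EX-001", "Avoid goto statements")
    return None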
def single_run(args):
    """Execution of a single worker, similar to the serial runner"""
    start, end, commands, function_args = args
    the_config = function_args
    # _identity is a private attribute; its first element is the worker's index in the pool
    worker_id = multiprocessing.current_process()._identity[0]
    logger.setup_logger(the_config.log_level, f"{the_config.log_file}_{worker_id}")
    logging.debug("Spawning worker with id %s", worker_id)
    clang_setup.setup_clang()
    violations = facade_lib.apply_checkers_for_commands(commands[start:end], the_config)
    # TODO Return the violations
    return [summary.get_summary()]
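

# Illustrative only: one way chunks of compile commands could be handed to single_run
# via a multiprocessing pool. The chunking scheme, pool size, and the helper name
# run_parallel_sketch are assumptions, not the project's actual parallel runner.
def run_parallel_sketch(the_config, commands, jobs=4):
    chunk_size = max(1, len(commands) // jobs)
    work = [
        (start, min(start + chunk_size, len(commands)), commands, the_config)
        for start in range(0, len(commands), chunk_size)
    ]
    with multiprocessing.Pool(processes=jobs) as pool:
        # Each worker returns a list containing its summary, mirroring single_run's return value
        return pool.map(single_run, work)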
def run_serial(the_config, compdb):
    """Executes a serial run, one command after another"""
    commands = compdb_parser.parse_compdb(compdb)
    facade_lib.apply_checkers_for_commands(commands, the_config)
    # TODO The global state of summary is hard to test
    return summary.get_summary()
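

# Illustrative only: run_serial is meant to be passed to the common run() entry point
# above, which loads the compilation database from the_config.compdb and then invokes
# the runner with it, e.g.:
#     the_summary = run(run_serial, the_config)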