def _output(self, results):
    b = PrintBuffer()
    b.separator()
    b.add_green("%s passed!\n" % self.title)
    b.add("Elapsed time: %.2fs\n" % results['elapsed_time'])
    b.separator()
    return str(b)

def launch_instructions(self, result_dir):
    b = PrintBuffer()
    b.separator()
    b.add("Full details can be seen in a browser by running:\n")
    b.add(" $ %s %s\n" % (self.binary_path, result_dir))
    b.separator()
    return str(b)

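# The report methods in this excerpt all write into a PrintBuffer and return
# str(b). The helper itself is not shown here; the following is a minimal
# sketch of what such a buffer could look like, inferred only from the calls
# used in these methods (add, add_green, add_red, separator, flush, str).
# The class name PrintBufferSketch, the 80-column separator, and the ANSI
# color handling are assumptions for illustration, not the project's actual
# implementation.

class PrintBufferSketch(object):
    """Accumulates output chunks and renders them as a single string."""

    GREEN = '\033[92m'
    RED = '\033[91m'
    ENDC = '\033[0m'

    def __init__(self):
        self.chunks = []

    def add(self, text):
        # Append plain text to the buffer.
        self.chunks.append(text)

    def add_green(self, text):
        # Append text wrapped in green ANSI escapes (e.g. success messages).
        self.chunks.append(self.GREEN + text + self.ENDC)

    def add_red(self, text):
        # Append text wrapped in red ANSI escapes (e.g. errors, issues).
        self.chunks.append(self.RED + text + self.ENDC)

    def separator(self):
        # Horizontal rule between report sections; the width is an assumption.
        self.chunks.append('-' * 80 + '\n')

    def flush(self):
        # Print the accumulated buffer and reset it.
        print(str(self), end='')
        self.chunks = []

    def __str__(self):
        return ''.join(self.chunks)
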
def _output(self, results): if self.json: return super()._output(results) b = PrintBuffer() r = results b.separator() b.add("Took %.2f seconds to analyze with scan-build\n" % r['elapsed_time']) b.add("Found %d issues:\n" % len(r['issues'])) b.separator() return str(b)
def _output(self, results):
    if self.json:
        return super()._output(results)
    b = PrintBuffer()
    b.add(super()._output(results))
    r = results
    for issue_no, issue in enumerate(r['issues']):
        b.add("%d: %s:%d:%d - %s\n" %
              (issue_no, issue['file'], issue['line'], issue['col'],
               issue['description']))
    if len(r['issues']) > 0:
        viewer = self.scan_build.viewer
        b.add(viewer.launch_instructions(r['results_directory']))
    return str(b)

def _output(self, results): if self.json: return super()._output(results) b = PrintBuffer() b.add(super()._output(results)) r = results for issue in r['issues']: b.separator() b.add("An issue was found with ") b.add_red("%s\n" % issue['file_path']) b.add('Rule: "%s"\n\n' % issue['rule_title']) b.add('line %d:\n' % issue['line']['number']) b.add("%s" % issue['line']['context']) b.add(' ' * (issue['line']['character'] - 1)) b.add_red("^\n") b.separator() if len(r['issues']) == 0: b.add_green("No style issues found!\n") else: b.add_red("These issues can be fixed automatically by running:\n") b.add("$ basic_style.py fix [target [target ...]]\n") b.separator() return str(b)
def _output(self, results): if self.json: return super()._output(results) b = PrintBuffer() b.add(super()._output(results)) r = results b.add("%-32s %8.02fs\n" % ("Elapsed time:", r['elapsed_time'])) b.separator() for title, evaluation in sorted(r['rule_evaluation'].items()): b.add('"%s":\n' % title) b.add(' %-30s %s\n' % ("Applies to:", evaluation['extensions'])) b.add(' %-30s %8d\n' % ("Files examined:", evaluation['examined'])) b.add(' %-30s %8d\n' % ("Occurrences of issue:", evaluation['occurrences'])) b.add(' %-30s %8d\n\n' % ("Files with issue:", evaluation['files'])) b.separator() b.add("%-32s %8d\n" % ("Files scoring 100%", r['matching'])) b.add("%-32s %8d\n" % ("Files scoring <100%", r['not_matching'])) b.separator() b.add("Overall scoring:\n\n") score = FileStyleScore(r['lines_before'], r['lines_added'], r['lines_removed'], r['lines_unchanged'], r['lines_after']) b.add(str(score)) b.separator() return str(b)
def _output(self, results): if self.json: return super()._output(results) b = PrintBuffer() b.add(super()._output(results)) r = results for issue in r['issues']: b.add("An issue was found with ") b.add_red("%s" % issue['file_path']) b.add('\n\n%s\n\n' % issue['evaluation']['description']) b.add('Info for resolution:\n') b.add(issue['evaluation']['resolution']) b.separator() if len(r['issues']) == 0: b.add_green("No copyright header issues found!\n") return str(b)
def _output(self, results): if self.json: return super()._output(results) b = PrintBuffer() r = results b.add(super()._output(results)) b.add("%-70s %6d\n" % ("Files expected to have header:", r['hdr_expected'])) b.add("%-70s %6d\n" % ("Files not expected to have header:", r['no_hdr_expected'])) b.add("%-70s %6d\n" % ("Files expected to have 'copyright' occurrence outside header:", r['other_copyright_expected'])) b.add("%-70s %6d\n" % ("Files not expected to have 'copyright' occurrence outside " "header:", r['no_other_copyright_expected'])) b.add("%-70s %6d\n" % ("Files passed:", r['passed'])) b.add("%-70s %6d\n" % ("Files failed:", r['failed'])) b.separator() for key, value in sorted(r['issues'].items()): b.add("%-70s %6d\n" % ('"' + key + '":', value)) b.separator() return str(b)
def _output(self, results): if self.json: return super()._output(results) b = PrintBuffer() r = results b.separator() b.add("%4d files tracked in repo\n" % r['tracked_files']) b.add("%4d files in scope according to .bitcoin_maintainer_tools.json " "settings\n" % r['files_in_scope']) b.add("%4d files examined according to listed targets\n" % r['files_targeted']) b.add("%4d parallel jobs for computing analysis\n" % r['jobs']) b.separator() return str(b)
def _exit_if_parameters_unsupported(self):
    if self.force:
        return
    rejected_parameters = self.clang_format.style.rejected_parameters
    if len(rejected_parameters) > 0:
        b = PrintBuffer()
        b.add_red("\nERROR: ")
        b.add("clang-format version %s does not support all parameters "
              "given in\n%s\n\n" % (self.clang_format.binary_version,
                                    self.clang_format.style))
        b.add("Unsupported parameters:\n")
        for parameter in rejected_parameters:
            b.add("\t%s\n" % parameter)
        # The applied formatting has subtle differences that vary between
        # major releases of clang-format. The recommendation should
        # probably follow the latest widely-available stable release.
        repo_info = self['repository'].repo_info
        b.add("\nUsing clang-format version %s or higher is recommended\n" %
              repo_info['clang_format_recommended']['min_version'])
        b.add("Use the --force option to override and proceed anyway.\n\n")
        b.flush()
        sys.exit("*** missing clang-format support.")

def _output(self, results):
    if self.json:
        return super()._output(results)
    b = PrintBuffer()
    b.add(super()._output(results))
    r = results
    for f in r['failures']:
        b.add("A code format issue was detected in ")
        b.add_red("%s\n\n" % f['file_path'])
        score = FileStyleScore(f['lines_before'], f['lines_added'],
                               f['lines_removed'], f['lines_unchanged'],
                               f['lines_after'])
        b.add(str(score))
        b.separator()
    if len(r['failures']) == 0:
        b.add_green("No format issues found!\n")
    else:
        b.add_red("These files can be formatted by running:\n")
        b.add("$ clang_format.py format [option [option ...]] "
              "[target [target ...]]\n")
    b.separator()
    return str(b)

def _output(self, results): if self.json: return super()._output(results) b = PrintBuffer() b.add(super()._output(results)) r = results b.add("%-30s %s\n" % ("clang-format bin:", r['clang_format_path'])) b.add("%-30s %s\n" % ("clang-format version:", r['clang_format_version'])) b.add("%-30s %s\n" % ("Using style in:", r['clang_style_path'])) b.separator() if len(r['rejected_parameters']) > 0: b.add_red("WARNING") b.add(" - This version of clang-format does not support the " "following style\nparameters, so they were not used:\n\n") for param in r['rejected_parameters']: b.add("%s\n" % param) b.separator() b.add("%-30s %.02fs\n" % ("Elapsed time:", r['elapsed_time'])) if len(r['slow_diffs']) > 0: b.add("Slowest diffs:\n") for slow in r['slow_diffs']: b.add("%6.02fs for %s\n" % (slow['diff_time'], slow['file_path'])) b.separator() b.add("%-30s %4d\n" % ("Files scoring 100%:", r['matching'])) b.add("%-30s %4d\n" % ("Files scoring <100%:", r['not_matching'])) b.add("%-30s %s\n" % ("Formatted Content MD5:", r['formatted_md5'])) b.separator() for score_range in reversed(sorted(r['files_in_ranges'].keys())): b.add("%-30s %4d\n" % ("Files scoring %s:" % score_range, r['files_in_ranges'][score_range])) b.separator() b.add("Overall scoring:\n\n") score = FileStyleScore(r['lines_before'], r['lines_added'], r['lines_removed'], r['lines_unchanged'], r['lines_after']) b.add(str(score)) b.separator() return str(b)
def _output(self, results): if self.json: return super()._output(results) b = PrintBuffer() b.add(super()._output(results)) r = results for issue in r['issues']: b.add("An issue has been found in ") b.add_red("%s:%d:%d\n" % (issue['file'], issue['line'], issue['col'])) b.add("Type: %s\n" % issue['type']) b.add("Description: %s\n\n" % issue['description']) event_no = 0 for event in issue['events']: b.add("%d: " % event_no) b.add("%s:%d:%d - " % (event['file'], event['line'], event['col'])) b.add("%s\n" % event['message']) event_no = event_no + 1 b.separator() if len(r['issues']) == 0: b.add_green("No static analysis issues found!\n") b.separator() else: viewer = self.scan_build.viewer b.add(viewer.launch_instructions(r['results_directory'])) return str(b)