def test_print_audit_results_none(
    self,
    mock_printer,
    mock_baseline,
    expected_message,
):
    """
    This doesn't actually test for correctness; we rely on
    good tests for determine_audit_results.
    """
    with self.mock_env(
        baseline=mock_baseline,
    ), mock.patch.object(
        audit,
        'determine_audit_results',
        return_value={},
    ):
        audit.print_audit_results('somefilename')

    assert expected_message in mock_printer.message
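
# ---------------------------------------------------------------------------
# Hypothetical sketch (not the project's actual fixture): one way the
# `mock_printer` argument above could be supplied, by patching the built-in
# print() so the test can assert against the accumulated output. The names
# `PrinterShim` and `mock_printer` here are illustrative assumptions.
# ---------------------------------------------------------------------------
from unittest import mock

import pytest


class PrinterShim:
    def __init__(self):
        self.message = ''

    def add(self, message='', *args, **kwargs):
        # Mirror just enough of print()'s behaviour for assertions.
        self.message += str(message) + '\n'


@pytest.fixture
def mock_printer():
    shim = PrinterShim()
    with mock.patch('builtins.print', shim.add):
        yield shim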
def main(argv=None):
    if len(sys.argv) == 1:  # pragma: no cover
        sys.argv.append('-h')

    args = parse_args(argv)
    if args.verbose:  # pragma: no cover
        log.set_debug_level(args.verbose)

    if args.action == 'scan':
        # Plugins are *always* rescanned with fresh settings, because
        # we want to get the latest updates.
        plugins = initialize.from_parser_builder(
            args.plugins,
            exclude_lines_regex=args.exclude_lines,
            should_verify_secrets=not args.no_verify,
        )
        if args.string:
            line = args.string

            if isinstance(args.string, bool):
                line = sys.stdin.read().splitlines()[0]

            _scan_string(line, plugins)
        else:
            baseline_dict = _perform_scan(args, plugins)
            if args.import_filename:
                write_baseline_to_file(
                    filename=args.import_filename[0],
                    data=baseline_dict,
                )
            else:
                print(
                    baseline.format_baseline_for_output(
                        baseline_dict,
                    ),
                )

    elif args.action == 'audit':
        if not args.diff and not args.display_results:
            audit.audit_baseline(args.filename[0])
            return 0

        if args.display_results:
            audit.print_audit_results(args.filename[0])
            return 0

        if len(args.filename) != 2:
            print(
                'Must specify two files to compare!',
                file=sys.stderr,
            )
            return 1

        try:
            audit.compare_baselines(args.filename[0], args.filename[1])
        except audit.RedundantComparisonError:
            print(
                'No difference, because it\'s the same file!',
                file=sys.stderr,
            )

    return 0
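
# ---------------------------------------------------------------------------
# A minimal sketch of a console entry point, assuming `main` lives in a
# module that can also be executed directly; a packaged CLI would more
# likely wire this up through a setuptools `console_scripts` entry point.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    sys.exit(main())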