def setUp(self): super().setUp() conf = config.BanditConfig() self.manager = manager.BanditManager(conf, "custom") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() self.context = { "filename": self.tmp_fname, "lineno": 4, "linerange": [4], "col_offset": 30, } self.check_name = "hardcoded_bind_all_interfaces" self.issue = issue.Issue( bandit.MEDIUM, bandit.MEDIUM, text="Possible binding to all interfaces.", ) self.manager.out_file = self.tmp_fname self.issue.fname = self.context["filename"] self.issue.lineno = self.context["lineno"] self.issue.linerange = self.context["linerange"] self.issue.col_offset = self.context["col_offset"] self.issue.test = self.check_name self.manager.results.append(self.issue)
def setUp(self):
    super(YamlFormatterTests, self).setUp()
    conf = config.BanditConfig()
    self.manager = manager.BanditManager(conf, 'file')
    (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
    self.context = {'filename': self.tmp_fname,
                    'lineno': 4,
                    'linerange': [4]}
    self.check_name = 'hardcoded_bind_all_interfaces'
    self.issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM,
                             'Possible binding to all interfaces.')
    self.candidates = [issue.Issue(bandit.LOW, bandit.LOW,
                                   'Candidate A', lineno=1),
                       issue.Issue(bandit.HIGH, bandit.HIGH,
                                   'Candidate B', lineno=2)]
    self.manager.out_file = self.tmp_fname
    self.issue.fname = self.context['filename']
    self.issue.lineno = self.context['lineno']
    self.issue.linerange = self.context['linerange']
    self.issue.test = self.check_name
    self.manager.results.append(self.issue)
    self.manager.metrics = metrics.Metrics()

    # mock up the metrics
    for key in ['_totals', 'binding.py']:
        self.manager.metrics.data[key] = {'loc': 4, 'nosec': 2}
        for (criteria, default) in constants.CRITERIA:
            for rank in constants.RANKING:
                self.manager.metrics.data[key]['{0}.{1}'.format(
                    criteria, rank)] = 0

def setUp(self): super().setUp() conf = config.BanditConfig() self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() self.context = { "filename": self.tmp_fname, "lineno": 4, "linerange": [4], } self.check_name = "hardcoded_bind_all_interfaces" self.issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM, "Possible binding to all interfaces.") self.candidates = [ issue.Issue(bandit.LOW, bandit.LOW, "Candidate A", lineno=1), issue.Issue(bandit.HIGH, bandit.HIGH, "Candiate B", lineno=2), ] self.manager.out_file = self.tmp_fname self.issue.fname = self.context["filename"] self.issue.lineno = self.context["lineno"] self.issue.linerange = self.context["linerange"] self.issue.test = self.check_name self.manager.results.append(self.issue) self.manager.metrics = metrics.Metrics() # mock up the metrics for key in ["_totals", "binding.py"]: self.manager.metrics.data[key] = {"loc": 4, "nosec": 2} for (criteria, default) in constants.CRITERIA: for rank in constants.RANKING: self.manager.metrics.data[key][f"{criteria}.{rank}"] = 0
def setUp(self): super().setUp() conf = config.BanditConfig() self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() self.manager.out_file = self.tmp_fname
def setUp(self):
    super(HtmlFormatterTests, self).setUp()
    conf = config.BanditConfig()
    self.manager = manager.BanditManager(conf, 'file')
    (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
    self.manager.out_file = self.tmp_fname

def test_create_manager_with_profile(self):
    # make sure we can create a manager
    m = manager.BanditManager(config=self.config, agg_type='file',
                              debug=False, verbose=False,
                              profile=self.profile)
    self.assertEqual(False, m.debug)
    self.assertEqual(False, m.verbose)
    self.assertEqual('file', m.agg_type)

def setUp(self):
    super(FunctionalTests, self).setUp()
    # NOTE(tkelsey): bandit is very sensitive to paths, so stitch
    # them up here for the testing environment.
    #
    path = os.path.join(os.getcwd(), 'bandit', 'plugins')
    b_conf = b_config.BanditConfig()
    self.b_mgr = b_manager.BanditManager(b_conf, 'file')
    self.b_mgr.b_conf._settings['plugins_dir'] = path
    self.b_mgr.b_ts = b_test_set.BanditTestSet(config=b_conf)

def setUp(self):
    super().setUp()
    # NOTE(tkelsey): bandit is very sensitive to paths, so stitch
    # them up here for the testing environment.
    #
    path = os.path.join(os.getcwd(), "bandit", "plugins")
    b_conf = b_config.BanditConfig()
    self.b_mgr = b_manager.BanditManager(b_conf, "file")
    self.b_mgr.b_conf._settings["plugins_dir"] = path
    self.b_mgr.b_ts = b_test_set.BanditTestSet(config=b_conf)

def setUp(self):
    super(HtmlFormatterTests, self).setUp()
    cfg_file = os.path.join(os.getcwd(), 'bandit/config/bandit.yaml')
    conf = config.BanditConfig(cfg_file)
    self.manager = manager.BanditManager(conf, 'file')
    (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
    self.manager.out_file = self.tmp_fname

def test_create_manager_with_test_name_list(self):
    # make sure we can create a manager
    m = manager.BanditManager(config=self.config, agg_type='file',
                              debug=False, verbose=False,
                              profile_name=['exec_used', 'paramiko_calls'])
    self.assertEqual(m.debug, False)
    self.assertEqual(m.verbose, False)
    self.assertEqual(m.agg_type, 'file')

def test_create_manager_with_profile(self):
    # make sure we can create a manager
    m = manager.BanditManager(config=self.config, agg_type='file',
                              debug=False, verbose=False,
                              profile_name='Test')
    self.assertEqual(m.debug, False)
    self.assertEqual(m.verbose, False)
    self.assertEqual(m.agg_type, 'file')

def test_create_manager_with_invalid_test_list(self):
    # make sure we can create a manager
    m = manager.BanditManager(config=self.config, agg_type='file',
                              debug=False, verbose=False,
                              profile_name=['bogus'])
    self.assertEqual(m.debug, False)
    self.assertEqual(m.verbose, False)
    self.assertEqual(m.agg_type, 'file')
    self.assertFalse(m.has_tests)

def setUp(self):
    super(ManagerTests, self).setUp()
    self.profile = {}
    self.profile['include'] = {
        'any_other_function_with_shell_equals_true',
        'assert_used'}
    self.config = config.BanditConfig()
    self.manager = manager.BanditManager(config=self.config,
                                         agg_type='file',
                                         debug=False, verbose=False)

def test_create_manager_with_profile_bad(self):
    try:
        m = manager.BanditManager(config=self.config, agg_type='file',
                                  debug=False, verbose=False,
                                  profile_name='Bad')
    except utils.ProfileNotFound as e:
        err = str(e)
        self.assertTrue(
            err.startswith("Unable to find profile (Bad) in config file:"))

def setUp(self):
    super().setUp()
    self.profile = {}
    self.profile["include"] = {
        "any_other_function_with_shell_equals_true",
        "assert_used",
    }
    self.config = config.BanditConfig()
    self.manager = manager.BanditManager(
        config=self.config, agg_type="file", debug=False, verbose=False
    )

def test_no_issues(self, get_issue_list):
    conf = config.BanditConfig()
    self.manager = manager.BanditManager(conf, 'file')
    (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
    self.manager.out_file = self.tmp_fname

    get_issue_list.return_value = collections.OrderedDict()
    with mock.patch('bandit.formatters.screen.do_print') as m:
        tmp_file = open(self.tmp_fname, 'w')
        screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW,
                      lines=5)
        self.assertIn('No issues identified.',
                      '\n'.join([str(a) for a in m.call_args]))

def test_no_issues(self, get_issue_list):
    conf = config.BanditConfig()
    self.manager = manager.BanditManager(conf, 'file')
    (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
    self.manager.out_file = self.tmp_fname

    get_issue_list.return_value = collections.OrderedDict()
    tmp_file = open(self.tmp_fname, 'w')
    b_text.report(self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5)

    with open(self.tmp_fname) as f:
        data = f.read()
        self.assertIn('No issues identified.', data)

def test_no_issues(self, get_issue_list):
    cfg_file = os.path.join(os.getcwd(), 'bandit/config/bandit.yaml')
    conf = config.BanditConfig(cfg_file)
    self.manager = manager.BanditManager(conf, 'file')
    (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
    self.manager.out_file = self.tmp_fname

    get_issue_list.return_value = OrderedDict()
    b_text.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                  lines=5)

    with open(self.tmp_fname) as f:
        data = f.read()
        self.assertIn('No issues identified.', data)

def setUp(self):
    super(ManagerTests, self).setUp()
    contents = """
        profiles:
            Test:
                include:
                    - any_other_function_with_shell_equals_true
                    - assert_used
        """
    f = self.useFixture(TempFile(contents))
    self.config = config.BanditConfig(f.name)
    self.manager = manager.BanditManager(config=self.config,
                                         agg_type='file',
                                         debug=False, verbose=False,
                                         profile_name=None)

def test_no_issues(self, get_issue_list):
    cfg_file = os.path.join(os.getcwd(), 'bandit/config/bandit.yaml')
    conf = config.BanditConfig(cfg_file)
    self.manager = manager.BanditManager(conf, 'file')
    (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
    self.manager.out_file = self.tmp_fname

    get_issue_list.return_value = OrderedDict()
    with mock.patch('bandit.formatters.screen.do_print') as m:
        screen.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                      lines=5)
        self.assertIn('No issues identified.',
                      '\n'.join([str(a) for a in m.call_args]))

def _check_source(self):
    b_conf = b_config.BanditConfig()
    b_mgr = b_manager.BanditManager(b_conf, 'file', False)
    b_mgr.discover_files([self.filename])
    b_mgr.run_tests()

    # collect the issues bandit reported into plain dicts
    issues = []
    for item in b_mgr.get_issue_list():
        i = {}
        i["test_id"] = item.test_id
        i["issue_text"] = item.text
        i["line_number"] = item.lineno
        issues.append(i)

    # best-effort cleanup of the temporary source file that was scanned
    try:
        os.remove("tempbanditpythonfile.py")
    except Exception as e:
        self.error = e
    return issues

def test_no_issues(self, get_issue_list): conf = config.BanditConfig() self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() self.manager.out_file = self.tmp_fname get_issue_list.return_value = collections.OrderedDict() with mock.patch("bandit.formatters.screen.do_print") as m: with open(self.tmp_fname, "w") as tmp_file: screen.report( self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5 ) self.assertIn( "No issues identified.", "\n".join([str(a) for a in m.call_args]), )
def setUp(self):
    super(CsvFormatterTests, self).setUp()
    conf = config.BanditConfig()
    self.manager = manager.BanditManager(conf, 'file')
    (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
    self.context = {'filename': self.tmp_fname,
                    'lineno': 4,
                    'linerange': [4]}
    self.check_name = 'hardcoded_bind_all_interfaces'
    self.issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM,
                             'Possible binding to all interfaces.')
    self.manager.out_file = self.tmp_fname
    self.issue.fname = self.context['filename']
    self.issue.lineno = self.context['lineno']
    self.issue.linerange = self.context['linerange']
    self.issue.test = self.check_name
    self.manager.results.append(self.issue)

def test_report_baseline(self, get_issue_list): conf = config.BanditConfig() self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() self.manager.out_file = self.tmp_fname issue_a = _get_issue_instance() issue_b = _get_issue_instance() issue_x = _get_issue_instance() issue_x.fname = "x" issue_y = _get_issue_instance() issue_y.fname = "y" issue_z = _get_issue_instance() issue_z.fname = "z" get_issue_list.return_value = collections.OrderedDict([ (issue_a, [issue_x]), (issue_b, [issue_y, issue_z]) ]) # Validate that we're outputting the correct issues indent_val = " " * 10 output_str_fn = "bandit.formatters.text._output_issue_str" with mock.patch(output_str_fn) as output_str: output_str.return_value = "ISSUE_OUTPUT_TEXT" with open(self.tmp_fname, "w") as tmp_file: b_text.report(self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5) calls = [ mock.call(issue_a, "", lines=5), mock.call(issue_b, "", show_code=False, show_lineno=False), mock.call(issue_y, indent_val, lines=5), mock.call(issue_z, indent_val, lines=5), ] output_str.assert_has_calls(calls, any_order=True)
def test_report_baseline(self, get_issue_list):
    cfg_file = os.path.join(os.getcwd(), 'bandit/config/bandit.yaml')
    conf = config.BanditConfig(cfg_file)
    self.manager = manager.BanditManager(conf, 'file')
    (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
    self.manager.out_file = self.tmp_fname

    issue_a = _get_issue_instance()
    issue_b = _get_issue_instance()

    issue_x = _get_issue_instance()
    issue_x.fname = 'x'
    issue_y = _get_issue_instance()
    issue_y.fname = 'y'
    issue_z = _get_issue_instance()
    issue_z.fname = 'z'

    get_issue_list.return_value = OrderedDict(
        [(issue_a, [issue_x]), (issue_b, [issue_y, issue_z])])

    # Validate that we're outputting the correct issues
    indent_val = ' ' * 10
    output_str_fn = 'bandit.formatters.screen._output_issue_str'
    with mock.patch(output_str_fn) as output_str:
        output_str.return_value = 'ISSUE_OUTPUT_TEXT'

        screen.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                      lines=5)

        calls = [mock.call(issue_a, '', lines=5),
                 mock.call(issue_b, '', show_code=False, show_lineno=False),
                 mock.call(issue_y, indent_val, lines=5),
                 mock.call(issue_z, indent_val, lines=5)]

        output_str.assert_has_calls(calls, any_order=True)

def test_report_nobaseline(self, get_issue_list):
    cfg_file = os.path.join(os.getcwd(), 'bandit/config/bandit.yaml')
    conf = config.BanditConfig(cfg_file)
    self.manager = manager.BanditManager(conf, 'file')
    (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
    self.manager.out_file = self.tmp_fname

    self.manager.verbose = True
    self.manager.files_list = ['binding.py']

    self.manager.scores = [{'SEVERITY': [0, 0, 0, 1],
                            'CONFIDENCE': [0, 0, 0, 1]}]

    self.manager.skipped = [('abc.py', 'File is bad')]
    self.manager.excluded_files = ['def.py']

    issue_a = _get_issue_instance()
    issue_b = _get_issue_instance()

    get_issue_list.return_value = [issue_a, issue_b]

    self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50}
    for category in ['SEVERITY', 'CONFIDENCE']:
        for level in ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']:
            self.manager.metrics.data['_totals']['%s.%s' %
                                                 (category, level)] = 1

    # Validate that we're outputting the correct issues
    output_str_fn = 'bandit.formatters.text._output_issue_str'
    with mock.patch(output_str_fn) as output_str:
        output_str.return_value = 'ISSUE_OUTPUT_TEXT'

        b_text.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                      lines=5)

        calls = [mock.call(issue_a, '', lines=5),
                 mock.call(issue_b, '', lines=5)]

        output_str.assert_has_calls(calls, any_order=True)

    # Validate that we're outputting all of the expected fields and the
    # correct values
    b_text.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                  lines=5)
    with open(self.tmp_fname) as f:
        data = f.read()

        expected_items = ['Run started',
                          'Files in scope (1)',
                          'binding.py (score: ',
                          "CONFIDENCE: 1",
                          "SEVERITY: 1",
                          'Files excluded (1):',
                          'def.py',
                          'Undefined: 1',
                          'Low: 1',
                          'Medium: 1',
                          'High: 1',
                          'Total lines skipped ',
                          '(#nosec): 50',
                          'Total issues (by severity)',
                          'Total issues (by confidence)',
                          'Files skipped (1)',
                          'abc.py (File is bad)']

        for item in expected_items:
            self.assertIn(item, data)

def main():
    # bring our logging stuff up as early as possible
    debug = ('-d' in sys.argv or '--debug' in sys.argv)
    _init_logger(debug)

    extension_mgr = _init_extensions()

    baseline_formatters = [f.name for f in filter(
        lambda x: hasattr(x.plugin, '_accepts_baseline'),
        extension_mgr.formatters)]

    # now do normal startup
    parser = argparse.ArgumentParser(
        description='Bandit - a Python source code analyzer.',
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('targets', metavar='targets', type=str, nargs='+',
                        help='source file(s) or directory(s) to be tested')
    parser.add_argument('-r', '--recursive', dest='recursive',
                        action='store_true',
                        help='process files in subdirectories')
    parser.add_argument(
        '-a', '--aggregate', dest='agg_type', action='store',
        default='file', type=str, choices=['file', 'vuln'],
        help='group results by vulnerability type or file it occurs in')
    parser.add_argument(
        '-n', '--number', dest='context_lines', action='store',
        default=3, type=int,
        help='max number of code lines to display for each issue identified')
    parser.add_argument(
        '-c', '--configfile', dest='config_file', action='store',
        default=None, type=str,
        help=('optional config file to use for selecting plugins and '
              'overriding defaults'))
    parser.add_argument(
        '-p', '--profile', dest='profile', action='store',
        default=None, type=str,
        help='test set profile in config to use (defaults to all tests)')
    parser.add_argument('-t', '--tests', dest='tests', action='store',
                        default=None, type=str,
                        help='comma separated list of test IDs to run')
    parser.add_argument('-s', '--skip', dest='skips', action='store',
                        default=None, type=str,
                        help='comma separated list of test IDs to skip')
    parser.add_argument(
        '-l', '--level', dest='severity', action='count', default=1,
        help=('results severity filter. Show only issues of a given'
              ' severity level or higher. -l for LOW,'
              ' -ll for MEDIUM, -lll for HIGH'))
    parser.add_argument(
        '-i', '--confidence', dest='confidence', action='count', default=1,
        help='confidence results filter, show only issues of this '
             'level or higher. -i for LOW, -ii for MEDIUM, '
             '-iii for HIGH')
    output_format = 'screen' if sys.stdout.isatty() else 'txt'
    parser.add_argument('-f', '--format', dest='output_format',
                        action='store', default=output_format,
                        help='specify output format',
                        choices=sorted(extension_mgr.formatter_names))
    parser.add_argument('-o', '--output', dest='output_file', action='store',
                        default=None, help='write report to filename')
    parser.add_argument(
        '-v', '--verbose', dest='verbose', action='store_true',
        help='show extra information like excluded and included files')
    parser.add_argument('-d', '--debug', dest='debug', action='store_true',
                        help='turn on debug mode')
    parser.add_argument('--ignore-nosec', dest='ignore_nosec',
                        action='store_true',
                        help='do not skip lines with # nosec comments')
    parser.add_argument(
        '-x', '--exclude', dest='excluded_paths', action='store',
        default='',
        help='Comma separated list of paths to exclude from scan. '
             'Note that these are in addition to the excluded '
             'paths provided in the config file.')
    parser.add_argument('-b', '--baseline', dest='baseline', action='store',
                        default=None,
                        help=('Path to a baseline report. Only JSON formatted '
                              'files are accepted.'))
    parser.add_argument(
        '--ini', dest='ini_path', action='store', default=None,
        help='Path to a .bandit file which supplies command line arguments to '
             'Bandit.')
    parser.add_argument(
        '--version', action='version',
        version='%(prog)s {version}'.format(version=bandit.__version__))
    parser.set_defaults(debug=False)
    parser.set_defaults(verbose=False)
    parser.set_defaults(ignore_nosec=False)

    plugin_info = ["%s\t%s" % (a[0], a[1].name) for a in
                   six.iteritems(extension_mgr.plugins_by_id)]
    blacklist_info = []
    for a in six.iteritems(extension_mgr.blacklist):
        for b in a[1]:
            blacklist_info.append('%s\t%s' % (b['id'], b['name']))

    plugin_list = '\n\t'.join(sorted(set(plugin_info + blacklist_info)))
    parser.epilog = ('The following plugin suites were discovered and'
                     ' loaded:\n\t{0}\n'.format(plugin_list))

    # setup work - parse arguments, and initialize BanditManager
    args = parser.parse_args()
    try:
        b_conf = b_config.BanditConfig(config_file=args.config_file)
    except (utils.ConfigFileUnopenable, utils.ConfigFileInvalidYaml) as e:
        logger.error(e)
        sys.exit(2)

    # Handle .bandit files in projects to pass cmdline args from file
    ini_options = _get_options_from_ini(args.ini_path, args.targets)
    if ini_options:
        # prefer command line, then ini file
        args.excluded_paths = _log_option_source(args.excluded_paths,
                                                 ini_options.get('exclude'),
                                                 'excluded paths')
        args.skips = _log_option_source(args.skips, ini_options.get('skips'),
                                        'skipped tests')
        args.tests = _log_option_source(args.tests, ini_options.get('tests'),
                                        'selected tests')
        # TODO(tmcpeak): any other useful options to pass from .bandit?

    # if the log format string was set in the options, reinitialize
    if b_conf.get_option('log_format'):
        log_format = b_conf.get_option('log_format')
        _init_logger(debug, log_format=log_format)

    try:
        profile = _get_profile(b_conf, args.profile, args.config_file)
        _log_info(args, profile)

        profile['include'].update(args.tests.split(',') if args.tests else [])
        profile['exclude'].update(args.skips.split(',') if args.skips else [])
        extension_mgr.validate_profile(profile)

    except (utils.ProfileNotFound, ValueError) as e:
        logger.error(e)
        sys.exit(2)

    b_mgr = b_manager.BanditManager(b_conf, args.agg_type, args.debug,
                                    profile=profile, verbose=args.verbose,
                                    ignore_nosec=args.ignore_nosec)

    if args.baseline is not None:
        try:
            with open(args.baseline) as bl:
                data = bl.read()
                b_mgr.populate_baseline(data)
        except IOError:
            logger.warning("Could not open baseline report: %s",
                           args.baseline)
            sys.exit(2)

        if args.output_format not in baseline_formatters:
            logger.warning('Baseline must be used with one of the following '
                           'formats: ' + str(baseline_formatters))
            sys.exit(2)

    if args.output_format != "json":
        if args.config_file:
            logger.info("using config: %s", args.config_file)

        logger.info("running on Python %d.%d.%d", sys.version_info.major,
                    sys.version_info.minor, sys.version_info.micro)

    # initiate file discovery step within Bandit Manager
    b_mgr.discover_files(args.targets, args.recursive, args.excluded_paths)

    if not b_mgr.b_ts.tests:
        logger.error('No tests would be run, please check the profile.')
        sys.exit(2)

    # initiate execution of tests within Bandit Manager
    b_mgr.run_tests()
    logger.debug(b_mgr.b_ma)
    logger.debug(b_mgr.metrics)

    # trigger output of results by Bandit Manager
    sev_level = constants.RANKING[args.severity - 1]
    conf_level = constants.RANKING[args.confidence - 1]
    b_mgr.output_results(args.context_lines, sev_level, conf_level,
                         args.output_file, args.output_format)

    # return an exit code of 1 if there are results, 0 otherwise
    if b_mgr.results_count(sev_filter=sev_level, conf_filter=conf_level) > 0:
        sys.exit(1)
    else:
        sys.exit(0)

def test_report_nobaseline(self, get_issue_list):
    conf = config.BanditConfig()
    self.manager = manager.BanditManager(conf, 'file')
    (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
    self.manager.out_file = self.tmp_fname

    self.manager.verbose = True
    self.manager.files_list = ['binding.py']

    self.manager.scores = [{'SEVERITY': [0, 0, 0, 1],
                            'CONFIDENCE': [0, 0, 0, 1]}]

    self.manager.skipped = [('abc.py', 'File is bad')]
    self.manager.excluded_files = ['def.py']

    issue_a = _get_issue_instance()
    issue_b = _get_issue_instance()

    get_issue_list.return_value = [issue_a, issue_b]

    self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50}
    for category in ['SEVERITY', 'CONFIDENCE']:
        for level in ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']:
            self.manager.metrics.data['_totals']['%s.%s' %
                                                 (category, level)] = 1

    # Validate that we're outputting the correct issues
    output_str_fn = 'bandit.formatters.screen._output_issue_str'
    with mock.patch(output_str_fn) as output_str:
        output_str.return_value = 'ISSUE_OUTPUT_TEXT'

        tmp_file = open(self.tmp_fname, 'w')
        screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW,
                      lines=5)

        calls = [mock.call(issue_a, '', lines=5),
                 mock.call(issue_b, '', lines=5)]

        output_str.assert_has_calls(calls, any_order=True)

    # Validate that we're outputting all of the expected fields and the
    # correct values
    with mock.patch('bandit.formatters.screen.do_print') as m:
        tmp_file = open(self.tmp_fname, 'w')
        screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW,
                      lines=5)

        data = '\n'.join([str(a) for a in m.call_args[0][0]])

        expected = 'Run started'
        self.assertIn(expected, data)

        expected_items = [
            screen.header('Files in scope (1):'),
            '\n\tbinding.py (score: {SEVERITY: 1, CONFIDENCE: 1})']

        for item in expected_items:
            self.assertIn(item, data)

        expected = screen.header('Files excluded (1):') + '\n\tdef.py'
        self.assertIn(expected, data)

        expected = ('Total lines of code: 1000\n\tTotal lines skipped '
                    '(#nosec): 50')
        self.assertIn(expected, data)

        expected = ('Total issues (by severity):\n\t\tUndefined: 1\n\t\t'
                    'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
        self.assertIn(expected, data)

        expected = ('Total issues (by confidence):\n\t\tUndefined: 1\n\t\t'
                    'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
        self.assertIn(expected, data)

        expected = (screen.header('Files skipped (1):') +
                    '\n\tabc.py (File is bad)')
        self.assertIn(expected, data)

def main():
    # bring our logging stuff up as early as possible
    debug = (logging.DEBUG if '-d' in sys.argv or '--debug' in sys.argv
             else logging.INFO)
    _init_logger(debug)
    extension_mgr = _init_extensions()

    baseline_formatters = [f.name for f in filter(
        lambda x: hasattr(x.plugin, '_accepts_baseline'),
        extension_mgr.formatters)]

    # now do normal startup
    parser = argparse.ArgumentParser(
        description='Bandit - a Python source code security analyzer',
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('targets', metavar='targets', type=str, nargs='*',
                        help='source file(s) or directory(s) to be tested')
    parser.add_argument('-r', '--recursive', dest='recursive',
                        action='store_true',
                        help='find and process files in subdirectories')
    parser.add_argument(
        '-a', '--aggregate', dest='agg_type', action='store',
        default='file', type=str, choices=['file', 'vuln'],
        help='aggregate output by vulnerability (default) or by filename')
    parser.add_argument(
        '-n', '--number', dest='context_lines', action='store',
        default=3, type=int,
        help='maximum number of code lines to output for each issue')
    parser.add_argument(
        '-c', '--configfile', dest='config_file', action='store',
        default=None, type=str,
        help='optional config file to use for selecting plugins and '
             'overriding defaults')
    parser.add_argument(
        '-p', '--profile', dest='profile', action='store',
        default=None, type=str,
        help='profile to use (defaults to executing all tests)')
    parser.add_argument('-t', '--tests', dest='tests', action='store',
                        default=None, type=str,
                        help='comma-separated list of test IDs to run')
    parser.add_argument('-s', '--skip', dest='skips', action='store',
                        default=None, type=str,
                        help='comma-separated list of test IDs to skip')
    parser.add_argument(
        '-l', '--level', dest='severity', action='count', default=1,
        help='report only issues of a given severity level or '
             'higher (-l for LOW, -ll for MEDIUM, -lll for HIGH)')
    parser.add_argument(
        '-i', '--confidence', dest='confidence', action='count', default=1,
        help='report only issues of a given confidence level or '
             'higher (-i for LOW, -ii for MEDIUM, -iii for HIGH)')
    output_format = 'screen' if sys.stdout.isatty() else 'txt'
    parser.add_argument('-f', '--format', dest='output_format',
                        action='store', default=output_format,
                        help='specify output format',
                        choices=sorted(extension_mgr.formatter_names))
    parser.add_argument(
        '--msg-template', action='store', default=None,
        help='specify output message template'
             ' (only usable with --format custom),'
             ' see CUSTOM FORMAT section'
             ' for list of available values',
    )
    parser.add_argument('-o', '--output', dest='output_file', action='store',
                        nargs='?', type=argparse.FileType('w'),
                        default=sys.stdout, help='write report to filename')
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument(
        '-v', '--verbose', dest='verbose', action='store_true',
        help='output extra information like excluded and included files')
    parser.add_argument('-d', '--debug', dest='debug', action='store_true',
                        help='turn on debug mode')
    group.add_argument('-q', '--quiet', '--silent', dest='quiet',
                       action='store_true',
                       help='only show output in the case of an error')
    parser.add_argument('--ignore-nosec', dest='ignore_nosec',
                        action='store_true',
                        help='do not skip lines with # nosec comments')
    parser.add_argument(
        '-x', '--exclude', dest='excluded_paths', action='store',
        default='',
        help='comma-separated list of paths (glob patterns '
             'supported) to exclude from scan '
             '(note that these are in addition to the excluded '
             'paths provided in the config file)')
    parser.add_argument(
        '-b', '--baseline', dest='baseline', action='store',
        default=None,
        help='path of a baseline report to compare against '
             '(only JSON-formatted files are accepted)')
    parser.add_argument(
        '--ini', dest='ini_path', action='store', default=None,
        help='path to a .bandit file that supplies command line arguments')
    python_ver = sys.version.replace('\n', '')
    parser.add_argument(
        '--version', action='version',
        version='%(prog)s {version}\n python version = {python}'.format(
            version=bandit.__version__, python=python_ver))

    parser.set_defaults(debug=False)
    parser.set_defaults(verbose=False)
    parser.set_defaults(quiet=False)
    parser.set_defaults(ignore_nosec=False)

    plugin_info = ["%s\t%s" % (a[0], a[1].name) for a in
                   extension_mgr.plugins_by_id.items()]
    blacklist_info = []
    for a in extension_mgr.blacklist.items():
        for b in a[1]:
            blacklist_info.append('%s\t%s' % (b['id'], b['name']))

    plugin_list = '\n\t'.join(sorted(set(plugin_info + blacklist_info)))
    dedent_text = textwrap.dedent('''
    CUSTOM FORMATTING
    -----------------

    Available tags:

        {abspath}, {relpath}, {line}, {test_id},
        {severity}, {msg}, {confidence}, {range}

    Example usage:

        Default template:
        bandit -r examples/ --format custom --msg-template \\
        "{abspath}:{line}: {test_id}[bandit]: {severity}: {msg}"

        Provides same output as:
        bandit -r examples/ --format custom

        Tags can also be formatted in python string.format() style:
        bandit -r examples/ --format custom --msg-template \\
        "{relpath:20.20s}: {line:03}: {test_id:^8}: DEFECT: {msg:>20}"

        See python documentation for more information about formatting style:
        https://docs.python.org/3.4/library/string.html

    The following tests were discovered and loaded:
    -----------------------------------------------
    ''')
    parser.epilog = dedent_text + "\t{0}".format(plugin_list)

    # setup work - parse arguments, and initialize BanditManager
    args = parser.parse_args()
    # Check if `--msg-template` is not present without custom formatter
    if args.output_format != 'custom' and args.msg_template is not None:
        parser.error("--msg-template can only be used with --format=custom")

    try:
        b_conf = b_config.BanditConfig(config_file=args.config_file)
    except utils.ConfigError as e:
        LOG.error(e)
        sys.exit(2)

    # Handle .bandit files in projects to pass cmdline args from file
    ini_options = _get_options_from_ini(args.ini_path, args.targets)
    if ini_options:
        # prefer command line, then ini file
        args.excluded_paths = _log_option_source(args.excluded_paths,
                                                 ini_options.get('exclude'),
                                                 'excluded paths')
        args.skips = _log_option_source(args.skips, ini_options.get('skips'),
                                        'skipped tests')
        args.tests = _log_option_source(args.tests, ini_options.get('tests'),
                                        'selected tests')
        ini_targets = ini_options.get('targets')
        if ini_targets:
            ini_targets = ini_targets.split(',')
        args.targets = _log_option_source(args.targets, ini_targets,
                                          'selected targets')
        # TODO(tmcpeak): any other useful options to pass from .bandit?

    if not args.targets:
        LOG.error("No targets found in CLI or ini files, exiting.")
        sys.exit(2)

    # if the log format string was set in the options, reinitialize
    if b_conf.get_option('log_format'):
        log_format = b_conf.get_option('log_format')
        _init_logger(log_level=logging.DEBUG, log_format=log_format)

    if args.quiet:
        _init_logger(log_level=logging.WARN)

    try:
        profile = _get_profile(b_conf, args.profile, args.config_file)
        _log_info(args, profile)

        profile['include'].update(args.tests.split(',') if args.tests else [])
        profile['exclude'].update(args.skips.split(',') if args.skips else [])
        extension_mgr.validate_profile(profile)

    except (utils.ProfileNotFound, ValueError) as e:
        LOG.error(e)
        sys.exit(2)

    b_mgr = b_manager.BanditManager(b_conf, args.agg_type, args.debug,
                                    profile=profile, verbose=args.verbose,
                                    quiet=args.quiet,
                                    ignore_nosec=args.ignore_nosec)

    if args.baseline is not None:
        try:
            with open(args.baseline) as bl:
                data = bl.read()
                b_mgr.populate_baseline(data)
        except IOError:
            LOG.warning("Could not open baseline report: %s", args.baseline)
            sys.exit(2)

        if args.output_format not in baseline_formatters:
            LOG.warning('Baseline must be used with one of the following '
                        'formats: ' + str(baseline_formatters))
            sys.exit(2)

    if args.output_format != "json":
        if args.config_file:
            LOG.info("using config: %s", args.config_file)

        LOG.info("running on Python %d.%d.%d", sys.version_info.major,
                 sys.version_info.minor, sys.version_info.micro)

    # initiate file discovery step within Bandit Manager
    b_mgr.discover_files(args.targets, args.recursive, args.excluded_paths)

    if not b_mgr.b_ts.tests:
        LOG.error('No tests would be run, please check the profile.')
        sys.exit(2)

    # initiate execution of tests within Bandit Manager
    b_mgr.run_tests()
    LOG.debug(b_mgr.b_ma)
    LOG.debug(b_mgr.metrics)

    # trigger output of results by Bandit Manager
    sev_level = constants.RANKING[args.severity - 1]
    conf_level = constants.RANKING[args.confidence - 1]
    b_mgr.output_results(args.context_lines, sev_level, conf_level,
                         args.output_file, args.output_format,
                         args.msg_template)

    # return an exit code of 1 if there are results, 0 otherwise
    if b_mgr.results_count(sev_filter=sev_level, conf_filter=conf_level) > 0:
        sys.exit(1)
    else:
        sys.exit(0)

def test_report_nobaseline(self, get_issue_list): conf = config.BanditConfig() self.manager = manager.BanditManager(conf, "file") (tmp_fd, self.tmp_fname) = tempfile.mkstemp() self.manager.out_file = self.tmp_fname self.manager.verbose = True self.manager.files_list = ["binding.py"] self.manager.scores = [ {"SEVERITY": [0, 0, 0, 1], "CONFIDENCE": [0, 0, 0, 1]} ] self.manager.skipped = [("abc.py", "File is bad")] self.manager.excluded_files = ["def.py"] issue_a = _get_issue_instance() issue_b = _get_issue_instance() get_issue_list.return_value = [issue_a, issue_b] self.manager.metrics.data["_totals"] = {"loc": 1000, "nosec": 50} for category in ["SEVERITY", "CONFIDENCE"]: for level in ["UNDEFINED", "LOW", "MEDIUM", "HIGH"]: self.manager.metrics.data["_totals"][f"{category}.{level}"] = 1 # Validate that we're outputting the correct issues output_str_fn = "bandit.formatters.screen._output_issue_str" with mock.patch(output_str_fn) as output_str: output_str.return_value = "ISSUE_OUTPUT_TEXT" with open(self.tmp_fname, "w") as tmp_file: screen.report( self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5 ) calls = [ mock.call(issue_a, "", lines=5), mock.call(issue_b, "", lines=5), ] output_str.assert_has_calls(calls, any_order=True) # Validate that we're outputting all of the expected fields and the # correct values with mock.patch("bandit.formatters.screen.do_print") as m: with open(self.tmp_fname, "w") as tmp_file: screen.report( self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5 ) data = "\n".join([str(a) for a in m.call_args[0][0]]) expected = "Run started" self.assertIn(expected, data) expected_items = [ screen.header("Files in scope (1):"), "\n\tbinding.py (score: {SEVERITY: 1, CONFIDENCE: 1})", ] for item in expected_items: self.assertIn(item, data) expected = screen.header("Files excluded (1):") + "\n\tdef.py" self.assertIn(expected, data) expected = ( "Total lines of code: 1000\n\tTotal lines skipped " "(#nosec): 50" ) self.assertIn(expected, data) expected = ( "Total issues (by severity):\n\t\tUndefined: 1\n\t\t" "Low: 1\n\t\tMedium: 1\n\t\tHigh: 1" ) self.assertIn(expected, data) expected = ( "Total issues (by confidence):\n\t\tUndefined: 1\n\t\t" "Low: 1\n\t\tMedium: 1\n\t\tHigh: 1" ) self.assertIn(expected, data) expected = ( screen.header("Files skipped (1):") + "\n\tabc.py (File is bad)" ) self.assertIn(expected, data)