Example #1
    def test_no_issues(self, get_issue_list):
        cfg_file = os.path.join(os.getcwd(), 'bandit/config/bandit.yaml')
        conf = config.BanditConfig(cfg_file)
        self.manager = manager.BanditManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        get_issue_list.return_value = OrderedDict()
        with mock.patch('bandit.formatters.screen.do_print') as m:
            screen.report(self.manager,
                          self.tmp_fname,
                          bandit.LOW,
                          bandit.LOW,
                          lines=5)
            self.assertIn('No issues identified.',
                          '\n'.join([str(a) for a in m.call_args]))
Example #2
    def test_no_issues(self, get_issue_list):
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        get_issue_list.return_value = collections.OrderedDict()
        with open(self.tmp_fname, 'w') as tmp_file:
            b_text.report(self.manager,
                          tmp_file,
                          bandit.LOW,
                          bandit.LOW,
                          lines=5)

        with open(self.tmp_fname) as f:
            data = f.read()
            self.assertIn('No issues identified.', data)
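A standalone sketch of the same flow, outside the test harness (assuming the imports these tests rely on: import bandit, from bandit.core import config and manager, and from bandit.formatters import text as b_text); with no results collected, the text formatter is expected to emit the 'No issues identified.' message:

import tempfile

import bandit
from bandit.core import config
from bandit.core import manager
from bandit.formatters import text as b_text

conf = config.BanditConfig()
mgr = manager.BanditManager(conf, 'file')

# Render a report for a manager that holds no results.
(tmp_fd, tmp_fname) = tempfile.mkstemp()
with open(tmp_fname, 'w') as tmp_file:
    b_text.report(mgr, tmp_file, bandit.LOW, bandit.LOW, lines=5)

with open(tmp_fname) as f:
    print(f.read())  # expected to include 'No issues identified.'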
Example #3
    def setUp(self):
        super(CsvFormatterTests, self).setUp()
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, 'file')
        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.context = {'filename': self.tmp_fname,
                        'lineno': 4,
                        'linerange': [4]}
        self.check_name = 'hardcoded_bind_all_interfaces'
        self.issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM,
                                 'Possible binding to all interfaces.')
        self.manager.out_file = self.tmp_fname

        self.issue.fname = self.context['filename']
        self.issue.lineno = self.context['lineno']
        self.issue.linerange = self.context['linerange']
        self.issue.test = self.check_name

        self.manager.results.append(self.issue)
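A test built on this setUp would then hand the populated manager to the CSV formatter and read the output back. A hypothetical continuation, assuming b_csv aliases bandit.formatters.csv and that its report() accepts the same (manager, output, severity, confidence) arguments as the text formatter in Example #2:

    def test_report(self):
        with open(self.tmp_fname, 'w') as tmp_file:
            b_csv.report(self.manager, tmp_file, bandit.MEDIUM, bandit.MEDIUM)

        with open(self.tmp_fname) as f:
            data = f.read()
            # the CSV output is expected to carry the issue's test name
            # and text set up above
            self.assertIn(self.check_name, data)
            self.assertIn('Possible binding to all interfaces.', data)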
Example #4
    def test_report_baseline(self, get_issue_list):
        cfg_file = os.path.join(os.getcwd(), 'bandit/config/bandit.yaml')
        conf = config.BanditConfig(cfg_file)
        self.manager = manager.BanditManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        issue_x = _get_issue_instance()
        issue_x.fname = 'x'
        issue_y = _get_issue_instance()
        issue_y.fname = 'y'
        issue_z = _get_issue_instance()
        issue_z.fname = 'z'

        get_issue_list.return_value = OrderedDict([(issue_a, [issue_x]),
                                                   (issue_b,
                                                    [issue_y, issue_z])])

        # Validate that we're outputting the correct issues
        indent_val = ' ' * 10
        output_str_fn = 'bandit.formatters.screen._output_issue_str'
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = 'ISSUE_OUTPUT_TEXT'

            screen.report(self.manager,
                          self.tmp_fname,
                          bandit.LOW,
                          bandit.LOW,
                          lines=5)

            calls = [
                mock.call(issue_a, '', lines=5),
                mock.call(issue_b, '', show_code=False, show_lineno=False),
                mock.call(issue_y, indent_val, lines=5),
                mock.call(issue_z, indent_val, lines=5)
            ]

            output_str.assert_has_calls(calls, any_order=True)
Example #5
    def test_report_baseline(self, get_issue_list):
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, "file")

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        issue_x = _get_issue_instance()
        issue_x.fname = "x"
        issue_y = _get_issue_instance()
        issue_y.fname = "y"
        issue_z = _get_issue_instance()
        issue_z.fname = "z"

        get_issue_list.return_value = collections.OrderedDict([
            (issue_a, [issue_x]), (issue_b, [issue_y, issue_z])
        ])

        # Validate that we're outputting the correct issues
        indent_val = " " * 10
        output_str_fn = "bandit.formatters.text._output_issue_str"
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = "ISSUE_OUTPUT_TEXT"

            with open(self.tmp_fname, "w") as tmp_file:
                b_text.report(self.manager,
                              tmp_file,
                              bandit.LOW,
                              bandit.LOW,
                              lines=5)

            calls = [
                mock.call(issue_a, "", lines=5),
                mock.call(issue_b, "", show_code=False, show_lineno=False),
                mock.call(issue_y, indent_val, lines=5),
                mock.call(issue_z, indent_val, lines=5),
            ]

            output_str.assert_has_calls(calls, any_order=True)
Example #6
    def setUp(self):
        super().setUp()
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, "file")
        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.context = {
            "filename": self.tmp_fname,
            "lineno": 4,
            "linerange": [4],
        }
        self.check_name = "hardcoded_bind_all_interfaces"
        self.issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM,
                                 "Possible binding to all interfaces.")
        self.manager.out_file = self.tmp_fname

        self.issue.fname = self.context["filename"]
        self.issue.lineno = self.context["lineno"]
        self.issue.linerange = self.context["linerange"]
        self.issue.test = self.check_name

        self.manager.results.append(self.issue)
Example #7
    def setUp(self):
        super().setUp()
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, "file")
        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.context = {
            "filename": self.tmp_fname,
            "lineno": 4,
            "linerange": [4],
        }
        self.check_name = "hardcoded_bind_all_interfaces"
        self.issue = issue.Issue(
            bandit.MEDIUM,
            123,
            bandit.MEDIUM,
            "Possible binding to all interfaces.",
        )

        self.candidates = [
            issue.Issue(bandit.LOW, 123, bandit.LOW, "Candidate A", lineno=1),
            issue.Issue(bandit.HIGH, 123, bandit.HIGH, "Candidate B", lineno=2),
        ]

        self.manager.out_file = self.tmp_fname

        self.issue.fname = self.context["filename"]
        self.issue.lineno = self.context["lineno"]
        self.issue.linerange = self.context["linerange"]
        self.issue.test = self.check_name

        self.manager.results.append(self.issue)
        self.manager.metrics = metrics.Metrics()

        # mock up the metrics
        for key in ["_totals", "binding.py"]:
            self.manager.metrics.data[key] = {"loc": 4, "nosec": 2}
            for (criteria, default) in constants.CRITERIA:
                for rank in constants.RANKING:
                    self.manager.metrics.data[key][f"{criteria}.{rank}"] = 0
Example #8
    def setUp(self):
        super(JsonFormatterTests, self).setUp()
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, 'file')
        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.context = {
            'filename': self.tmp_fname,
            'lineno': 4,
            'linerange': [4]
        }
        self.check_name = 'hardcoded_bind_all_interfaces'
        self.issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM,
                                 'Possible binding to all interfaces.')

        self.candidates = [
            issue.Issue(bandit.LOW, bandit.LOW, 'Candidate A', lineno=1),
            issue.Issue(bandit.HIGH, bandit.HIGH, 'Candidate B', lineno=2)
        ]

        self.manager.out_file = self.tmp_fname

        self.issue.fname = self.context['filename']
        self.issue.lineno = self.context['lineno']
        self.issue.linerange = self.context['linerange']
        self.issue.test = self.check_name

        self.manager.results.append(self.issue)
        self.manager.metrics = metrics.Metrics()

        # mock up the metrics
        for key in ['_totals', 'binding.py']:
            self.manager.metrics.data[key] = {'loc': 4, 'nosec': 2}
            for (criteria, default) in constants.CRITERIA:
                for rank in constants.RANKING:
                    self.manager.metrics.data[key]['{0}.{1}'.format(
                        criteria, rank)] = 0
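The nested loop above seeds one zeroed counter per criterion/rank pair. A small sketch of the keys it generates, assuming constants.CRITERIA pairs 'SEVERITY' and 'CONFIDENCE' with a default rank and constants.RANKING is the UNDEFINED/LOW/MEDIUM/HIGH ladder used in the report tests below:

from bandit.core import constants

for criteria, default in constants.CRITERIA:
    for rank in constants.RANKING:
        # e.g. 'SEVERITY.UNDEFINED', 'SEVERITY.LOW', ... 'CONFIDENCE.HIGH'
        print('{0}.{1}'.format(criteria, rank))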
Example #9
def main():
    # bring our logging stuff up as early as possible
    debug = ('-d' in sys.argv or '--debug' in sys.argv)
    _init_logger(debug)
    extension_mgr = _init_extensions()

    baseline_formatters = [
        f.name
        for f in filter(lambda x: hasattr(x.plugin, '_accepts_baseline'),
                        extension_mgr.formatters)
    ]

    # now do normal startup
    parser = argparse.ArgumentParser(
        description='Bandit - a Python source code analyzer.',
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('targets',
                        metavar='targets',
                        type=str,
                        nargs='+',
                        help='source file(s) or directory(s) to be tested')
    parser.add_argument('-r',
                        '--recursive',
                        dest='recursive',
                        action='store_true',
                        help='process files in subdirectories')
    parser.add_argument(
        '-a',
        '--aggregate',
        dest='agg_type',
        action='store',
        default='file',
        type=str,
        choices=['file', 'vuln'],
        help='group results by vulnerability type or file it occurs in')
    parser.add_argument(
        '-n',
        '--number',
        dest='context_lines',
        action='store',
        default=3,
        type=int,
        help='max number of code lines to display for each issue identified')
    parser.add_argument(
        '-c',
        '--configfile',
        dest='config_file',
        action='store',
        default=None,
        type=str,
        help=('optional config file to use for selecting plugins and '
              'overriding defaults'))
    parser.add_argument(
        '-p',
        '--profile',
        dest='profile',
        action='store',
        default=None,
        type=str,
        help='test set profile in config to use (defaults to all tests)')
    parser.add_argument('-t',
                        '--tests',
                        dest='tests',
                        action='store',
                        default=None,
                        type=str,
                        help='comma separated list of test IDs to run')
    parser.add_argument('-s',
                        '--skip',
                        dest='skips',
                        action='store',
                        default=None,
                        type=str,
                        help='comma separated list of test IDs to skip')
    parser.add_argument(
        '-l',
        '--level',
        dest='severity',
        action='count',
        default=1,
        help=('results severity filter. Show only issues of a given'
              ' severity level or higher. -l for LOW,'
              ' -ll for MEDIUM, -lll for HIGH'))
    parser.add_argument(
        '-i',
        '--confidence',
        dest='confidence',
        action='count',
        default=1,
        help='confidence results filter, show only issues of this '
        'level or higher. -i for LOW, -ii for MEDIUM, '
        '-iii for HIGH')
    output_format = 'screen' if sys.stdout.isatty() else 'txt'
    parser.add_argument('-f',
                        '--format',
                        dest='output_format',
                        action='store',
                        default=output_format,
                        help='specify output format',
                        choices=sorted(extension_mgr.formatter_names))
    parser.add_argument('-o',
                        '--output',
                        dest='output_file',
                        action='store',
                        default=None,
                        help='write report to filename')
    parser.add_argument(
        '-v',
        '--verbose',
        dest='verbose',
        action='store_true',
        help='show extra information like excluded and included files')
    parser.add_argument('-d',
                        '--debug',
                        dest='debug',
                        action='store_true',
                        help='turn on debug mode')
    parser.add_argument('--ignore-nosec',
                        dest='ignore_nosec',
                        action='store_true',
                        help='do not skip lines with # nosec comments')
    parser.add_argument(
        '-x',
        '--exclude',
        dest='excluded_paths',
        action='store',
        default='',
        help='Comma separated list of paths to exclude from scan. '
        'Note that these are in addition to the excluded '
        'paths provided in the config file.')
    parser.add_argument('-b',
                        '--baseline',
                        dest='baseline',
                        action='store',
                        default=None,
                        help=('Path to a baseline report. Only JSON formatted '
                              'files are accepted.'))
    parser.add_argument(
        '--ini',
        dest='ini_path',
        action='store',
        default=None,
        help='Path to a .bandit file which supplies command line arguments to '
        'Bandit.')
    parser.add_argument(
        '--version',
        action='version',
        version='%(prog)s {version}'.format(version=bandit.__version__))
    parser.set_defaults(debug=False)
    parser.set_defaults(verbose=False)
    parser.set_defaults(ignore_nosec=False)

    plugin_info = [
        "%s\t%s" % (a[0], a[1].name)
        for a in six.iteritems(extension_mgr.plugins_by_id)
    ]
    blacklist_info = []
    for a in six.iteritems(extension_mgr.blacklist):
        for b in a[1]:
            blacklist_info.append('%s\t%s' % (b['id'], b['name']))

    plugin_list = '\n\t'.join(sorted(set(plugin_info + blacklist_info)))
    parser.epilog = ('The following plugin suites were discovered and'
                     ' loaded:\n\t{0}\n'.format(plugin_list))

    # setup work - parse arguments, and initialize BanditManager
    args = parser.parse_args()

    try:
        b_conf = b_config.BanditConfig(config_file=args.config_file)
    except (utils.ConfigFileUnopenable, utils.ConfigFileInvalidYaml) as e:
        logger.error(e)
        sys.exit(2)

    # Handle .bandit files in projects to pass cmdline args from file
    ini_options = _get_options_from_ini(args.ini_path, args.targets)
    if ini_options:
        # prefer command line, then ini file
        args.excluded_paths = _log_option_source(args.excluded_paths,
                                                 ini_options.get('exclude'),
                                                 'excluded paths')

        args.skips = _log_option_source(args.skips, ini_options.get('skips'),
                                        'skipped tests')

        args.tests = _log_option_source(args.tests, ini_options.get('tests'),
                                        'selected tests')
        # TODO(tmcpeak): any other useful options to pass from .bandit?

    # if the log format string was set in the options, reinitialize
    if b_conf.get_option('log_format'):
        log_format = b_conf.get_option('log_format')
        _init_logger(debug, log_format=log_format)

    try:
        profile = _get_profile(b_conf, args.profile, args.config_file)
        _log_info(args, profile)

        profile['include'].update(args.tests.split(',') if args.tests else [])
        profile['exclude'].update(args.skips.split(',') if args.skips else [])
        extension_mgr.validate_profile(profile)

    except (utils.ProfileNotFound, ValueError) as e:
        logger.error(e)
        sys.exit(2)

    b_mgr = b_manager.BanditManager(b_conf,
                                    args.agg_type,
                                    args.debug,
                                    profile=profile,
                                    verbose=args.verbose,
                                    ignore_nosec=args.ignore_nosec)

    if args.baseline is not None:
        try:
            with open(args.baseline) as bl:
                data = bl.read()
                b_mgr.populate_baseline(data)
        except IOError:
            logger.warning("Could not open baseline report: %s", args.baseline)
            sys.exit(2)

        if args.output_format not in baseline_formatters:
            logger.warning('Baseline must be used with one of the following '
                           'formats: ' + str(baseline_formatters))
            sys.exit(2)

    if args.output_format != "json":
        if args.config_file:
            logger.info("using config: %s", args.config_file)

        logger.info("running on Python %d.%d.%d", sys.version_info.major,
                    sys.version_info.minor, sys.version_info.micro)

    # initiate file discovery step within Bandit Manager
    b_mgr.discover_files(args.targets, args.recursive, args.excluded_paths)

    if not b_mgr.b_ts.tests:
        logger.error('No tests would be run, please check the profile.')
        sys.exit(2)

    # initiate execution of tests within Bandit Manager
    b_mgr.run_tests()
    logger.debug(b_mgr.b_ma)
    logger.debug(b_mgr.metrics)

    # trigger output of results by Bandit Manager
    sev_level = constants.RANKING[args.severity - 1]
    conf_level = constants.RANKING[args.confidence - 1]
    b_mgr.output_results(args.context_lines, sev_level, conf_level,
                         args.output_file, args.output_format)

    # return an exit code of 1 if there are results, 0 otherwise
    if b_mgr.results_count(sev_filter=sev_level, conf_filter=conf_level) > 0:
        sys.exit(1)
    else:
        sys.exit(0)
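Stripped of argument parsing, main() boils down to a handful of manager calls. A minimal programmatic sketch using only the calls that appear above; the target path 'example_project/' is hypothetical:

import sys

import bandit
from bandit.core import config as b_config
from bandit.core import manager as b_manager

b_conf = b_config.BanditConfig(config_file=None)
b_mgr = b_manager.BanditManager(b_conf, 'file')

# discover targets, run the loaded tests, then emit a report to stdout
b_mgr.discover_files(['example_project/'], True, '')
b_mgr.run_tests()
b_mgr.output_results(3, bandit.LOW, bandit.LOW, None, 'txt')

# mirror the CLI exit codes: 1 if any issue was reported, 0 otherwise
sys.exit(1 if b_mgr.results_count(sev_filter=bandit.LOW,
                                  conf_filter=bandit.LOW) > 0 else 0)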
Example #10
    def test_report_nobaseline(self, get_issue_list):
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        self.manager.verbose = True
        self.manager.files_list = ['binding.py']

        self.manager.scores = [{'SEVERITY': [0, 0, 0, 1],
                                'CONFIDENCE': [0, 0, 0, 1]}]

        self.manager.skipped = [('abc.py', 'File is bad')]
        self.manager.excluded_files = ['def.py']

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        get_issue_list.return_value = [issue_a, issue_b]

        self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50}
        for category in ['SEVERITY', 'CONFIDENCE']:
            for level in ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']:
                self.manager.metrics.data['_totals']['%s.%s' %
                                                     (category, level)] = 1

        # Validate that we're outputting the correct issues
        output_str_fn = 'bandit.formatters.screen._output_issue_str'
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = 'ISSUE_OUTPUT_TEXT'

            with open(self.tmp_fname, 'w') as tmp_file:
                screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW,
                              lines=5)

            calls = [mock.call(issue_a, '', lines=5),
                     mock.call(issue_b, '', lines=5)]

            output_str.assert_has_calls(calls, any_order=True)

        # Validate that we're outputting all of the expected fields and the
        # correct values
        with mock.patch('bandit.formatters.screen.do_print') as m:
            with open(self.tmp_fname, 'w') as tmp_file:
                screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW,
                              lines=5)

            data = '\n'.join([str(a) for a in m.call_args[0][0]])

            expected = 'Run started'
            self.assertIn(expected, data)

            expected_items = [
                screen.header('Files in scope (1):'),
                '\n\tbinding.py (score: {SEVERITY: 1, CONFIDENCE: 1})']

            for item in expected_items:
                self.assertIn(item, data)

            expected = screen.header('Files excluded (1):') + '\n\tdef.py'
            self.assertIn(expected, data)

            expected = ('Total lines of code: 1000\n\tTotal lines skipped '
                        '(#nosec): 50')
            self.assertIn(expected, data)

            expected = ('Total issues (by severity):\n\t\tUndefined: 1\n\t\t'
                        'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
            self.assertIn(expected, data)

            expected = ('Total issues (by confidence):\n\t\tUndefined: 1\n\t\t'
                        'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
            self.assertIn(expected, data)

            expected = (screen.header('Files skipped (1):') +
                        '\n\tabc.py (File is bad)')
            self.assertIn(expected, data)
Example #11
 def setUp(self):
     super().setUp()
     f = self.useFixture(TempFile(self.sample, suffix=self.suffix))
     self.config = config.BanditConfig(f.name)
Example #12
 def test_bad_yaml(self):
     f = self.useFixture(TempFile("[]"))
     try:
         self.config = config.BanditConfig(f.name)
     except utils.ConfigError as e:
         self.assertIn("Error parsing file.", e.message)
Example #13
 def setUp(self):
     super(TestConfigCompat, self).setUp()
     f = self.useFixture(TempFile(self.sample_yaml))
     self.config = config.BanditConfig(f.name)
Example #14
 def setUp(self):
     super().setUp()
     test_yaml = "key: value"
     f = self.useFixture(TempFile(test_yaml))
     self.b_config = config.BanditConfig(f.name)
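The TempFile fixture used here simply writes the YAML to disk before handing the path to BanditConfig. The same setup can be sketched with the standard library alone, assuming get_option() resolves top-level YAML keys (the main() example above reads 'log_format' the same way):

import tempfile

from bandit.core import config

with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as f:
    f.write("key: value")
    cfg_path = f.name

cfg = config.BanditConfig(cfg_path)
print(cfg.get_option("key"))  # expected to print 'value'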
Example #15
def main():
    # bring our logging stuff up as early as possible
    debug = ('-d' in sys.argv or '--debug' in sys.argv)
    _init_logger(debug)
    # By default the path would be /etc/xdg/bandit; we want system paths
    os.environ['XDG_CONFIG_DIRS'] = '/etc:/usr/local/etc'
    extension_mgr = _init_extensions()

    baseline_formatters = [
        f.name
        for f in filter(lambda x: hasattr(x.plugin, '_accepts_baseline'),
                        extension_mgr.formatters)
    ]

    # now do normal startup
    parser = argparse.ArgumentParser(
        description='Bandit - a Python source code analyzer.',
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('targets',
                        metavar='targets',
                        type=str,
                        nargs='+',
                        help='source file(s) or directory(s) to be tested')
    parser.add_argument('-r',
                        '--recursive',
                        dest='recursive',
                        action='store_true',
                        help='process files in subdirectories')
    parser.add_argument(
        '-a',
        '--aggregate',
        dest='agg_type',
        action='store',
        default='file',
        type=str,
        choices=['file', 'vuln'],
        help='group results by vulnerability type or file it occurs in')
    parser.add_argument(
        '-n',
        '--number',
        dest='context_lines',
        action='store',
        default=3,
        type=int,
        help='max number of code lines to display for each issue identified')
    parser.add_argument('-c',
                        '--configfile',
                        dest='config_file',
                        action='store',
                        default=None,
                        type=str,
                        help=('if omitted default locations are checked. '
                              'Check documentation for searched paths'))
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        '-p',
        '--profile',
        dest='profile',
        action='store',
        default=None,
        type=str,
        help='test set profile in config to use (defaults to all tests)')
    group.add_argument('-t',
                       '--tests',
                       dest='tests',
                       action='store',
                       default=None,
                       type=str,
                       help='list of test names to run')
    parser.add_argument(
        '-l',
        '--level',
        dest='severity',
        action='count',
        default=1,
        help=('results severity filter. Show only issues of a given'
              ' severity level or higher. -l for LOW,'
              ' -ll for MEDIUM, -lll for HIGH'))
    parser.add_argument(
        '-i',
        '--confidence',
        dest='confidence',
        action='count',
        default=1,
        help='confidence results filter, show only issues of this '
        'level or higher. -i for LOW, -ii for MEDIUM, '
        '-iii for HIGH')
    parser.add_argument('-f',
                        '--format',
                        dest='output_format',
                        action='store',
                        default='screen',
                        help='specify output format',
                        choices=sorted(extension_mgr.formatter_names))
    parser.add_argument('-o',
                        '--output',
                        dest='output_file',
                        action='store',
                        default=None,
                        help='write report to filename')
    parser.add_argument(
        '-v',
        '--verbose',
        dest='verbose',
        action='store_true',
        help='show extra information like excluded and included files')
    parser.add_argument('-d',
                        '--debug',
                        dest='debug',
                        action='store_true',
                        help='turn on debug mode')
    parser.add_argument('--ignore-nosec',
                        dest='ignore_nosec',
                        action='store_true',
                        help='do not skip lines with # nosec comments')
    parser.add_argument(
        '-x',
        '--exclude',
        dest='excluded_paths',
        action='store',
        default='',
        help='Comma separated list of paths to exclude from scan. '
        'Note that these are in addition to the excluded '
        'paths provided in the config file.')
    parser.add_argument('-b',
                        '--baseline',
                        dest='baseline',
                        action='store',
                        default=None,
                        help='Path to a baseline report, in JSON format. '
                        'Note: baseline reports must be output in one of '
                        'the following formats: ' + str(baseline_formatters))
    parser.set_defaults(debug=False)
    parser.set_defaults(verbose=False)
    parser.set_defaults(ignore_nosec=False)

    plugin_info = [
        "%s\t%s" % (a[0], a[1].name)
        for a in six.iteritems(extension_mgr.plugins_by_id)
    ]

    plugin_list = '\n\t'.join(sorted(plugin_info))
    parser.epilog = ('The following plugin suites were discovered and'
                     ' loaded:\n\t{0}\n'.format(plugin_list))

    # setup work - parse arguments, and initialize BanditManager
    args = parser.parse_args()
    config_file = args.config_file
    if not config_file:
        try:
            config_file = _find_config()
        except utils.NoConfigFileFound as e:
            logger.error(e)
            sys.exit(2)

    try:
        b_conf = b_config.BanditConfig(config_file)
    except (utils.ConfigFileUnopenable, utils.ConfigFileInvalidYaml) as e:
        logger.error('%s', e)
        sys.exit(2)

    # if the log format string was set in the options, reinitialize
    if b_conf.get_option('log_format'):
        log_format = b_conf.get_option('log_format')
        _init_logger(debug, log_format=log_format)

    profile_name = args.tests.split(',') if args.tests else args.profile

    try:
        b_mgr = b_manager.BanditManager(b_conf,
                                        args.agg_type,
                                        args.debug,
                                        profile_name=profile_name,
                                        verbose=args.verbose,
                                        ignore_nosec=args.ignore_nosec)
    except utils.ProfileNotFound as e:
        logger.error(e)
        sys.exit(2)

    if args.baseline is not None:
        try:
            with open(args.baseline) as bl:
                data = bl.read()
                b_mgr.populate_baseline(data)
        except IOError:
            logger.warning("Could not open baseline report: %s", args.baseline)
            sys.exit(2)

        if args.output_format not in baseline_formatters:
            logger.warning('Baseline must be used with one of the following '
                           'formats: ' + str(baseline_formatters))
            sys.exit(2)

    if args.output_format != "json":
        logger.info("using config: %s", config_file)
        logger.info("running on Python %d.%d.%d", sys.version_info.major,
                    sys.version_info.minor, sys.version_info.micro)

    # no point running if there are no tests available
    if not b_mgr.has_tests:
        logger.error('Could not find any tests to apply, please check '
                     'the configuration.')
        sys.exit(2)

    # initiate file discovery step within Bandit Manager
    b_mgr.discover_files(args.targets, args.recursive, args.excluded_paths)

    # initiate execution of tests within Bandit Manager
    b_mgr.run_tests()
    logger.debug(b_mgr.b_ma)
    logger.debug(b_mgr.metrics)

    # trigger output of results by Bandit Manager
    sev_level = constants.RANKING[args.severity - 1]
    conf_level = constants.RANKING[args.confidence - 1]
    b_mgr.output_results(args.context_lines, sev_level, conf_level,
                         args.output_file, args.output_format)

    # return an exit code of 1 if there are results, 0 otherwise
    if b_mgr.results_count(sev_filter=sev_level, conf_filter=conf_level) > 0:
        sys.exit(1)
    else:
        sys.exit(0)
Example #16
 def setUp(self):
     super(TestGetSetting, self).setUp()
     test_yaml = 'key: value'
     f = self.useFixture(TempFile(test_yaml))
     self.b_config = config.BanditConfig(f.name)
Example #17
    def test_report_nobaseline(self, get_issue_list):
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, "file")

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        self.manager.verbose = True
        self.manager.files_list = ["binding.py"]

        self.manager.scores = [
            {"SEVERITY": [0, 0, 0, 1], "CONFIDENCE": [0, 0, 0, 1]}
        ]

        self.manager.skipped = [("abc.py", "File is bad")]
        self.manager.excluded_files = ["def.py"]

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        get_issue_list.return_value = [issue_a, issue_b]

        self.manager.metrics.data["_totals"] = {"loc": 1000, "nosec": 50}
        for category in ["SEVERITY", "CONFIDENCE"]:
            for level in ["UNDEFINED", "LOW", "MEDIUM", "HIGH"]:
                self.manager.metrics.data["_totals"][f"{category}.{level}"] = 1

        # Validate that we're outputting the correct issues
        output_str_fn = "bandit.formatters.screen._output_issue_str"
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = "ISSUE_OUTPUT_TEXT"

            with open(self.tmp_fname, "w") as tmp_file:
                screen.report(
                    self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5
                )

            calls = [
                mock.call(issue_a, "", lines=5),
                mock.call(issue_b, "", lines=5),
            ]

            output_str.assert_has_calls(calls, any_order=True)

        # Validate that we're outputting all of the expected fields and the
        # correct values
        with mock.patch("bandit.formatters.screen.do_print") as m:
            with open(self.tmp_fname, "w") as tmp_file:
                screen.report(
                    self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5
                )

            data = "\n".join([str(a) for a in m.call_args[0][0]])

            expected = "Run started"
            self.assertIn(expected, data)

            expected_items = [
                screen.header("Files in scope (1):"),
                "\n\tbinding.py (score: {SEVERITY: 1, CONFIDENCE: 1})",
            ]

            for item in expected_items:
                self.assertIn(item, data)

            expected = screen.header("Files excluded (1):") + "\n\tdef.py"
            self.assertIn(expected, data)

            expected = (
                "Total lines of code: 1000\n\tTotal lines skipped "
                "(#nosec): 50"
            )
            self.assertIn(expected, data)

            expected = (
                "Total issues (by severity):\n\t\tUndefined: 1\n\t\t"
                "Low: 1\n\t\tMedium: 1\n\t\tHigh: 1"
            )
            self.assertIn(expected, data)

            expected = (
                "Total issues (by confidence):\n\t\tUndefined: 1\n\t\t"
                "Low: 1\n\t\tMedium: 1\n\t\tHigh: 1"
            )
            self.assertIn(expected, data)

            expected = (
                screen.header("Files skipped (1):")
                + "\n\tabc.py (File is bad)"
            )
            self.assertIn(expected, data)
Example #18
    def test_report_nobaseline(self, get_issue_list):
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, "file")

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        self.manager.verbose = True
        self.manager.files_list = ["binding.py"]

        self.manager.scores = [{
            "SEVERITY": [0, 0, 0, 1],
            "CONFIDENCE": [0, 0, 0, 1]
        }]

        self.manager.skipped = [("abc.py", "File is bad")]
        self.manager.excluded_files = ["def.py"]

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        get_issue_list.return_value = [issue_a, issue_b]

        self.manager.metrics.data["_totals"] = {"loc": 1000, "nosec": 50}
        for category in ["SEVERITY", "CONFIDENCE"]:
            for level in ["UNDEFINED", "LOW", "MEDIUM", "HIGH"]:
                self.manager.metrics.data["_totals"][f"{category}.{level}"] = 1

        # Validate that we're outputting the correct issues
        output_str_fn = "bandit.formatters.text._output_issue_str"
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = "ISSUE_OUTPUT_TEXT"

            with open(self.tmp_fname, "w") as tmp_file:
                b_text.report(self.manager,
                              tmp_file,
                              bandit.LOW,
                              bandit.LOW,
                              lines=5)

            calls = [
                mock.call(issue_a, "", lines=5),
                mock.call(issue_b, "", lines=5),
            ]

            output_str.assert_has_calls(calls, any_order=True)

        # Validate that we're outputting all of the expected fields and the
        # correct values
        with open(self.tmp_fname, "w") as tmp_file:
            b_text.report(self.manager,
                          tmp_file,
                          bandit.LOW,
                          bandit.LOW,
                          lines=5)
        with open(self.tmp_fname) as f:
            data = f.read()

            expected_items = [
                "Run started",
                "Files in scope (1)",
                "binding.py (score: ",
                "CONFIDENCE: 1",
                "SEVERITY: 1",
                "Files excluded (1):",
                "def.py",
                "Undefined: 1",
                "Low: 1",
                "Medium: 1",
                "High: 1",
                "Total lines skipped ",
                "(#nosec): 50",
                "Total issues (by severity)",
                "Total issues (by confidence)",
                "Files skipped (1)",
                "abc.py (File is bad)",
            ]
            for item in expected_items:
                self.assertIn(item, data)
Example #19
def main():
    # bring our logging stuff up as early as possible
    debug = (logging.DEBUG
             if '-d' in sys.argv or '--debug' in sys.argv else logging.INFO)
    _init_logger(debug)
    extension_mgr = _init_extensions()

    baseline_formatters = [
        f.name
        for f in filter(lambda x: hasattr(x.plugin, '_accepts_baseline'),
                        extension_mgr.formatters)
    ]

    # now do normal startup
    parser = argparse.ArgumentParser(
        description='Bandit - a Python source code security analyzer',
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('targets',
                        metavar='targets',
                        type=str,
                        nargs='*',
                        help='source file(s) or directory(s) to be tested')
    parser.add_argument('-r',
                        '--recursive',
                        dest='recursive',
                        action='store_true',
                        help='find and process files in subdirectories')
    parser.add_argument(
        '-a',
        '--aggregate',
        dest='agg_type',
        action='store',
        default='file',
        type=str,
        choices=['file', 'vuln'],
        help='aggregate output by vulnerability (default) or by filename')
    parser.add_argument(
        '-n',
        '--number',
        dest='context_lines',
        action='store',
        default=3,
        type=int,
        help='maximum number of code lines to output for each issue')
    parser.add_argument(
        '-c',
        '--configfile',
        dest='config_file',
        action='store',
        default=None,
        type=str,
        help='optional config file to use for selecting plugins and '
        'overriding defaults')
    parser.add_argument(
        '-p',
        '--profile',
        dest='profile',
        action='store',
        default=None,
        type=str,
        help='profile to use (defaults to executing all tests)')
    parser.add_argument('-t',
                        '--tests',
                        dest='tests',
                        action='store',
                        default=None,
                        type=str,
                        help='comma-separated list of test IDs to run')
    parser.add_argument('-s',
                        '--skip',
                        dest='skips',
                        action='store',
                        default=None,
                        type=str,
                        help='comma-separated list of test IDs to skip')
    parser.add_argument('-l',
                        '--level',
                        dest='severity',
                        action='count',
                        default=1,
                        help='report only issues of a given severity level or '
                        'higher (-l for LOW, -ll for MEDIUM, -lll for HIGH)')
    parser.add_argument(
        '-i',
        '--confidence',
        dest='confidence',
        action='count',
        default=1,
        help='report only issues of a given confidence level or '
        'higher (-i for LOW, -ii for MEDIUM, -iii for HIGH)')
    output_format = 'screen' if sys.stdout.isatty() else 'txt'
    parser.add_argument('-f',
                        '--format',
                        dest='output_format',
                        action='store',
                        default=output_format,
                        help='specify output format',
                        choices=sorted(extension_mgr.formatter_names))
    parser.add_argument(
        '--msg-template',
        action='store',
        default=None,
        help='specify output message template'
        ' (only usable with --format custom),'
        ' see CUSTOM FORMAT section'
        ' for list of available values',
    )
    parser.add_argument('-o',
                        '--output',
                        dest='output_file',
                        action='store',
                        nargs='?',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='write report to filename')
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument(
        '-v',
        '--verbose',
        dest='verbose',
        action='store_true',
        help='output extra information like excluded and included files')
    parser.add_argument('-d',
                        '--debug',
                        dest='debug',
                        action='store_true',
                        help='turn on debug mode')
    group.add_argument('-q',
                       '--quiet',
                       '--silent',
                       dest='quiet',
                       action='store_true',
                       help='only show output in the case of an error')
    parser.add_argument('--ignore-nosec',
                        dest='ignore_nosec',
                        action='store_true',
                        help='do not skip lines with # nosec comments')
    parser.add_argument('-x',
                        '--exclude',
                        dest='excluded_paths',
                        action='store',
                        default='',
                        help='comma-separated list of paths (glob patterns '
                        'supported) to exclude from scan '
                        '(note that these are in addition to the excluded '
                        'paths provided in the config file)')
    parser.add_argument('-b',
                        '--baseline',
                        dest='baseline',
                        action='store',
                        default=None,
                        help='path of a baseline report to compare against '
                        '(only JSON-formatted files are accepted)')
    parser.add_argument(
        '--ini',
        dest='ini_path',
        action='store',
        default=None,
        help='path to a .bandit file that supplies command line arguments')
    python_ver = sys.version.replace('\n', '')
    parser.add_argument(
        '--version',
        action='version',
        version='%(prog)s {version}\n  python version = {python}'.format(
            version=bandit.__version__, python=python_ver))

    parser.set_defaults(debug=False)
    parser.set_defaults(verbose=False)
    parser.set_defaults(quiet=False)
    parser.set_defaults(ignore_nosec=False)

    plugin_info = [
        "%s\t%s" % (a[0], a[1].name)
        for a in extension_mgr.plugins_by_id.items()
    ]
    blacklist_info = []
    for a in extension_mgr.blacklist.items():
        for b in a[1]:
            blacklist_info.append('%s\t%s' % (b['id'], b['name']))

    plugin_list = '\n\t'.join(sorted(set(plugin_info + blacklist_info)))
    dedent_text = textwrap.dedent('''
    CUSTOM FORMATTING
    -----------------

    Available tags:

        {abspath}, {relpath}, {line},  {test_id},
        {severity}, {msg}, {confidence}, {range}

    Example usage:

        Default template:
        bandit -r examples/ --format custom --msg-template \\
        "{abspath}:{line}: {test_id}[bandit]: {severity}: {msg}"

        Provides same output as:
        bandit -r examples/ --format custom

        Tags can also be formatted in python string.format() style:
        bandit -r examples/ --format custom --msg-template \\
        "{relpath:20.20s}: {line:03}: {test_id:^8}: DEFECT: {msg:>20}"

        See python documentation for more information about formatting style:
        https://docs.python.org/3.4/library/string.html

    The following tests were discovered and loaded:
    -----------------------------------------------
    ''')
    parser.epilog = dedent_text + "\t{0}".format(plugin_list)

    # setup work - parse arguments, and initialize BanditManager
    args = parser.parse_args()
    # Check if `--msg-template` is not present without custom formatter
    if args.output_format != 'custom' and args.msg_template is not None:
        parser.error("--msg-template can only be used with --format=custom")

    try:
        b_conf = b_config.BanditConfig(config_file=args.config_file)
    except utils.ConfigError as e:
        LOG.error(e)
        sys.exit(2)

    # Handle .bandit files in projects to pass cmdline args from file
    ini_options = _get_options_from_ini(args.ini_path, args.targets)
    if ini_options:
        # prefer command line, then ini file
        args.excluded_paths = _log_option_source(args.excluded_paths,
                                                 ini_options.get('exclude'),
                                                 'excluded paths')

        args.skips = _log_option_source(args.skips, ini_options.get('skips'),
                                        'skipped tests')

        args.tests = _log_option_source(args.tests, ini_options.get('tests'),
                                        'selected tests')
        ini_targets = ini_options.get('targets')
        if ini_targets:
            ini_targets = ini_targets.split(',')
        args.targets = _log_option_source(args.targets, ini_targets,
                                          'selected targets')
        # TODO(tmcpeak): any other useful options to pass from .bandit?

    if not args.targets:
        LOG.error("No targets found in CLI or ini files, exiting.")
        sys.exit(2)
    # if the log format string was set in the options, reinitialize
    if b_conf.get_option('log_format'):
        log_format = b_conf.get_option('log_format')
        _init_logger(log_level=logging.DEBUG, log_format=log_format)

    if args.quiet:
        _init_logger(log_level=logging.WARN)

    try:
        profile = _get_profile(b_conf, args.profile, args.config_file)
        _log_info(args, profile)

        profile['include'].update(args.tests.split(',') if args.tests else [])
        profile['exclude'].update(args.skips.split(',') if args.skips else [])
        extension_mgr.validate_profile(profile)

    except (utils.ProfileNotFound, ValueError) as e:
        LOG.error(e)
        sys.exit(2)

    b_mgr = b_manager.BanditManager(b_conf,
                                    args.agg_type,
                                    args.debug,
                                    profile=profile,
                                    verbose=args.verbose,
                                    quiet=args.quiet,
                                    ignore_nosec=args.ignore_nosec)

    if args.baseline is not None:
        try:
            with open(args.baseline) as bl:
                data = bl.read()
                b_mgr.populate_baseline(data)
        except IOError:
            LOG.warning("Could not open baseline report: %s", args.baseline)
            sys.exit(2)

        if args.output_format not in baseline_formatters:
            LOG.warning('Baseline must be used with one of the following '
                        'formats: ' + str(baseline_formatters))
            sys.exit(2)

    if args.output_format != "json":
        if args.config_file:
            LOG.info("using config: %s", args.config_file)

        LOG.info("running on Python %d.%d.%d", sys.version_info.major,
                 sys.version_info.minor, sys.version_info.micro)

    # initiate file discovery step within Bandit Manager
    b_mgr.discover_files(args.targets, args.recursive, args.excluded_paths)

    if not b_mgr.b_ts.tests:
        LOG.error('No tests would be run, please check the profile.')
        sys.exit(2)

    # initiate execution of tests within Bandit Manager
    b_mgr.run_tests()
    LOG.debug(b_mgr.b_ma)
    LOG.debug(b_mgr.metrics)

    # trigger output of results by Bandit Manager
    sev_level = constants.RANKING[args.severity - 1]
    conf_level = constants.RANKING[args.confidence - 1]
    b_mgr.output_results(args.context_lines, sev_level, conf_level,
                         args.output_file, args.output_format,
                         args.msg_template)

    # return an exit code of 1 if there are results, 0 otherwise
    if b_mgr.results_count(sev_filter=sev_level, conf_filter=conf_level) > 0:
        sys.exit(1)
    else:
        sys.exit(0)
Example #20
 def __init__(self, ignore_nosec=False):
     conf = config.BanditConfig()
     self.manager = manager.BanditManager(conf,
                                          None,
                                          ignore_nosec=ignore_nosec)
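How such a wrapper is driven is not shown here; a hypothetical companion method, reusing the manager calls from the main() examples above:

 def run(self, targets):
     # hand discovery and execution to the wrapped BanditManager,
     # then return whatever issues it collected
     self.manager.discover_files(targets, True)  # True: recurse into dirs
     self.manager.run_tests()
     return self.manager.get_issue_list()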
Example #21
    def test_report_nobaseline(self, get_issue_list):
        cfg_file = os.path.join(os.getcwd(), 'bandit/config/bandit.yaml')
        conf = config.BanditConfig(cfg_file)
        self.manager = manager.BanditManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        self.manager.verbose = True
        self.manager.files_list = ['binding.py']

        self.manager.scores = [{'SEVERITY': [0, 0, 0, 1],
                                'CONFIDENCE': [0, 0, 0, 1]}]

        self.manager.skipped = [('abc.py', 'File is bad')]
        self.manager.excluded_files = ['def.py']

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        get_issue_list.return_value = [issue_a, issue_b]

        self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50}
        for category in ['SEVERITY', 'CONFIDENCE']:
            for level in ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']:
                self.manager.metrics.data['_totals']['%s.%s' %
                                                     (category, level)] = 1

        # Validate that we're outputting the correct issues
        output_str_fn = 'bandit.formatters.text._output_issue_str'
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = 'ISSUE_OUTPUT_TEXT'

            b_text.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                          lines=5)

            calls = [mock.call(issue_a, '', lines=5),
                     mock.call(issue_b, '', lines=5)]

            output_str.assert_has_calls(calls, any_order=True)

        # Validate that we're outputting all of the expected fields and the
        # correct values
        b_text.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                      lines=5)
        with open(self.tmp_fname) as f:
            data = f.read()

            expected_items = ['Run started',
                              'Files in scope (1)',
                              'binding.py (score: ',
                              "CONFIDENCE: 1",
                              "SEVERITY: 1",
                              'Files excluded (1):',
                              'def.py',
                              'Undefined: 1',
                              'Low: 1',
                              'Medium: 1',
                              'High: 1',
                              'Total lines skipped ',
                              '(#nosec): 50',
                              'Total issues (by severity)',
                              'Total issues (by confidence)',
                              'Files skipped (1)',
                              'abc.py (File is bad)'
                              ]
            for item in expected_items:
                self.assertIn(item, data)
Example #22
 def setUp(self):
     super(TestGetSetting, self).setUp()
     f = self.useFixture(TempFile())
     self.b_config = config.BanditConfig(f.name)
Example #23
def main():
    """Bandit CLI."""
    # bring our logging stuff up as early as possible
    debug = (logging.DEBUG
             if "-d" in sys.argv or "--debug" in sys.argv else logging.INFO)
    _init_logger(debug)
    extension_mgr = _init_extensions()

    baseline_formatters = [
        f.name for f in filter(
            lambda x: hasattr(x.plugin, "_accepts_baseline"),
            extension_mgr.formatters,
        )
    ]

    # now do normal startup
    parser = argparse.ArgumentParser(
        description="Bandit - a Python source code security analyzer",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "targets",
        metavar="targets",
        type=str,
        nargs="*",
        help="source file(s) or directory(s) to be tested",
    )
    parser.add_argument(
        "-r",
        "--recursive",
        dest="recursive",
        action="store_true",
        help="find and process files in subdirectories",
    )
    parser.add_argument(
        "-a",
        "--aggregate",
        dest="agg_type",
        action="store",
        default="file",
        type=str,
        choices=["file", "vuln"],
        help="aggregate output by vulnerability (default) or by filename",
    )
    parser.add_argument(
        "-n",
        "--number",
        dest="context_lines",
        action="store",
        default=3,
        type=int,
        help="maximum number of code lines to output for each issue",
    )
    parser.add_argument(
        "-c",
        "--configfile",
        dest="config_file",
        action="store",
        default=None,
        type=str,
        help="optional config file to use for selecting plugins and "
        "overriding defaults",
    )
    parser.add_argument(
        "-p",
        "--profile",
        dest="profile",
        action="store",
        default=None,
        type=str,
        help="profile to use (defaults to executing all tests)",
    )
    parser.add_argument(
        "-t",
        "--tests",
        dest="tests",
        action="store",
        default=None,
        type=str,
        help="comma-separated list of test IDs to run",
    )
    parser.add_argument(
        "-s",
        "--skip",
        dest="skips",
        action="store",
        default=None,
        type=str,
        help="comma-separated list of test IDs to skip",
    )
    severity_group = parser.add_mutually_exclusive_group(required=False)
    severity_group.add_argument(
        "-l",
        "--level",
        dest="severity",
        action="count",
        default=1,
        help="report only issues of a given severity level or "
        "higher (-l for LOW, -ll for MEDIUM, -lll for HIGH)",
    )
    severity_group.add_argument(
        "--severity-level",
        dest="severity_string",
        action="store",
        help="report only issues of a given severity level or higher."
        ' "all" and "low" are likely to produce the same results, but it'
        " is possible for rules to be undefined which will"
        ' not be listed in "low".',
        choices=["all", "low", "medium", "high"],
    )
    confidence_group = parser.add_mutually_exclusive_group(required=False)
    confidence_group.add_argument(
        "-i",
        "--confidence",
        dest="confidence",
        action="count",
        default=1,
        help="report only issues of a given confidence level or "
        "higher (-i for LOW, -ii for MEDIUM, -iii for HIGH)",
    )
    confidence_group.add_argument(
        "--confidence-level",
        dest="confidence_string",
        action="store",
        help="report only issues of a given confidence level or higher."
        ' "all" and "low" are likely to produce the same results, but it'
        " is possible for rules to be undefined which will"
        ' not be listed in "low".',
        choices=["all", "low", "medium", "high"],
    )
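    # default to the screen formatter on a TTY, plain text otherwise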
    output_format = "screen" if sys.stdout.isatty() else "txt"
    parser.add_argument(
        "-f",
        "--format",
        dest="output_format",
        action="store",
        default=output_format,
        help="specify output format",
        choices=sorted(extension_mgr.formatter_names),
    )
    parser.add_argument(
        "--msg-template",
        action="store",
        default=None,
        help="specify output message template"
        " (only usable with --format custom),"
        " see CUSTOM FORMAT section"
        " for list of available values",
    )
    parser.add_argument(
        "-o",
        "--output",
        dest="output_file",
        action="store",
        nargs="?",
        type=argparse.FileType("w", encoding="utf-8"),
        default=sys.stdout,
        help="write report to filename",
    )
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument(
        "-v",
        "--verbose",
        dest="verbose",
        action="store_true",
        help="output extra information like excluded and included files",
    )
    parser.add_argument(
        "-d",
        "--debug",
        dest="debug",
        action="store_true",
        help="turn on debug mode",
    )
    group.add_argument(
        "-q",
        "--quiet",
        "--silent",
        dest="quiet",
        action="store_true",
        help="only show output in the case of an error",
    )
    parser.add_argument(
        "--ignore-nosec",
        dest="ignore_nosec",
        action="store_true",
        help="do not skip lines with # nosec comments",
    )
    parser.add_argument(
        "-x",
        "--exclude",
        dest="excluded_paths",
        action="store",
        default=",".join(constants.EXCLUDE),
        help="comma-separated list of paths (glob patterns "
        "supported) to exclude from scan "
        "(note that these are in addition to the excluded "
        "paths provided in the config file) (default: " +
        ",".join(constants.EXCLUDE) + ")",
    )
    parser.add_argument(
        "-b",
        "--baseline",
        dest="baseline",
        action="store",
        default=None,
        help="path of a baseline report to compare against "
        "(only JSON-formatted files are accepted)",
    )
    parser.add_argument(
        "--ini",
        dest="ini_path",
        action="store",
        default=None,
        help="path to a .bandit file that supplies command line arguments",
    )
    parser.add_argument(
        "--exit-zero",
        action="store_true",
        dest="exit_zero",
        default=False,
        help="exit with 0, "
        "even with results found",
    )
    python_ver = sys.version.replace("\n", "")
    parser.add_argument(
        "--version",
        action="version",
        version="%(prog)s {version}\n  python version = {python}".format(
            version=bandit.__version__, python=python_ver),
    )

    parser.set_defaults(debug=False)
    parser.set_defaults(verbose=False)
    parser.set_defaults(quiet=False)
    parser.set_defaults(ignore_nosec=False)

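    # collect plugin and blacklist check IDs and names for the --help epilog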
    plugin_info = [
        f"{a[0]}\t{a[1].name}" for a in extension_mgr.plugins_by_id.items()
    ]
    blacklist_info = []
    for a in extension_mgr.blacklist.items():
        for b in a[1]:
            blacklist_info.append(f"{b['id']}\t{b['name']}")

    plugin_list = "\n\t".join(sorted(set(plugin_info + blacklist_info)))
    dedent_text = textwrap.dedent("""
    CUSTOM FORMATTING
    -----------------

    Available tags:

        {abspath}, {relpath}, {line}, {col}, {test_id},
        {severity}, {msg}, {confidence}, {range}

    Example usage:

        Default template:
        bandit -r examples/ --format custom --msg-template \\
        "{abspath}:{line}: {test_id}[bandit]: {severity}: {msg}"

        Provides the same output as:
        bandit -r examples/ --format custom

        Tags can also be formatted in python string.format() style:
        bandit -r examples/ --format custom --msg-template \\
        "{relpath:20.20s}: {line:03}: {test_id:^8}: DEFECT: {msg:>20}"

        See the Python documentation for more information about formatting style:
        https://docs.python.org/3/library/string.html

    The following tests were discovered and loaded:
    -----------------------------------------------
    """)
    parser.epilog = dedent_text + f"\t{plugin_list}"

    # setup work - parse arguments, and initialize BanditManager
    args = parser.parse_args()
    # reject --msg-template unless the custom formatter was selected
    if args.output_format != "custom" and args.msg_template is not None:
        parser.error("--msg-template can only be used with --format=custom")

    # Check if confidence or severity level have been specified with strings
    if args.severity_string is not None:
        if args.severity_string == "all":
            args.severity = 1
        elif args.severity_string == "low":
            args.severity = 2
        elif args.severity_string == "medium":
            args.severity = 3
        elif args.severity_string == "high":
            args.severity = 4
        # Other strings will be blocked by argparse

    if args.confidence_string is not None:
        if args.confidence_string == "all":
            args.confidence = 1
        elif args.confidence_string == "low":
            args.confidence = 2
        elif args.confidence_string == "medium":
            args.confidence = 3
        elif args.confidence_string == "high":
            args.confidence = 4
        # Other strings will be blocked by argparse

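    # load the Bandit configuration, optionally from --configfile;
    # exit if it cannot be parsed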
    try:
        b_conf = b_config.BanditConfig(config_file=args.config_file)
    except utils.ConfigError as e:
        LOG.error(e)
        sys.exit(2)

    # Handle .bandit files in projects to pass cmdline args from file
    ini_options = _get_options_from_ini(args.ini_path, args.targets)
    if ini_options:
        # prefer command line, then ini file
        args.excluded_paths = _log_option_source(
            parser.get_default("excluded_paths"),
            args.excluded_paths,
            ini_options.get("exclude"),
            "excluded paths",
        )

        args.skips = _log_option_source(
            parser.get_default("skips"),
            args.skips,
            ini_options.get("skips"),
            "skipped tests",
        )

        args.tests = _log_option_source(
            parser.get_default("tests"),
            args.tests,
            ini_options.get("tests"),
            "selected tests",
        )

        ini_targets = ini_options.get("targets")
        if ini_targets:
            ini_targets = ini_targets.split(",")

        args.targets = _log_option_source(
            parser.get_default("targets"),
            args.targets,
            ini_targets,
            "selected targets",
        )

        # TODO(tmcpeak): any other useful options to pass from .bandit?

        args.recursive = _log_option_source(
            parser.get_default("recursive"),
            args.recursive,
            ini_options.get("recursive"),
            "recursive scan",
        )

        args.agg_type = _log_option_source(
            parser.get_default("agg_type"),
            args.agg_type,
            ini_options.get("aggregate"),
            "aggregate output type",
        )

        args.context_lines = _log_option_source(
            parser.get_default("context_lines"),
            args.context_lines,
            ini_options.get("number"),
            "max code lines output for issue",
        )

        args.profile = _log_option_source(
            parser.get_default("profile"),
            args.profile,
            ini_options.get("profile"),
            "profile",
        )

        args.severity = _log_option_source(
            parser.get_default("severity"),
            args.severity,
            ini_options.get("level"),
            "severity level",
        )

        args.confidence = _log_option_source(
            parser.get_default("confidence"),
            args.confidence,
            ini_options.get("confidence"),
            "confidence level",
        )

        args.output_format = _log_option_source(
            parser.get_default("output_format"),
            args.output_format,
            ini_options.get("format"),
            "output format",
        )

        args.msg_template = _log_option_source(
            parser.get_default("msg_template"),
            args.msg_template,
            ini_options.get("msg-template"),
            "output message template",
        )

        args.output_file = _log_option_source(
            parser.get_default("output_file"),
            args.output_file,
            ini_options.get("output"),
            "output file",
        )

        args.verbose = _log_option_source(
            parser.get_default("verbose"),
            args.verbose,
            ini_options.get("verbose"),
            "output extra information",
        )

        args.debug = _log_option_source(
            parser.get_default("debug"),
            args.debug,
            ini_options.get("debug"),
            "debug mode",
        )

        args.quiet = _log_option_source(
            parser.get_default("quiet"),
            args.quiet,
            ini_options.get("quiet"),
            "silent mode",
        )

        args.ignore_nosec = _log_option_source(
            parser.get_default("ignore_nosec"),
            args.ignore_nosec,
            ini_options.get("ignore-nosec"),
            "do not skip lines with # nosec",
        )

        args.baseline = _log_option_source(
            parser.get_default("baseline"),
            args.baseline,
            ini_options.get("baseline"),
            "path of a baseline report",
        )

    if not args.targets:
        LOG.error("No targets found in CLI or ini files, exiting.")
        sys.exit(2)
    # if a log format string was set in the config, reinitialize the logger
    if b_conf.get_option("log_format"):
        log_format = b_conf.get_option("log_format")
        _init_logger(log_level=logging.DEBUG, log_format=log_format)

    if args.quiet:
        _init_logger(log_level=logging.WARNING)

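    # build the effective test profile from the config and --profile,
    # fold in --tests/--skip, then validate it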
    try:
        profile = _get_profile(b_conf, args.profile, args.config_file)
        _log_info(args, profile)

        profile["include"].update(args.tests.split(",") if args.tests else [])
        profile["exclude"].update(args.skips.split(",") if args.skips else [])
        extension_mgr.validate_profile(profile)

    except (utils.ProfileNotFound, ValueError) as e:
        LOG.error(e)
        sys.exit(2)

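    # the manager drives file discovery, test execution and reporting below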
    b_mgr = b_manager.BanditManager(
        b_conf,
        args.agg_type,
        args.debug,
        profile=profile,
        verbose=args.verbose,
        quiet=args.quiet,
        ignore_nosec=args.ignore_nosec,
    )

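    # load the baseline report to compare results against, if one was given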
    if args.baseline is not None:
        try:
            with open(args.baseline) as bl:
                data = bl.read()
                b_mgr.populate_baseline(data)
        except OSError:
            LOG.warning("Could not open baseline report: %s", args.baseline)
            sys.exit(2)

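        # a baseline can only be reported by formatters that support it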
        if args.output_format not in baseline_formatters:
            LOG.warning("Baseline must be used with one of the following "
                        "formats: " + str(baseline_formatters))
            sys.exit(2)

    if args.output_format != "json":
        if args.config_file:
            LOG.info("using config: %s", args.config_file)

        LOG.info(
            "running on Python %d.%d.%d",
            sys.version_info.major,
            sys.version_info.minor,
            sys.version_info.micro,
        )

    # initiate file discovery step within Bandit Manager
    b_mgr.discover_files(args.targets, args.recursive, args.excluded_paths)

    if not b_mgr.b_ts.tests:
        LOG.error("No tests would be run, please check the profile.")
        sys.exit(2)

    # initiate execution of tests within Bandit Manager
    b_mgr.run_tests()
    LOG.debug(b_mgr.b_ma)
    LOG.debug(b_mgr.metrics)

    # trigger output of results by Bandit Manager
    sev_level = constants.RANKING[args.severity - 1]
    conf_level = constants.RANKING[args.confidence - 1]
    b_mgr.output_results(
        args.context_lines,
        sev_level,
        conf_level,
        args.output_file,
        args.output_format,
        args.msg_template,
    )

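    # exit non-zero when issues remain at or above the chosen severity and
    # confidence levels, unless --exit-zero was requested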
    if (b_mgr.results_count(sev_filter=sev_level, conf_filter=conf_level) > 0
            and not args.exit_zero):
        sys.exit(1)
    else:
        sys.exit(0)