Example #1
    def test_report_baseline(self, get_issue_list):
        cfg_file = os.path.join(os.getcwd(), 'bandit/config/bandit.yaml')
        conf = config.BanditConfig(cfg_file)
        self.manager = manager.BanditManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        issue_x = _get_issue_instance()
        issue_x.fname = 'x'
        issue_y = _get_issue_instance()
        issue_y.fname = 'y'
        issue_z = _get_issue_instance()
        issue_z.fname = 'z'

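        # Baseline results map each issue to its candidate issues: a single
        # candidate (issue_a -> issue_x) is rendered inline, while multiple
        # candidates (issue_b -> issue_y, issue_z) render the parent without
        # code or line number and each candidate indented (see the asserted
        # calls below).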
        get_issue_list.return_value = OrderedDict([(issue_a, [issue_x]),
                                                   (issue_b, [issue_y, issue_z])])

        # Validate that we're outputting the correct issues
        indent_val = ' ' * 10
        output_str_fn = 'bandit.formatters.screen._output_issue_str'
        with mock.patch(output_str_fn) as output_str:
            screen.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                          lines=5)

            calls = [mock.call(issue_a, '', lines=5),
                     mock.call(issue_b, '', show_code=False, show_lineno=False),
                     mock.call(issue_y, indent_val, lines=5),
                     mock.call(issue_z, indent_val, lines=5)]

            output_str.assert_has_calls(calls, any_order=True)
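
All of these snippets come from the same Bandit screen-formatter test module and omit the scaffolding around each method: the imports, the mock.patch decorator that supplies the get_issue_list argument, and the _get_issue_instance helper. A minimal sketch of that scaffolding is below; the patch target, the Issue constructor signature, and the helper body are assumptions modeled on the Bandit code base and vary between releases.

# Shared scaffolding assumed by the examples on this page (a sketch, not
# verbatim Bandit test code; the patch target and Issue signature are
# assumptions that differ between Bandit releases).
import collections
import os
import tempfile
from collections import OrderedDict
from unittest import mock

import testtools

import bandit
from bandit.core import config
from bandit.core import issue
from bandit.core import manager
from bandit.formatters import screen


def _get_issue_instance(severity=bandit.MEDIUM, confidence=bandit.MEDIUM):
    # Build one representative issue for the formatter to render.
    new_issue = issue.Issue(severity, confidence, 'Test issue')
    new_issue.fname = 'code.py'
    new_issue.test = 'bandit_plugin'
    new_issue.lineno = 1
    return new_issue


class ScreenFormatterTests(testtools.TestCase):
    # Each test method is assumed to carry this decorator, so the
    # get_issue_list parameter arrives as a mock:
    @mock.patch('bandit.core.manager.BanditManager.get_issue_list')
    def test_example(self, get_issue_list):
        ...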
Example #2
    def test_no_issues(self, get_issue_list):
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        get_issue_list.return_value = OrderedDict()
        with mock.patch('bandit.formatters.screen.do_print') as m:
            screen.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                          lines=5)
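            # m.call_args holds the (args, kwargs) of the last do_print
            # call; stringifying both parts lets the substring check work
            # however the message was passed.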
            self.assertIn('No issues identified.',
                          '\n'.join([str(a) for a in m.call_args]))
Example #3
    def test_no_issues(self, get_issue_list):
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        get_issue_list.return_value = collections.OrderedDict()
        with mock.patch('bandit.formatters.screen.do_print') as m:
            with open(self.tmp_fname, 'w') as tmp_file:
                screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW,
                              lines=5)
            self.assertIn('No issues identified.',
                          '\n'.join([str(a) for a in m.call_args]))
Example #4
    def test_no_issues(self, get_issue_list):
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, "file")

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        get_issue_list.return_value = collections.OrderedDict()
        with mock.patch("bandit.formatters.screen.do_print") as m:
            with open(self.tmp_fname, "w") as tmp_file:
                screen.report(
                    self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5
                )
            self.assertIn(
                "No issues identified.",
                "\n".join([str(a) for a in m.call_args]),
            )
Example #5
    def test_no_issues(self, get_issue_list):
        cfg_file = os.path.join(os.getcwd(), 'bandit/config/bandit.yaml')
        conf = config.BanditConfig(cfg_file)
        self.manager = manager.BanditManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        get_issue_list.return_value = OrderedDict()
        with mock.patch('bandit.formatters.screen.do_print') as m:
            screen.report(self.manager,
                          self.tmp_fname,
                          bandit.LOW,
                          bandit.LOW,
                          lines=5)
            self.assertIn('No issues identified.',
                          '\n'.join([str(a) for a in m.call_args]))
Example #6
    def test_report_baseline(self, get_issue_list):
        cfg_file = os.path.join(os.getcwd(), 'bandit/config/bandit.yaml')
        conf = config.BanditConfig(cfg_file)
        self.manager = manager.BanditManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        issue_x = _get_issue_instance()
        issue_x.fname = 'x'
        issue_y = _get_issue_instance()
        issue_y.fname = 'y'
        issue_z = _get_issue_instance()
        issue_z.fname = 'z'

        get_issue_list.return_value = OrderedDict([(issue_a, [issue_x]),
                                                   (issue_b,
                                                    [issue_y, issue_z])])

        # Validate that we're outputting the correct issues
        indent_val = ' ' * 10
        output_str_fn = 'bandit.formatters.screen._output_issue_str'
        with mock.patch(output_str_fn) as output_str:
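            # Return a real string rather than the Mock default so the
            # formatter can join the rendered issues into the report text.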
            output_str.return_value = 'ISSUE_OUTPUT_TEXT'

            screen.report(self.manager,
                          self.tmp_fname,
                          bandit.LOW,
                          bandit.LOW,
                          lines=5)

            calls = [
                mock.call(issue_a, '', lines=5),
                mock.call(issue_b, '', show_code=False, show_lineno=False),
                mock.call(issue_y, indent_val, lines=5),
                mock.call(issue_z, indent_val, lines=5)
            ]

            output_str.assert_has_calls(calls, any_order=True)
Example #7
    def test_report_baseline(self, get_issue_list):
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, "file")

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        issue_x = _get_issue_instance()
        issue_x.fname = "x"
        issue_y = _get_issue_instance()
        issue_y.fname = "y"
        issue_z = _get_issue_instance()
        issue_z.fname = "z"

        get_issue_list.return_value = collections.OrderedDict(
            [(issue_a, [issue_x]), (issue_b, [issue_y, issue_z])]
        )

        # Validate that we're outputting the correct issues
        indent_val = " " * 10
        output_str_fn = "bandit.formatters.screen._output_issue_str"
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = "ISSUE_OUTPUT_TEXT"

            with open(self.tmp_fname, "w") as tmp_file:
                screen.report(
                    self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5
                )

            calls = [
                mock.call(issue_a, "", lines=5),
                mock.call(issue_b, "", show_code=False, show_lineno=False),
                mock.call(issue_y, indent_val, lines=5),
                mock.call(issue_z, indent_val, lines=5),
            ]

            output_str.assert_has_calls(calls, any_order=True)
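
Note the two calling conventions across these examples, an observation from the snippets themselves rather than a documented guarantee: the older tests hand screen.report the output filename, while the newer ones open the file first and pass the file object.

# Older style: pass the temp file's name directly.
screen.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW, lines=5)

# Newer style: pass an open file object; the context manager closes it.
with open(self.tmp_fname, "w") as tmp_file:
    screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5)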
Example #8
    def test_report_nobaseline(self, get_issue_list):
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        self.manager.verbose = True
        self.manager.files_list = ['binding.py']

        self.manager.scores = [{'SEVERITY': [0, 0, 0, 1],
                                'CONFIDENCE': [0, 0, 0, 1]}]

        self.manager.skipped = [('abc.py', 'File is bad')]
        self.manager.excluded_files = ['def.py']

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

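        # Without a baseline, get_issue_list returns a flat list of issues
        # rather than the issue -> candidates mapping used in the baseline
        # tests above.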
        get_issue_list.return_value = [issue_a, issue_b]

        self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50}
        for category in ['SEVERITY', 'CONFIDENCE']:
            for level in ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']:
                self.manager.metrics.data['_totals']['%s.%s' %
                                                     (category, level)] = 1

        # Validate that we're outputting the correct issues
        output_str_fn = 'bandit.formatters.screen._output_issue_str'
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = 'ISSUE_OUTPUT_TEXT'

            screen.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                          lines=5)

            calls = [mock.call(issue_a, '', lines=5),
                     mock.call(issue_b, '', lines=5)]

            output_str.assert_has_calls(calls, any_order=True)

        # Validate that we're outputting all of the expected fields and the
        # correct values
        with mock.patch('bandit.formatters.screen.do_print') as m:
            screen.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                          lines=5)

            data = '\n'.join([str(a) for a in m.call_args[0][0]])

            expected = 'Run started'
            self.assertIn(expected, data)

            expected_items = [
                screen.header('Files in scope (1):'),
                '\n\tbinding.py (score: {SEVERITY: 1, CONFIDENCE: 1})']

            for item in expected_items:
                self.assertIn(item, data)

            expected = screen.header('Files excluded (1):') + '\n\tdef.py'
            self.assertIn(expected, data)

            expected = ('Total lines of code: 1000\n\tTotal lines skipped '
                        '(#nosec): 50')
            self.assertIn(expected, data)

            expected = ('Total issues (by severity):\n\t\tUndefined: 1\n\t\t'
                        'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
            self.assertIn(expected, data)

            expected = ('Total issues (by confidence):\n\t\tUndefined: 1\n\t\t'
                        'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
            self.assertIn(expected, data)

            expected = (screen.header('Files skipped (1):') +
                        '\n\tabc.py (File is bad)')
            self.assertIn(expected, data)
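
For reference, the metrics loop in this example produces a flat totals mapping keyed as CATEGORY.LEVEL; the shape below is inferred from the test code itself, not copied from Bandit.

# Resulting self.manager.metrics.data['_totals']:
{
    'loc': 1000, 'nosec': 50,
    'SEVERITY.UNDEFINED': 1, 'SEVERITY.LOW': 1,
    'SEVERITY.MEDIUM': 1, 'SEVERITY.HIGH': 1,
    'CONFIDENCE.UNDEFINED': 1, 'CONFIDENCE.LOW': 1,
    'CONFIDENCE.MEDIUM': 1, 'CONFIDENCE.HIGH': 1,
}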
Example #9
    def test_report_nobaseline(self, get_issue_list):
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, "file")

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        self.manager.verbose = True
        self.manager.files_list = ["binding.py"]

        self.manager.scores = [
            {"SEVERITY": [0, 0, 0, 1], "CONFIDENCE": [0, 0, 0, 1]}
        ]

        self.manager.skipped = [("abc.py", "File is bad")]
        self.manager.excluded_files = ["def.py"]

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        get_issue_list.return_value = [issue_a, issue_b]

        self.manager.metrics.data["_totals"] = {"loc": 1000, "nosec": 50}
        for category in ["SEVERITY", "CONFIDENCE"]:
            for level in ["UNDEFINED", "LOW", "MEDIUM", "HIGH"]:
                self.manager.metrics.data["_totals"][f"{category}.{level}"] = 1

        # Validate that we're outputting the correct issues
        output_str_fn = "bandit.formatters.screen._output_issue_str"
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = "ISSUE_OUTPUT_TEXT"

            with open(self.tmp_fname, "w") as tmp_file:
                screen.report(
                    self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5
                )

            calls = [
                mock.call(issue_a, "", lines=5),
                mock.call(issue_b, "", lines=5),
            ]

            output_str.assert_has_calls(calls, any_order=True)

        # Validate that we're outputting all of the expected fields and the
        # correct values
        with mock.patch("bandit.formatters.screen.do_print") as m:
            with open(self.tmp_fname, "w") as tmp_file:
                screen.report(
                    self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5
                )

            data = "\n".join([str(a) for a in m.call_args[0][0]])

            expected = "Run started"
            self.assertIn(expected, data)

            expected_items = [
                screen.header("Files in scope (1):"),
                "\n\tbinding.py (score: {SEVERITY: 1, CONFIDENCE: 1})",
            ]

            for item in expected_items:
                self.assertIn(item, data)

            expected = screen.header("Files excluded (1):") + "\n\tdef.py"
            self.assertIn(expected, data)

            expected = (
                "Total lines of code: 1000\n\tTotal lines skipped "
                "(#nosec): 50"
            )
            self.assertIn(expected, data)

            expected = (
                "Total issues (by severity):\n\t\tUndefined: 1\n\t\t"
                "Low: 1\n\t\tMedium: 1\n\t\tHigh: 1"
            )
            self.assertIn(expected, data)

            expected = (
                "Total issues (by confidence):\n\t\tUndefined: 1\n\t\t"
                "Low: 1\n\t\tMedium: 1\n\t\tHigh: 1"
            )
            self.assertIn(expected, data)

            expected = (
                screen.header("Files skipped (1):")
                + "\n\tabc.py (File is bad)"
            )
            self.assertIn(expected, data)
Example #10
    def test_report_nobaseline(self, get_issue_list):
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        self.manager.verbose = True
        self.manager.files_list = ['binding.py']

        self.manager.scores = [{'SEVERITY': [0, 0, 0, 1],
                                'CONFIDENCE': [0, 0, 0, 1]}]

        self.manager.skipped = [('abc.py', 'File is bad')]
        self.manager.excluded_files = ['def.py']

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        get_issue_list.return_value = [issue_a, issue_b]

        self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50}
        for category in ['SEVERITY', 'CONFIDENCE']:
            for level in ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']:
                self.manager.metrics.data['_totals']['%s.%s' %
                                                     (category, level)] = 1

        # Validate that we're outputting the correct issues
        output_str_fn = 'bandit.formatters.screen._output_issue_str'
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = 'ISSUE_OUTPUT_TEXT'

            with open(self.tmp_fname, 'w') as tmp_file:
                screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW,
                              lines=5)

            calls = [mock.call(issue_a, '', lines=5),
                     mock.call(issue_b, '', lines=5)]

            output_str.assert_has_calls(calls, any_order=True)

        # Validate that we're outputting all of the expected fields and the
        # correct values
        with mock.patch('bandit.formatters.screen.do_print') as m:
            with open(self.tmp_fname, 'w') as tmp_file:
                screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW,
                              lines=5)

            data = '\n'.join([str(a) for a in m.call_args[0][0]])

            expected = 'Run started'
            self.assertIn(expected, data)

            expected_items = [
                screen.header('Files in scope (1):'),
                '\n\tbinding.py (score: {SEVERITY: 1, CONFIDENCE: 1})']

            for item in expected_items:
                self.assertIn(item, data)

            expected = screen.header('Files excluded (1):') + '\n\tdef.py'
            self.assertIn(expected, data)

            expected = ('Total lines of code: 1000\n\tTotal lines skipped '
                        '(#nosec): 50')
            self.assertIn(expected, data)

            expected = ('Total issues (by severity):\n\t\tUndefined: 1\n\t\t'
                        'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
            self.assertIn(expected, data)

            expected = ('Total issues (by confidence):\n\t\tUndefined: 1\n\t\t'
                        'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
            self.assertIn(expected, data)

            expected = (screen.header('Files skipped (1):') +
                        '\n\tabc.py (File is bad)')
            self.assertIn(expected, data)