Example #1
    def test_report_nobaseline(self, get_issue_list):
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, "file")

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        self.manager.verbose = True
        self.manager.files_list = ["binding.py"]

        self.manager.scores = [
            {"SEVERITY": [0, 0, 0, 1], "CONFIDENCE": [0, 0, 0, 1]}
        ]

        self.manager.skipped = [("abc.py", "File is bad")]
        self.manager.excluded_files = ["def.py"]

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

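        # get_issue_list is the patched BanditManager.get_issue_list injected
        # by the mock.patch decorator this test is expected to carry; have it
        # return the two synthetic issues built above.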
        get_issue_list.return_value = [issue_a, issue_b]

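        # Fake the collected metrics: overall line and nosec totals plus
        # exactly one finding at every severity and confidence level.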
        self.manager.metrics.data["_totals"] = {"loc": 1000, "nosec": 50}
        for category in ["SEVERITY", "CONFIDENCE"]:
            for level in ["UNDEFINED", "LOW", "MEDIUM", "HIGH"]:
                self.manager.metrics.data["_totals"][f"{category}.{level}"] = 1

        # Validate that we're outputting the correct issues
        output_str_fn = "bandit.formatters.screen._output_issue_str"
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = "ISSUE_OUTPUT_TEXT"

            with open(self.tmp_fname, "w") as tmp_file:
                screen.report(
                    self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5
                )

            calls = [
                mock.call(issue_a, "", lines=5),
                mock.call(issue_b, "", lines=5),
            ]

            output_str.assert_has_calls(calls, any_order=True)

        # Validate that we're outputting all of the expected fields and the
        # correct values
        with mock.patch("bandit.formatters.screen.do_print") as m:
            with open(self.tmp_fname, "w") as tmp_file:
                screen.report(
                    self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5
                )

            data = "\n".join([str(a) for a in m.call_args[0][0]])

            expected = "Run started"
            self.assertIn(expected, data)

            expected_items = [
                screen.header("Files in scope (1):"),
                "\n\tbinding.py (score: {SEVERITY: 1, CONFIDENCE: 1})",
            ]

            for item in expected_items:
                self.assertIn(item, data)

            expected = screen.header("Files excluded (1):") + "\n\tdef.py"
            self.assertIn(expected, data)

            expected = (
                "Total lines of code: 1000\n\tTotal lines skipped "
                "(#nosec): 50"
            )
            self.assertIn(expected, data)

            expected = (
                "Total issues (by severity):\n\t\tUndefined: 1\n\t\t"
                "Low: 1\n\t\tMedium: 1\n\t\tHigh: 1"
            )
            self.assertIn(expected, data)

            expected = (
                "Total issues (by confidence):\n\t\tUndefined: 1\n\t\t"
                "Low: 1\n\t\tMedium: 1\n\t\tHigh: 1"
            )
            self.assertIn(expected, data)

            expected = (
                screen.header("Files skipped (1):")
                + "\n\tabc.py (File is bad)"
            )
            self.assertIn(expected, data)
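Example #1 (like the variants that follow) is a method cut out of a larger test module, so it assumes scaffolding that is not shown: the imports, a test class, a mock.patch decorator that supplies the get_issue_list argument, and a _get_issue_instance helper. A minimal sketch of that scaffolding follows; the class name, the patch target, and the helper body are assumptions made here for illustration (the real helper presumably builds a genuine bandit.core.issue.Issue, whose constructor differs between bandit releases, so a mock.Mock stands in).

import tempfile
import unittest
from unittest import mock

import bandit
from bandit.core import config
from bandit.core import manager
from bandit.formatters import screen


def _get_issue_instance(severity=bandit.MEDIUM, confidence=bandit.MEDIUM):
    # Hypothetical stand-in: a mock carrying the attributes a formatter
    # typically reads from an issue.
    new_issue = mock.Mock(severity=severity, confidence=confidence)
    new_issue.fname = "code.py"
    new_issue.lineno = 1
    new_issue.text = "Test issue"
    return new_issue


class ScreenFormatterTests(unittest.TestCase):
    # Assumed patch target: BanditManager.get_issue_list, passed to the test
    # method as the get_issue_list parameter.
    @mock.patch("bandit.core.manager.BanditManager.get_issue_list")
    def test_report_nobaseline(self, get_issue_list):
        ...  # body as shown in Example #1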
Example #2
    def test_report_nobaseline(self, get_issue_list):
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        self.manager.verbose = True
        self.manager.files_list = ['binding.py']

        self.manager.scores = [{'SEVERITY': [0, 0, 0, 1],
                                'CONFIDENCE': [0, 0, 0, 1]}]

        self.manager.skipped = [('abc.py', 'File is bad')]
        self.manager.excluded_files = ['def.py']

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        get_issue_list.return_value = [issue_a, issue_b]

        self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50}
        for category in ['SEVERITY', 'CONFIDENCE']:
            for level in ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']:
                key = '%s.%s' % (category, level)
                self.manager.metrics.data['_totals'][key] = 1

        # Validate that we're outputting the correct issues
        output_str_fn = 'bandit.formatters.screen._output_issue_str'
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = 'ISSUE_OUTPUT_TEXT'

            screen.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                          lines=5)

            calls = [mock.call(issue_a, '', lines=5),
                     mock.call(issue_b, '', lines=5)]

            output_str.assert_has_calls(calls, any_order=True)

        # Validate that we're outputting all of the expected fields and the
        # correct values
        with mock.patch('bandit.formatters.screen.do_print') as m:
            screen.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                          lines=5)

            data = '\n'.join([str(a) for a in m.call_args[0][0]])

            expected = 'Run started'
            self.assertIn(expected, data)

            expected_items = [
                screen.header('Files in scope (1):'),
                '\n\tbinding.py (score: {SEVERITY: 1, CONFIDENCE: 1})']

            for item in expected_items:
                self.assertIn(item, data)

            expected = screen.header('Files excluded (1):') + '\n\tdef.py'
            self.assertIn(expected, data)

            expected = ('Total lines of code: 1000\n\tTotal lines skipped '
                        '(#nosec): 50')
            self.assertIn(expected, data)

            expected = ('Total issues (by severity):\n\t\tUndefined: 1\n\t\t'
                        'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
            self.assertIn(expected, data)

            expected = ('Total issues (by confidence):\n\t\tUndefined: 1\n\t\t'
                        'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
            self.assertIn(expected, data)

            expected = (screen.header('Files skipped (1):') +
                        '\n\tabc.py (File is bad)')
            self.assertIn(expected, data)
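Example #2 hands screen.report the output path (self.tmp_fname) rather than an open file object, matching an older formatter signature, and like the other variants it never closes the descriptor returned by tempfile.mkstemp. If cleanup matters, a hedged sketch along these lines (not part of the original tests) registers it up front:

import os
import tempfile
import unittest


class ScreenFormatterTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        # mkstemp() returns an already-open OS-level descriptor plus a path;
        # register cleanups so neither the descriptor nor the report file
        # outlives the test.
        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.addCleanup(os.close, tmp_fd)
        self.addCleanup(os.unlink, self.tmp_fname)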
Example #3
    def test_report_nobaseline(self, get_issue_list):
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, 'file')

        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname

        self.manager.verbose = True
        self.manager.files_list = ['binding.py']

        self.manager.scores = [{'SEVERITY': [0, 0, 0, 1],
                                'CONFIDENCE': [0, 0, 0, 1]}]

        self.manager.skipped = [('abc.py', 'File is bad')]
        self.manager.excluded_files = ['def.py']

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        get_issue_list.return_value = [issue_a, issue_b]

        self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50}
        for category in ['SEVERITY', 'CONFIDENCE']:
            for level in ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']:
                key = '%s.%s' % (category, level)
                self.manager.metrics.data['_totals'][key] = 1

        # Validate that we're outputting the correct issues
        output_str_fn = 'bandit.formatters.screen._output_issue_str'
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = 'ISSUE_OUTPUT_TEXT'

            with open(self.tmp_fname, 'w') as tmp_file:
                screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW,
                              lines=5)

            calls = [mock.call(issue_a, '', lines=5),
                     mock.call(issue_b, '', lines=5)]

            output_str.assert_has_calls(calls, any_order=True)

        # Validate that we're outputting all of the expected fields and the
        # correct values
        with mock.patch('bandit.formatters.screen.do_print') as m:
            with open(self.tmp_fname, 'w') as tmp_file:
                screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW,
                              lines=5)

            data = '\n'.join([str(a) for a in m.call_args[0][0]])

            expected = 'Run started'
            self.assertIn(expected, data)

            expected_items = [
                screen.header('Files in scope (1):'),
                '\n\tbinding.py (score: {SEVERITY: 1, CONFIDENCE: 1})']

            for item in expected_items:
                self.assertIn(item, data)

            expected = screen.header('Files excluded (1):') + '\n\tdef.py'
            self.assertIn(expected, data)

            expected = ('Total lines of code: 1000\n\tTotal lines skipped '
                        '(#nosec): 50')
            self.assertIn(expected, data)

            expected = ('Total issues (by severity):\n\t\tUndefined: 1\n\t\t'
                        'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
            self.assertIn(expected, data)

            expected = ('Total issues (by confidence):\n\t\tUndefined: 1\n\t\t'
                        'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
            self.assertIn(expected, data)

            expected = (screen.header('Files skipped (1):') +
                        '\n\tabc.py (File is bad)')
            self.assertIn(expected, data)
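All three variants build their section-header expectations through screen.header() instead of comparing plain strings, because the screen formatter decorates headers with ANSI escape sequences before printing. A rough sketch of that behavior follows; the color codes are assumptions, and the real helper lives in bandit.formatters.screen:

# Approximate behavior of a colorizing header helper; the escape codes below
# are assumptions, not bandit's actual source.
HEADER_COLOR = "\033[95m"  # assumed header color
RESET_COLOR = "\033[0m"    # reset terminal attributes


def header(text, *args):
    # Substitute any arguments into the title and wrap it in color codes,
    # which is why the tests build their expectations via screen.header().
    body = (text % args) if args else text
    return "%s%s%s" % (HEADER_COLOR, body, RESET_COLOR)


# header("Files in scope (%i):", 1) -> "\x1b[95mFiles in scope (1):\x1b[0m"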