def test_issue_str(self):
    """The string form of an issue must include test id, CWE, severity,
    confidence, and the file location."""
    instance = _get_issue_instance()
    cwe_text = str(issue.Cwe(issue.Cwe.MULTIPLE_BINDS))
    template = (
        "Issue: 'Test issue' from B999:bandit_plugin:"
        " CWE: %s,"
        " Severity: MEDIUM "
        "Confidence: MEDIUM at code.py:1:8"
    )
    self.assertEqual(template % cwe_text, str(instance))
def test_populate_baseline_success(self):
    """populate_baseline() must parse valid JSON into issue objects that
    compare equal to the same issues built via issue_from_dict()."""
    cwe_link = "https://cwe.mitre.org/data/definitions/605.html"
    baseline_json = """{
        "results": [
            {
                "code": "test code",
                "filename": "example_file.py",
                "issue_severity": "low",
                "issue_cwe": {
                    "id": 605,
                    "link": "%s"
                },
                "issue_confidence": "low",
                "issue_text": "test issue",
                "test_name": "some_test",
                "test_id": "x",
                "line_number": "n",
                "line_range": "n-m"
            }
        ]
    }
    """ % (cwe_link)
    issue_dict = {
        "code": "test code",
        "filename": "example_file.py",
        "issue_severity": "low",
        "issue_cwe": issue.Cwe(issue.Cwe.MULTIPLE_BINDS).as_dict(),
        "issue_confidence": "low",
        "issue_text": "test issue",
        "test_name": "some_test",
        "test_id": "x",
        "line_number": "n",
        "line_range": "n-m",
    }
    expected = [issue.issue_from_dict(issue_dict)]
    self.manager.populate_baseline(baseline_json)
    self.assertEqual(expected, self.manager.baseline)
def test_report_nobaseline(self, get_issue_list):
    """End-to-end check of the text formatter without a baseline.

    Verifies (a) that the formatter calls _output_issue_str once per
    issue with the expected arguments, and (b) that the rendered report
    contains every expected section and metric value.

    :param get_issue_list: mock injected by a @mock.patch decorator
        (presumably patching the manager's get_issue_list — the
        decorator is outside this view; confirm at the class/method
        definition site).
    """
    # Build a fresh manager with hand-populated state so the report
    # content is fully deterministic.
    conf = config.BanditConfig()
    self.manager = manager.BanditManager(conf, "file")
    (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
    self.manager.out_file = self.tmp_fname
    self.manager.verbose = True
    self.manager.files_list = ["binding.py"]
    self.manager.scores = [
        {"SEVERITY": [0, 0, 0, 1], "CONFIDENCE": [0, 0, 0, 1]}
    ]
    self.manager.skipped = [("abc.py", "File is bad")]
    self.manager.excluded_files = ["def.py"]
    issue_a = _get_issue_instance()
    issue_b = _get_issue_instance()
    get_issue_list.return_value = [issue_a, issue_b]
    # Totals drive the "Total lines skipped" / per-level count lines in
    # the report; every severity/confidence level is set to 1.
    self.manager.metrics.data["_totals"] = {
        "loc": 1000,
        "nosec": 50,
        "skipped_tests": 0,
    }
    for category in ["SEVERITY", "CONFIDENCE"]:
        for level in ["UNDEFINED", "LOW", "MEDIUM", "HIGH"]:
            self.manager.metrics.data["_totals"][f"{category}.{level}"] = 1

    # Validate that we're outputting the correct issues
    output_str_fn = "bandit.formatters.text._output_issue_str"
    with mock.patch(output_str_fn) as output_str:
        output_str.return_value = "ISSUE_OUTPUT_TEXT"

        with open(self.tmp_fname, "w") as tmp_file:
            b_text.report(
                self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5
            )

        calls = [
            mock.call(issue_a, "", lines=5),
            mock.call(issue_b, "", lines=5),
        ]

        # any_order because the formatter's iteration order over issues
        # is not part of the contract being tested here.
        output_str.assert_has_calls(calls, any_order=True)

    # Validate that we're outputting all of the expected fields and the
    # correct values
    with open(self.tmp_fname, "w") as tmp_file:
        b_text.report(
            self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5
        )

    with open(self.tmp_fname) as f:
        data = f.read()

        # Substrings (not full lines) expected somewhere in the report;
        # several are split across prefix/value pairs because the
        # formatter may wrap lines.
        expected_items = [
            "Run started",
            "Files in scope (1)",
            "binding.py (score: ",
            "CONFIDENCE: 1",
            "SEVERITY: 1",
            "CWE: %s" % str(issue.Cwe(issue.Cwe.MULTIPLE_BINDS)),
            "Files excluded (1):",
            "def.py",
            "Undefined: 1",
            "Low: 1",
            "Medium: 1",
            "High: 1",
            "Total lines skipped ",
            "(#nosec): 50",
            "Total potential issues skipped due to specifically being ",
            "disabled (e.g., #nosec BXXX): 0",
            "Total issues (by severity)",
            "Total issues (by confidence)",
            "Files skipped (1)",
            "abc.py (File is bad)",
        ]

        for item in expected_items:
            self.assertIn(item, data)