@mock.patch('bandit.core.manager.BanditManager.get_issue_list')
def test_report_baseline(self, get_issue_list):
    conf = config.BanditConfig()
    self.manager = manager.BanditManager(conf, 'file')
    (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
    self.manager.out_file = self.tmp_fname

    issue_a = _get_issue_instance()
    issue_b = _get_issue_instance()

    issue_x = _get_issue_instance()
    issue_x.fname = 'x'
    issue_y = _get_issue_instance()
    issue_y.fname = 'y'
    issue_z = _get_issue_instance()
    issue_z.fname = 'z'

    # Candidate issues a and b, each mapped to the baseline issues they match
    get_issue_list.return_value = OrderedDict(
        [(issue_a, [issue_x]),
         (issue_b, [issue_y, issue_z])])

    # Validate that we're outputting the correct issues
    indent_val = ' ' * 10
    output_str_fn = 'bandit.formatters.text._output_issue_str'
    with mock.patch(output_str_fn) as output_str:
        output_str.return_value = 'ISSUE_OUTPUT_TEXT'
        b_text.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                      lines=5)

        calls = [mock.call(issue_a, '', lines=5),
                 mock.call(issue_b, '', show_code=False, show_lineno=False),
                 mock.call(issue_y, indent_val, lines=5),
                 mock.call(issue_z, indent_val, lines=5)]

        output_str.assert_has_calls(calls, any_order=True)
def test_report(self):
    self.manager.verbose = True
    file_list = ['binding.py']
    scores = [{'SEVERITY': [0] * len(constants.RANKING),
               'CONFIDENCE': [0] * len(constants.RANKING)}]
    exc_files = ['test_binding.py']

    b_text.report(self.manager, self.tmp_fname, self.issue.severity,
                  self.issue.confidence)

    with open(self.tmp_fname) as f:
        data = f.read()

    # Issue header: test name and description
    expected = '>> Issue: [%s] %s' % (self.issue.test, self.issue.text)
    self.assertIn(expected, data)

    # Severity/confidence line and location line
    expected = '   Severity: %s   Confidence: %s' % (
        self.issue.severity.capitalize(),
        self.issue.confidence.capitalize())
    self.assertIn(expected, data)

    expected = '   Location: %s:%d' % (self.tmp_fname,
                                       self.context['lineno'])
    self.assertIn(expected, data)

    # Metrics totals from the manager
    expected = 'Total lines of code: {0}'.format(
        self.manager.metrics.data['_totals']['loc'])
    self.assertIn(expected, data)

    expected = 'Total lines skipped (#nosec): {0}'.format(
        self.manager.metrics.data['_totals']['nosec'])
    self.assertIn(expected, data)
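# NOTE: test_report above relies on fixtures (self.manager, self.tmp_fname,
# self.issue, self.context) created in the test class's setUp(), which is not
# part of this excerpt. The sketch below is a hypothetical reconstruction of
# the minimum that setUp() would need to provide for the assertions above to
# hold; it is not the module's actual setUp(). It assumes the helper
# _get_issue_instance() defined at the end of this module.
def setUp(self):
    super().setUp()
    conf = config.BanditConfig()
    self.manager = manager.BanditManager(conf, 'file')
    (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
    self.manager.out_file = self.tmp_fname
    # One recorded issue, plus the location context the assertions check.
    self.context = {'filename': self.tmp_fname,
                    'lineno': 4,
                    'linerange': [4]}
    self.issue = _get_issue_instance()
    self.issue.fname = self.context['filename']
    self.issue.lineno = self.context['lineno']
    self.issue.linerange = self.context['linerange']
    self.manager.results.append(self.issue)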
@mock.patch('bandit.core.manager.BanditManager.get_issue_list')
def test_no_issues(self, get_issue_list):
    conf = config.BanditConfig()
    self.manager = manager.BanditManager(conf, 'file')
    (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
    self.manager.out_file = self.tmp_fname

    # No issues at all: the report should say so explicitly
    get_issue_list.return_value = OrderedDict()
    b_text.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                  lines=5)

    with open(self.tmp_fname) as f:
        data = f.read()
    self.assertIn('No issues identified.', data)
@mock.patch('bandit.core.manager.BanditManager.get_issue_list')
def test_report_nobaseline(self, get_issue_list):
    conf = config.BanditConfig()
    self.manager = manager.BanditManager(conf, 'file')
    (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
    self.manager.out_file = self.tmp_fname

    self.manager.verbose = True
    self.manager.files_list = ['binding.py']
    self.manager.scores = [{'SEVERITY': [0, 0, 0, 1],
                            'CONFIDENCE': [0, 0, 0, 1]}]
    self.manager.skipped = [('abc.py', 'File is bad')]
    self.manager.excluded_files = ['def.py']

    issue_a = _get_issue_instance()
    issue_b = _get_issue_instance()
    get_issue_list.return_value = [issue_a, issue_b]

    # One count at every severity/confidence level in the metrics totals
    self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50}
    for category in ['SEVERITY', 'CONFIDENCE']:
        for level in ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']:
            self.manager.metrics.data['_totals'][
                '%s.%s' % (category, level)] = 1

    # Validate that we're outputting the correct issues
    output_str_fn = 'bandit.formatters.text._output_issue_str'
    with mock.patch(output_str_fn) as output_str:
        output_str.return_value = 'ISSUE_OUTPUT_TEXT'
        b_text.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                      lines=5)

        calls = [mock.call(issue_a, '', lines=5),
                 mock.call(issue_b, '', lines=5)]
        output_str.assert_has_calls(calls, any_order=True)

    # Validate that we're outputting all of the expected fields and the
    # correct values
    b_text.report(self.manager, self.tmp_fname, bandit.LOW, bandit.LOW,
                  lines=5)
    with open(self.tmp_fname) as f:
        data = f.read()

    expected_items = ['Run started',
                      'Files in scope (1)',
                      'binding.py (score: ',
                      'CONFIDENCE: 1',
                      'SEVERITY: 1',
                      'Files excluded (1):',
                      'def.py',
                      'Undefined: 1',
                      'Low: 1',
                      'Medium: 1',
                      'High: 1',
                      'Total lines skipped ',
                      '(#nosec): 50',
                      'Total issues (by severity)',
                      'Total issues (by confidence)',
                      'Files skipped (1)',
                      'abc.py (File is bad)']
    for item in expected_items:
        self.assertIn(item, data)
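# The tests above build their fixture issues through a small module-level
# helper that is defined elsewhere in this file. The version below is a
# plausible sketch of its shape, inferred from the fields the tests set and
# assert on (fname, test, lineno); it assumes `from bandit.core import issue`
# and is included for readability, not as the canonical implementation.
def _get_issue_instance(severity=bandit.MEDIUM, confidence=bandit.MEDIUM):
    new_issue = issue.Issue(severity, confidence, 'Test issue')
    new_issue.fname = 'code.py'
    new_issue.test = 'bandit_plugin'
    new_issue.lineno = 1
    return new_issue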