def test_main_fix(monkeypatch):
    """Run the app with --fix and verify the corrected report's metrics and results.

    Patches sys.argv, invokes main(), then checks per-file metrics, the
    ``_totals`` counters, and the two expected result entries in the
    generated JSON report. The report file is removed afterwards.
    """
    out_file = os.path.join(BASE_PATH, 'test_report.json')
    monkeypatch.setattr(sys, "argv", [
        'app.py',
        os.path.join(BASE_PATH, 'manual_report_example.json'),
        '--fix',
        '--output', out_file
    ])
    main()
    try:
        # Fix: close the report file deterministically via a context manager
        # (the original `json.load(open(out_file))` leaked the handle).
        with open(out_file) as fh:
            report = json.load(fh)
        assert "examples/__init__.py" in report.get('metrics', {})
        totals = report.get('metrics', {}).get('_totals', {})
        assert totals.get("nosec") == 0
        assert totals.get("loc") == 5
        assert totals.get("SEVERITY.UNDEFINED") == 0
        assert totals.get("SEVERITY.HIGH") == 0
        assert totals.get("SEVERITY.MEDIUM") == 1
        assert totals.get("SEVERITY.LOW") == 1
        assert totals.get("CONFIDENCE.UNDEFINED") == 0
        assert totals.get("CONFIDENCE.HIGH") == 1
        assert totals.get("CONFIDENCE.MEDIUM") == 1
        assert totals.get("CONFIDENCE.LOW") == 0
        hits = report.get('results', [{}, {}])
        assert hits[0].get('filename') == "examples/assert.py"
        assert hits[1].get('filename') == "examples/binding.py"
    finally:
        # Always clean up the generated report, even if an assertion failed.
        os.remove(out_file)
def test_main_no_args(monkeypatch):
    """Calling main() without CLI arguments must exit via argparse with code 2."""
    exit_mock = ExitMock()
    monkeypatch.setattr(argparse.ArgumentParser, "exit", exit_mock.exit)

    with pytest.raises(SystemExit):
        main()

    # argparse words its "missing argument" error differently on Python 2.
    if sys.version_info.major == 2:
        expected_msg = 'too few arguments'
    else:
        expected_msg = 'the following arguments are required: baseline'

    assert len(exit_mock.CALL_ARGS) == 2
    code, message = exit_mock.CALL_ARGS
    assert code == 2
    assert message.endswith('error: {}\n'.format(expected_msg))
    assert exit_mock.CALL_KWARGS == {}
def test_main_machine(monkeypatch):
    """--machine must emit the report as single-line JSON (no newlines).

    The generated report file is removed afterwards.
    """
    out_file = os.path.join(BASE_PATH, 'test_report.json')
    monkeypatch.setattr(sys, "argv", [
        'app.py',
        os.path.join(BASE_PATH, 'report_example.json'),
        '--machine',
        '--output', out_file
    ])
    main()
    try:
        # Fix: close the report file deterministically via a context manager
        # (the original `open(out_file).read()` leaked the handle).
        with open(out_file) as fh:
            assert "\n" not in fh.read()
    finally:
        # Always clean up the generated report, even if the assertion failed.
        os.remove(out_file)
def test_main_zip(monkeypatch):
    """--zip must drop the zero-finding file from the report's metrics.

    The generated report file is removed afterwards.
    """
    out_file = os.path.join(BASE_PATH, 'test_report.json')
    monkeypatch.setattr(sys, "argv", [
        'app.py',
        os.path.join(BASE_PATH, 'report_example.json'),
        '--zip',
        '--output', out_file
    ])
    main()
    try:
        # Fix: close the report file deterministically via a context manager
        # (the original `json.load(open(out_file))` leaked the handle).
        with open(out_file) as fh:
            report = json.load(fh)
        assert "examples/__init__.py" not in report.get('metrics', {})
    finally:
        # Always clean up the generated report, even if the assertion failed.
        os.remove(out_file)
def test_main_file_not_exist(monkeypatch):
    """A baseline report path that does not exist must abort with exit code -2."""
    exit_mock = ExitMock()
    monkeypatch.setattr(argparse.ArgumentParser, "exit", exit_mock.exit)

    missing = os.path.join(BASE_PATH, 'not_exist_file.json')
    monkeypatch.setattr(sys, "argv", ['app.py', missing])

    with pytest.raises(SystemExit):
        main()

    assert len(exit_mock.CALL_ARGS) == 2
    code, message = exit_mock.CALL_ARGS
    assert code == -2
    assert message == "File {} not found".format(missing)
    assert exit_mock.CALL_KWARGS == {}
def test_main_mix_file_not_exit(monkeypatch):
    """A missing --mixed report file must abort with exit code -3."""
    out_file = os.path.join(BASE_PATH, 'test_report.json')
    missing = os.path.join(BASE_PATH, 'not_exist_file.json')
    argv = [
        'app.py',
        os.path.join(BASE_PATH, 'manual_report_example.json'),
        '--mixed', missing,
        '--output', out_file,
    ]
    monkeypatch.setattr(sys, "argv", argv)

    exit_mock = ExitMock()
    monkeypatch.setattr(argparse.ArgumentParser, "exit", exit_mock.exit)

    with pytest.raises(SystemExit):
        main()

    assert len(exit_mock.CALL_ARGS) == 2
    code, message = exit_mock.CALL_ARGS
    assert code == -3
    assert message == "File {} not found".format(missing)
    assert exit_mock.CALL_KWARGS == {}
def test_main_mix_file(monkeypatch):
    """Merging a manual report with --mixed must union per-file metrics and totals.

    Runs main() with a manual report plus a --mixed report, then verifies
    every expected file appears in the merged metrics and that the
    ``_totals`` counters match. The report file is removed afterwards.
    """
    out_file = os.path.join(BASE_PATH, 'test_report.json')
    mix_file = os.path.join(BASE_PATH, 'mix_report_example.json')
    monkeypatch.setattr(sys, "argv", [
        'app.py',
        os.path.join(BASE_PATH, 'manual_report_example.json'),
        '--mixed', mix_file,
        '--output', out_file
    ])
    main()
    try:
        # Fix: close the report file deterministically via a context manager
        # (the original `json.load(open(out_file))` leaked the handle).
        with open(out_file) as fh:
            mixed_report = json.load(fh)

        # Fix: look up 'metrics' once instead of re-fetching it per assertion,
        # and drop the duplicated check on "examples/assert.py".
        metrics = mixed_report.get('metrics', {})
        expected_files = (
            "examples/__init__.py",
            "examples/assert.py",
            "examples/binding.py",
            "examples/django_sql_injection_extra.py",
            "examples/django_sql_injection_raw.py",
            "examples/eval.py",
        )
        for filename in expected_files:
            assert filename in metrics

        totals = metrics.get('_totals')
        expected_totals = {
            "CONFIDENCE.HIGH": 4,
            "CONFIDENCE.LOW": 0,
            "CONFIDENCE.MEDIUM": 16,
            "CONFIDENCE.UNDEFINED": 0,
            "SEVERITY.HIGH": 0,
            "SEVERITY.LOW": 1,
            "SEVERITY.MEDIUM": 19,
            "SEVERITY.UNDEFINED": 0,
            "loc": 50,
            "nosec": 1,
        }
        for key, value in expected_totals.items():
            assert totals.get(key) == value
    finally:
        # Always clean up the generated report, even if an assertion failed.
        os.remove(out_file)