def test_display_failures(swagger_20, capsys, execution_context, results_set, verbosity, response):
    """Failures section should show the failed operation, the message, and a reproduction snippet."""
    execution_context.verbosity = verbosity
    # Given two test results - success and failure
    operation = models.APIOperation("/api/failure", "GET", {}, base_url="http://127.0.0.1:8080", schema=swagger_20)
    failure = models.TestResult(operation.method, operation.full_path, DataGenerationMethod.default())
    failure.add_failure("test", models.Case(operation), response, 0, "Message")
    execution_context.results.append(SerializedTestResult.from_test_result(failure))
    results_set.append(failure)
    event = Finished.from_results(results_set, 1.0)
    # When the failures are displayed
    default.display_failures(execution_context, event)
    out = capsys.readouterr().out.strip()
    # Then section title is displayed
    assert " FAILURES " in out
    # And operation with a failure is displayed as a subsection
    # NOTE(review): spacing inside expected substrings may have been collapsed by formatting — confirm
    assert " GET: /v1/api/failure " in out
    assert "Message" in out
    assert "Run this Python code to reproduce this failure: " in out
    assert f"requests.get('http://127.0.0.1:8080/api/failure', headers={{'User-Agent': '{USER_AGENT}'}})" in out
def test_display_errors(swagger_20, capsys, results_set, execution_context, show_errors_tracebacks, verbosity):
    """Errors section should show the endpoint, the error, the example, and seed/traceback hints."""
    execution_context.verbosity = verbosity
    # Given two test results - success and error
    endpoint = models.Endpoint("/api/error", "GET", {}, swagger_20)
    error = models.TestResult(endpoint, seed=123)
    error.add_error(ConnectionError("Connection refused!"), models.Case(endpoint, query={"a": 1}))
    results_set.append(error)
    execution_context.results.append(SerializedTestResult.from_test_result(error))
    event = Finished.from_results(results_set, 1.0)
    # When the errors are displayed
    execution_context.show_errors_tracebacks = show_errors_tracebacks
    default.display_errors(execution_context, event)
    out = capsys.readouterr().out.strip()
    # Then section title is displayed
    assert " ERRORS " in out
    help_message_exists = (
        "Add this option to your command line parameters to see full tracebacks: --show-errors-tracebacks" in out
    )
    # And help message is displayed only if tracebacks are not shown
    assert help_message_exists is not show_errors_tracebacks
    # And endpoint with an error is displayed as a subsection
    assert " GET: /api/error " in out
    # And the error itself is displayed
    assert "ConnectionError: Connection refused!" in out
    # And the example is displayed
    # NOTE(review): spacing inside expected substrings may have been collapsed by formatting — confirm
    assert "Query : {'a': 1}" in out
    assert "Or add this option to your command line parameters: --hypothesis-seed=123" in out
def test_display_statistic(capsys, swagger_20, execution_context, operation, response):
    """Statistic table should aggregate checks per name with colored PASSED/FAILED markers."""
    # Given multiple successful & failed checks in a single test
    success = models.Check("not_a_server_error", models.Status.success, response, 0, models.Case(operation))
    failure = models.Check("not_a_server_error", models.Status.failure, response, 0, models.Case(operation))
    single_test_statistic = models.TestResult(
        operation.method,
        operation.full_path,
        DataGenerationMethod.default(),
        [
            success,
            success,
            success,
            failure,
            failure,
            models.Check("different_check", models.Status.success, response, 0, models.Case(operation)),
        ],
    )
    results = models.TestResultSet([single_test_statistic])
    event = Finished.from_results(results, running_time=1.0)
    # When test results are displayed
    default.display_statistic(execution_context, event)
    lines = [line for line in capsys.readouterr().out.split("\n") if line]
    failed = strip_style_win32(click.style("FAILED", bold=True, fg="red"))
    passed = strip_style_win32(click.style("PASSED", bold=True, fg="green"))
    # Then all check results should be properly displayed with relevant colors
    # NOTE(review): alignment whitespace in the expected lines may have been collapsed by formatting — confirm
    assert lines[2:4] == [
        f" not_a_server_error 3 / 5 passed {failed} ",
        f" different_check 1 / 1 passed {passed} ",
    ]
def test_display_failures(swagger_20, capsys, execution_context, results_set, verbosity, response):
    """Failures section should show the failed operation, the message, and a cURL reproduction command."""
    execution_context.verbosity = verbosity
    # Given two test results - success and failure
    operation = models.APIOperation("/api/failure", "GET", {}, base_url="http://127.0.0.1:8080", schema=swagger_20)
    failure = models.TestResult(
        operation.method,
        operation.full_path,
        verbose_name=f"{operation.method} {operation.full_path}",
        data_generation_method=DataGenerationMethod.default(),
    )
    failure.add_failure("test", models.Case(operation), response, 0, "Message", None)
    execution_context.results.append(SerializedTestResult.from_test_result(failure))
    results_set.append(failure)
    event = Finished.from_results(results_set, 1.0)
    # When the failures are displayed
    default.display_failures(execution_context, event)
    out = capsys.readouterr().out.strip()
    # Then section title is displayed
    assert " FAILURES " in out
    # And operation with a failure is displayed as a subsection
    # NOTE(review): spacing inside expected substrings may have been collapsed by formatting — confirm
    assert " GET /v1/api/failure " in out
    assert "Message" in out
    assert "Run this cURL command to reproduce this failure:" in out
    headers = f"-H 'Content-Length: 0' -H 'Content-Type: application/json' -H 'User-Agent: {USER_AGENT}'"
    assert f"curl -X GET {headers} http://127.0.0.1:8080/api/failure" in out
def finished():
    """Build a ``Finished`` event with zeroed counters and all flags cleared."""
    return Finished(
        passed_count=0,
        failed_count=0,
        errored_count=0,
        has_failures=False,
        has_errors=False,
        has_logs=False,
        is_empty=False,
        total={},
        running_time=1.0,
    )
def test_display_failures(swagger_20, capsys, execution_context, results_set):
    """Failures section should show the failed endpoint, the check name, and a reproduction snippet."""
    # Given two test results - success and failure
    endpoint = models.Endpoint("/api/failure", "GET", {}, base_url="http://127.0.0.1:8080", schema=swagger_20)
    failure = models.TestResult(endpoint)
    failure.add_failure("test", models.Case(endpoint), "Message")
    execution_context.results.append(SerializedTestResult.from_test_result(failure))
    results_set.append(failure)
    event = Finished.from_results(results_set, 1.0)
    # When the failures are displayed
    default.display_failures(execution_context, event)
    out = capsys.readouterr().out.strip()
    # Then section title is displayed
    assert " FAILURES " in out
    # And endpoint with a failure is displayed as a subsection
    assert " GET: /api/failure " in out
    assert "Message" in out
    # And check name is displayed
    # NOTE(review): spacing inside expected substrings may have been collapsed by formatting — confirm
    assert "Check : test" in out
    assert "Run this Python code to reproduce this failure: " in out
    assert "requests.get('http://127.0.0.1:8080/api/failure')" in out
def test_display_statistic(capsys, swagger_20, endpoint):
    """Statistic table should aggregate checks per name with colored names and PASSED/FAILED markers."""
    # Given multiple successful & failed checks in a single test
    success = models.Check("not_a_server_error", models.Status.success)
    failure = models.Check("not_a_server_error", models.Status.failure)
    single_test_statistic = models.TestResult(
        endpoint,
        [success, success, success, failure, failure, models.Check("different_check", models.Status.success)],
    )
    results = models.TestResultSet([single_test_statistic])
    event = Finished.from_results(results, running_time=1.0)
    # When test results are displayed
    default.display_statistic(event)
    lines = [line for line in capsys.readouterr().out.split("\n") if line]
    failed = strip_style_win32(click.style("FAILED", bold=True, fg="red"))
    not_a_server_error = strip_style_win32(click.style("not_a_server_error", bold=True))
    different_check = strip_style_win32(click.style("different_check", bold=True))
    passed = strip_style_win32(click.style("PASSED", bold=True, fg="green"))
    # Then all check results should be properly displayed with relevant colors
    # NOTE(review): alignment whitespace in the expected lines may have been collapsed by formatting — confirm
    assert lines[1:3] == [
        f"{not_a_server_error} 3 / 5 passed {failed} ",
        f"{different_check} 1 / 1 passed {passed} ",
    ]