Example #1
def test_display_statistic_junitxml(capsys, execution_context, results_set):
    # Given a JUnit XML output path set on the execution context
    xml_path = "/tmp/junit.xml"
    execution_context.junit_xml_file = xml_path
    # When test results are displayed
    default.display_statistic(execution_context, results_set)
    # Then the JUnit XML file path should be reported in the output
    # (strip_style_win32 is a test-suite helper that removes ANSI styling on Windows)
    assert capsys.readouterr().out.split("\n")[4] == strip_style_win32(
        click.style("JUnit XML file", bold=True) +
        click.style(f": {xml_path}"))
Example #2
# Imports below are assumed to mirror the schemathesis test suite; exact module
# paths may differ between versions, and strip_style_win32 is a test-suite helper
# that drops ANSI styling on Windows.
import click

from schemathesis import DataGenerationMethod, models
from schemathesis.cli.output import default
from schemathesis.runner.events import Finished


def test_display_statistic(capsys, swagger_20, execution_context, operation,
                           response):
    # Given multiple successful & failed checks in a single test
    success = models.Check("not_a_server_error", models.Status.success,
                           response, 0, models.Case(operation))
    failure = models.Check("not_a_server_error", models.Status.failure,
                           response, 0, models.Case(operation))
    single_test_statistic = models.TestResult(
        operation.method,
        operation.full_path,
        DataGenerationMethod.default(),
        [
            success,
            success,
            success,
            failure,
            failure,
            models.Check("different_check", models.Status.success, response, 0,
                         models.Case(operation)),
        ],
    )
    results = models.TestResultSet([single_test_statistic])
    event = Finished.from_results(results, running_time=1.0)
    # When test results are displayed
    default.display_statistic(execution_context, event)

    lines = [line for line in capsys.readouterr().out.split("\n") if line]
    failed = strip_style_win32(click.style("FAILED", bold=True, fg="red"))
    passed = strip_style_win32(click.style("PASSED", bold=True, fg="green"))
    # Then all check results should be properly displayed with relevant colors
    assert lines[2:4] == [
        f"    not_a_server_error                    3 / 5 passed          {failed} ",
        f"    different_check                       1 / 1 passed          {passed} ",
    ]
Example #3
def test_display_statistic(capsys, swagger_20, endpoint):
    # Given multiple successful & failed checks in a single test
    success = models.Check("not_a_server_error", models.Status.success)
    failure = models.Check("not_a_server_error", models.Status.failure)
    single_test_statistic = models.TestResult(
        endpoint, [success, success, success, failure, failure, models.Check("different_check", models.Status.success)]
    )
    results = models.TestResultSet([single_test_statistic])
    # When test results are displayed
    default.display_statistic(results)

    lines = [line for line in capsys.readouterr().out.split("\n") if line]
    failed = click.style("FAILED", bold=True, fg="red")
    not_a_server_error = click.style("not_a_server_error", bold=True)
    different_check = click.style("different_check", bold=True)
    passed = click.style("PASSED", bold=True, fg="green")
    # Then all check results should be properly displayed with relevant colors
    assert lines[1:3] == [
        f"{not_a_server_error}            3 / 5 passed          {failed} ",
        f"{different_check}               1 / 1 passed          {passed} ",
    ]
Example #4
def test_display_statistic_empty(capsys, execution_context, results_set):
    # Given a results set with no checks
    # When test results are displayed
    default.display_statistic(execution_context, results_set)
    # Then a "No checks were performed." message should be shown
    assert capsys.readouterr().out.split("\n")[2] == strip_style_win32(
        click.style("No checks were performed.", bold=True))
Example #5
def test_display_statistic_empty(capsys, results_set):
    # When an empty results set is displayed
    default.display_statistic(results_set)
    # Then a "No checks were performed." message should be shown
    assert capsys.readouterr().out.split("\n")[2] == click.style(
        "No checks were performed.", bold=True)