def test_test_metrics():

    assert results[0].get_test_objects()
    for test in results[0].get_test_objects():

        # "None"/"None" keys select the single metrics entry of a test with no class or test parameters
        metrics = test.metrics.get_metrics()["None"]["None"]
        QualityManager.check_test_metrics(metrics)
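Several of these snippets, this first one included, refer to module-level names such as results or tests that are defined elsewhere in the original test modules. Judging from the self-contained examples below (Example #3, Example #5, Example #26), they are presumably produced by running a suite first. A minimal sketch of that assumed setup follows, with MySuite standing in as a placeholder suite class:

# Assumed setup for the snippets that use bare `results` / `tests`.
# MySuite is a placeholder; the pattern mirrors Examples #3, #5 and #26.
runner = Runner(suites=[MySuite])
runner.run()
results = runner.get_executed_suites()  # executed suite objects
tests = results[0].get_test_objects()   # per-test objects of the first suite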
Example #2
def test_retry5():

    tested = False
    for test in tests:
        if test.get_function_name() == "retry5":
            for class_param, class_data in test.metrics.get_metrics().items():
                for param, metrics in class_data.items():
                    if metrics["class_param"] == 1:
                        QualityManager.check_test_metrics(
                            metrics,
                            expected_status="error",
                            expected_exception=Exception,
                            expected_retry_count=4,
                            expected_exception_count=4,
                            expected_performance_count=4,
                            expected_param=param,
                            expected_class_param=metrics["class_param"])
                        for i in metrics["exceptions"]:
                            assert str(i.with_traceback(None)) == "On purpose"
                    else:
                        QualityManager.check_test_metrics(
                            metrics,
                            expected_status="success",
                            expected_param=param,
                            expected_class_param=metrics["class_param"])
                    tested = True
    if not tested:
        raise Exception("Test did not run")
Example #3
def test_execution_sequence2():
    runner = Runner([ExecutionSequence2])
    aggregator = runner.run()
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 0
    assert metrics[TestCategory.FAIL] == 6
    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(
        metrics,
        expected_status="fail",
        expected_retry_count=2,
        expected_beforetest_exception_count=20,
        expected_beforetest_performance_count=20,
        expected_beforetest_exception_object=None,
        expected_aftertest_exception_count=20,
        expected_aftertest_performance_count=20,
        expected_aftertest_exception_object=AssertionError)
    for test in suites[0].get_test_objects():
        test_metrics = test.metrics.get_metrics()
        for class_param, class_param_data in test_metrics.items():
            for param, param_data in class_param_data.items():
                expected_value = 4 if param_data["param"] is not None else 2
                after_test = param_data[DecoratorType.AFTER_TEST]
                assert len(after_test["exceptions"]) == expected_value
                assert len(after_test["tracebacks"]) == expected_value
                assert len(after_test["performance"]) == expected_value
Example #4
def test_execution_sequence4():
    runner = Runner([ExecutionSequence4])
    aggregator = runner.run()
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 6
    assert metrics[TestCategory.FAIL] == 0
    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")
    for test in suites[0].get_test_objects():
        test_metrics = test.metrics.get_metrics()
        for class_param, class_param_data in test_metrics.items():
            for param, param_data in class_param_data.items():
                expected_value = 0
                after_test = param_data[DecoratorType.AFTER_TEST]
                before_test = param_data[DecoratorType.BEFORE_TEST]
                assert len(after_test["exceptions"]) == expected_value
                assert len(after_test["tracebacks"]) == expected_value
                assert len(after_test["performance"]) == expected_value
                assert len(before_test["exceptions"]) == expected_value
                assert len(before_test["tracebacks"]) == expected_value
                assert len(before_test["performance"]) == expected_value
Example #5
def test_runner_without_config():

    Cmd.run(['python', EXE, 'config', 'restore', '--all'])
    Cmd.run(['python', EXE, 'config', 'update', '-k', 'ui'])
    Cmd.run(['python', EXE, 'config', 'update', '-g', 'sso'])

    runner = Runner(suites=[NewProductsSuite])
    runner.run()
    results = runner.get_executed_suites()
    tests = results[0].get_test_objects()
    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")

    assert len(tests) == 5, \
        "Expected 5 tests to be executed, Actually executed: {}".format(len(tests))

    for test in tests:
        for class_param, class_data in test.metrics.get_metrics().items():
            for param, metrics in class_data.items():
                QualityManager.check_test_metrics(
                    metrics,
                    expected_status="success",
                    expected_param=param,
                    expected_class_param=metrics["class_param"])
Example #6
def test_before_group_exceptions():

    for suite in results:
        metrics = suite.metrics.get_metrics()
        QualityManager.check_class_metrics(metrics,
                                           expected_status="ignore",
                                           expected_retry_count=0)
Example #7
def test_component():
    runner = Runner([SkipFeature])
    aggregator = runner.run(features=["Login"])
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 3
    suites = runner.get_executed_suites()

    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")
Example #8
def test_parameters_plus_plus():
    runner = Runner([ParametersSuite])
    aggregator = runner.run()
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 36
    assert metrics[TestCategory.IGNORE] == 4
    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="fail")
Example #9
def test_class_metrics():

    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics,
                                       expected_status="fail",
                                       expected_retry_count=2,
                                       expected_beforeclass_exception_count=2,
                                       expected_beforeclass_exception_object=Exception,
                                       expected_beforeclass_performance_count=2)
Example #10
def test_skip_function():

    tested = False
    for test in tests:
        if test.get_function_name() == "skip_function":
            metrics = test.metrics.get_metrics()["None"]["None"]
            QualityManager.check_test_metrics(metrics, expected_status="skip")
            tested = True
    if not tested:
        raise Exception("Test did not run")
Example #11
def test_before_test_error1():

    metrics5 = results5[0].metrics.get_metrics()
    QualityManager.check_class_metrics(
        metrics5,
        expected_status="fail",
        expected_retry_count=1,
        expected_beforetest_performance_count=1,
        expected_beforetest_exception_object=Exception,
        expected_beforetest_exception_count=1)
Example #12
def test_after_test_error3():

    metrics6 = results6[0].metrics.get_metrics()
    QualityManager.check_class_metrics(
        metrics6,
        expected_status="fail",
        expected_retry_count=1,
        expected_aftertest_performance_count=1,
        expected_aftertest_exception_object=Exception,
        expected_aftertest_exception_count=1)
Example #13
def test_after_test_error4():

    assert results6[0].get_test_objects()
    for test in results6[0].get_test_objects():

        if test.get_function_name() == "failure":
            metrics = test.metrics.get_metrics()["None"]["None"]
            QualityManager.check_test_metrics(metrics,
                                              expected_status="error",
                                              expected_exception=Exception)
Example #14
def test_failure2():
    tests = results2[0].get_test_objects()
    for test in tests:
        metrics = test.metrics.get_metrics()["None"]["None"]
        QualityManager.check_test_metrics(metrics,
                                          expected_status="error",
                                          expected_retry_count=4,
                                          expected_exception_count=4,
                                          expected_performance_count=4,
                                          expected_exception=Exception)
Example #15
def test_failure3():
    tests = results3[0].get_test_objects()
    for test in tests:
        metrics = test.metrics.get_metrics()["None"]["None"]
        pprint.pprint(metrics)
        QualityManager.check_test_metrics(metrics,
                                          expected_status="fail",
                                          expected_retry_count=4,
                                          expected_exception_count=4,
                                          expected_performance_count=4,
                                          expected_exception=AssertionError)
Example #16
def test_class_metrics():

    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(
        metrics,
        expected_beforeclass_exception_count=1,
        expected_beforeclass_exception_object=None,
        expected_beforeclass_performance_count=1,
        expected_afterclass_exception_count=1,
        expected_afterclass_exception_object=Exception,
        expected_afterclass_performance_count=1)
Example #17
def test_class_metrics3():

    metrics = results3[0].metrics.get_metrics()
    pprint.pprint(metrics)
    QualityManager.check_class_metrics(
        metrics,
        expected_retry_count=2,
        expected_status="fail",
        expected_aftertest_exception_count=8,
        expected_aftertest_exception_object=AssertionError,
        expected_aftertest_performance_count=8)
Example #18
def test_component():

    runner = Runner([SkipTests])
    aggregator = runner.run(tests=[SkipTests.test_1, SkipTests.test_2])
    metrics = aggregator.get_basic_report()["tests"]

    assert metrics[TestCategory.SUCCESS] == 2
    assert metrics[TestCategory.SKIP] == 1

    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")
Example #19
def test_no_retry_on_assertion():

    tested = False
    for test in tests:
        if test.get_function_name() == "no_retry_on_assertion":
            metrics = test.metrics.get_metrics()["None"]["None"]
            QualityManager.check_test_metrics(metrics,
                                              expected_status="fail",
                                              expected_exception=AssertionError)
            tested = True
    if not tested:
        raise Exception("Test did not run")
Example #20
def test_test_metrics():

    assert results[0].get_test_objects()
    for test in results[0].get_test_objects():

        metrics = test.metrics.get_metrics()["None"]["None"]
        QualityManager.check_test_metrics(metrics,
                                          expected_status="ignore",
                                          expected_exception_count=1,
                                          expected_performance_count=1,
                                          expected_retry_count=1,
                                          expected_exception=Exception)
Example #21
def test_parameters():

    tested = False
    for test in tests:
        if test.get_function_name() == "parameters":
            properties = test.metrics.get_metrics()["None"]
            for param, metrics in properties.items():
                QualityManager.check_test_metrics(metrics,
                                                  expected_status="success",
                                                  expected_param=param)
            tested = True
    if not tested:
        raise Exception("Test did not run")
Example #22
def test_no_retry3():

    tested = False
    for test in tests:
        if test.get_function_name() == "no_retry3":
            for class_param, class_data in test.metrics.get_metrics().items():
                for param, metrics in class_data.items():
                    QualityManager.check_test_metrics(
                        metrics,
                        expected_status="success",
                        expected_param=param,
                        expected_class_param=metrics["class_param"])
                    tested = True
    if not tested:
        raise Exception("Test did not run")
Example #23
def test_retry_on_exception():

    tested = False
    for test in tests:
        if test.get_function_name() == "retry_on_exception":
            metrics = test.metrics.get_metrics()["None"]["None"]
            QualityManager.check_test_metrics(metrics,
                                              expected_retry_count=9,
                                              expected_performance_count=9,
                                              expected_exception_count=9,
                                              expected_status="error",
                                              expected_exception=Exception)
            tested = True
    if not tested:
        raise Exception("Test did not run")
Example #24
def test_skip():

    tested = False
    for test in tests:
        if test.get_function_name() == "skip":
            for class_param, class_data in test.metrics.get_metrics().items():
                for param, metrics in class_data.items():
                    QualityManager.check_test_metrics(
                        metrics,
                        expected_status="skip",
                        expected_retry_count=2,
                        expected_exception_count=2,
                        expected_performance_count=2)
                    tested = True
    if not tested:
        raise Exception("Test did not run")
Example #25
def test_retry():

    tested = False
    for test in tests:
        if test.get_function_name() == "retry":
            for class_param, class_data in test.metrics.get_metrics().items():
                for param, metrics in class_data.items():
                    QualityManager.check_test_metrics(
                        metrics,
                        expected_status="fail",
                        expected_exception=AssertionError,
                        expected_retry_count=4,
                        expected_exception_count=4,
                        expected_performance_count=4,
                        expected_class_param=metrics["class_param"])
                    for i in metrics["exceptions"]:
                        assert str(i.with_traceback(None)) == "Expected Assertion Error"
                    tested = True
    if not tested:
        raise Exception("Test did not run")
Example #26
def test_runner_with_config():

    Cmd.run(['python', EXE, 'config', 'restore', '--all'])
    Cmd.run(['python', EXE, 'config', 'update', '-k', 'ui'])
    Cmd.run(['python', EXE, 'config', 'update', '-g', 'sso'])
    runner = Runner(suites=[ShoppingCartSuite, AuthApiSuite],
                    config=Config.get_config_path(CliConstants.TJ_CONFIG_NAME))
    runner.run()

    results = runner.get_executed_suites()
    tests_ShoppingCartSuite = results[0].get_test_objects()
    tests_AuthApiSuite = results[1].get_test_objects()
    pprint.pprint(results[0].metrics.get_metrics())

    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")
    print(len(tests_ShoppingCartSuite))
    assert len(tests_ShoppingCartSuite) == 5, \
        "Expected 5 tests to be executed, Actually executed: {}".format(len(tests_ShoppingCartSuite))
    assert len(tests_AuthApiSuite) == 6, \
        "Expected 6 tests to be executed, Actually executed: {}".format(len(tests_AuthApiSuite))
    for test in tests_ShoppingCartSuite:
        for class_param, class_data in test.metrics.get_metrics().items():
            for param, metrics in class_data.items():
                QualityManager.check_test_metrics(
                    metrics,
                    expected_status="success",
                    expected_param=param,
                    expected_class_param=metrics["class_param"])
    for test in tests_AuthApiSuite:
        for class_param, class_data in test.metrics.get_metrics().items():
            for param, metrics in class_data.items():
                QualityManager.check_test_metrics(
                    metrics,
                    expected_status="skip",
                    expected_param=param,
                    expected_class_param=metrics["class_param"],
                    expected_retry_count=2,
                    expected_exception_count=2,
                    expected_performance_count=2)
Example #27
def test_class_metrics3():

    metrics = results3[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics,
                                       expected_status="ignore",
                                       expected_retry_count=0)
Example #28
def test_test_metrics():
    for test in tests:
        metrics = test.metrics.get_metrics()["None"]["None"]
        QualityManager.check_test_metrics(metrics)
Example #29
def test_class_metrics():

    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics,
                                       expected_retry_count=0,
                                       expected_status="cancel")
Example #30
def test_class_metrics():

    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics)