Ejemplo n.º 1
0
def test_execution_sequence2():
    """Run ExecutionSequence2 and verify all 6 tests fail with the
    expected before/after-test exception and performance counts."""
    runner = Runner([ExecutionSequence2])
    report = runner.run().get_basic_report()["tests"]
    assert report[TestCategory.SUCCESS] == 0
    assert report[TestCategory.FAIL] == 6

    executed = runner.get_executed_suites()
    QualityManager.check_class_metrics(
        executed[0].metrics.get_metrics(),
        expected_status="fail",
        expected_retry_count=2,
        expected_beforetest_exception_count=20,
        expected_beforetest_performance_count=20,
        expected_beforetest_exception_object=None,
        expected_aftertest_exception_count=20,
        expected_aftertest_performance_count=20,
        expected_aftertest_exception_object=AssertionError)

    # Parameterized tests record 4 after-test entries, unparameterized 2.
    for test_obj in executed[0].get_test_objects():
        for class_param_data in test_obj.metrics.get_metrics().values():
            for param_data in class_param_data.values():
                expected = 2 if param_data["param"] is None else 4
                after = param_data[DecoratorType.AFTER_TEST]
                assert len(after["exceptions"]) == expected
                assert len(after["tracebacks"]) == expected
                assert len(after["performance"]) == expected
Ejemplo n.º 2
0
def test_execution_sequence4():
    """Run ExecutionSequence4 and verify all 6 tests pass, with no
    before/after-test exceptions, tracebacks, or performance entries."""
    runner = Runner([ExecutionSequence4])
    report = runner.run().get_basic_report()["tests"]
    assert report[TestCategory.SUCCESS] == 6
    assert report[TestCategory.FAIL] == 0

    executed = runner.get_executed_suites()
    QualityManager.check_class_metrics(
        executed[0].metrics.get_metrics(), expected_status="success")

    # Both decorator phases must have recorded nothing for every test.
    for test_obj in executed[0].get_test_objects():
        for class_param_data in test_obj.metrics.get_metrics().values():
            for param_data in class_param_data.values():
                for phase in (DecoratorType.AFTER_TEST,
                              DecoratorType.BEFORE_TEST):
                    phase_data = param_data[phase]
                    assert len(phase_data["exceptions"]) == 0
                    assert len(phase_data["tracebacks"]) == 0
                    assert len(phase_data["performance"]) == 0
Ejemplo n.º 3
0
def test_runner_without_config():
    """Reset the CLI config, run NewProductsSuite without an explicit
    config, and verify the suite and each of its 5 tests succeed."""
    Cmd.run(['python', EXE, 'config', 'restore', '--all'])
    Cmd.run(['python', EXE, 'config', 'update', '-k', 'ui'])
    Cmd.run(['python', EXE, 'config', 'update', '-g', 'sso'])

    runner = Runner(suites=[NewProductsSuite])
    runner.run()
    suite = runner.get_executed_suites()[0]
    tests = suite.get_test_objects()
    QualityManager.check_class_metrics(suite.metrics.get_metrics(),
                                       expected_status="success")

    assert len(tests) == 5, \
        "Expected 5 tests to be executed, Actually executed: {}".format(
            len(tests))

    for test in tests:
        for class_data in test.metrics.get_metrics().values():
            for param, metrics in class_data.items():
                QualityManager.check_test_metrics(
                    metrics,
                    expected_status="success",
                    expected_param=param,
                    expected_class_param=metrics["class_param"])
Ejemplo n.º 4
0
def test_before_group_exceptions():
    """Verify every executed suite was ignored with zero retries.

    NOTE(review): relies on a module-level ``results`` defined elsewhere
    in the file — confirm the fixture that populates it.
    """
    for suite in results:
        QualityManager.check_class_metrics(suite.metrics.get_metrics(),
                                           expected_status="ignore",
                                           expected_retry_count=0)
Ejemplo n.º 5
0
def test_parameters_plus_plus():
    """Run ParametersSuite: expect 36 successes and 4 ignored tests,
    with the suite as a whole reported as failed."""
    runner = Runner([ParametersSuite])
    report = runner.run().get_basic_report()["tests"]
    assert report[TestCategory.SUCCESS] == 36
    assert report[TestCategory.IGNORE] == 4
    executed = runner.get_executed_suites()
    QualityManager.check_class_metrics(executed[0].metrics.get_metrics(),
                                       expected_status="fail")
Ejemplo n.º 6
0
def test_class_metrics():
    """Check suite metrics: failed run, retried twice, with two
    before-class Exceptions and matching performance entries.

    NOTE(review): relies on a module-level ``results`` defined elsewhere.
    """
    suite_metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(
        suite_metrics,
        expected_status="fail",
        expected_retry_count=2,
        expected_beforeclass_exception_count=2,
        expected_beforeclass_exception_object=Exception,
        expected_beforeclass_performance_count=2)
Ejemplo n.º 7
0
def test_component():
    """Run SkipFeature restricted to the "Login" feature and verify
    the 3 selected tests succeed."""
    runner = Runner([SkipFeature])
    report = runner.run(features=["Login"]).get_basic_report()["tests"]
    assert report[TestCategory.SUCCESS] == 3

    executed = runner.get_executed_suites()
    QualityManager.check_class_metrics(executed[0].metrics.get_metrics(),
                                       expected_status="success")
Ejemplo n.º 8
0
def test_after_test_error3():
    """Verify a failed run retried once, recording one after-test
    Exception and one performance entry.

    NOTE(review): relies on a module-level ``results6`` defined elsewhere.
    """
    suite_metrics = results6[0].metrics.get_metrics()
    QualityManager.check_class_metrics(
        suite_metrics,
        expected_status="fail",
        expected_retry_count=1,
        expected_aftertest_exception_count=1,
        expected_aftertest_exception_object=Exception,
        expected_aftertest_performance_count=1)
Ejemplo n.º 9
0
def test_before_test_error1():
    """Verify a failed run retried once, recording one before-test
    Exception and one performance entry.

    NOTE(review): relies on a module-level ``results5`` defined elsewhere.
    """
    suite_metrics = results5[0].metrics.get_metrics()
    QualityManager.check_class_metrics(
        suite_metrics,
        expected_status="fail",
        expected_retry_count=1,
        expected_beforetest_exception_count=1,
        expected_beforetest_exception_object=Exception,
        expected_beforetest_performance_count=1)
def test_class_metrics():
    """Check class-level decorator metrics: one clean before-class run
    and one after-class Exception, each with a performance entry.

    NOTE(review): relies on a module-level ``results`` defined elsewhere.
    """
    suite_metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(
        suite_metrics,
        expected_beforeclass_exception_count=1,
        expected_beforeclass_exception_object=None,
        expected_beforeclass_performance_count=1,
        expected_afterclass_exception_count=1,
        expected_afterclass_exception_object=Exception,
        expected_afterclass_performance_count=1)
Ejemplo n.º 11
0
def test_class_metrics3():
    """Verify the suite in ``results3`` failed after 2 retries, with 8
    after-test AssertionErrors and 8 performance entries recorded.

    NOTE(review): relies on a module-level ``results3`` defined elsewhere.
    """
    metrics = results3[0].metrics.get_metrics()
    # Removed leftover debug output (pprint.pprint of the metrics dict).
    QualityManager.check_class_metrics(
        metrics,
        expected_retry_count=2,
        expected_status="fail",
        expected_aftertest_exception_count=8,
        expected_aftertest_exception_object=AssertionError,
        expected_aftertest_performance_count=8)
Ejemplo n.º 12
0
def test_component():
    """Run only SkipTests.test_1 and test_2; expect 2 successes and
    1 skipped test, with the suite reported successful."""
    runner = Runner([SkipTests])
    aggregator = runner.run(tests=[SkipTests.test_1, SkipTests.test_2])
    report = aggregator.get_basic_report()["tests"]

    assert report[TestCategory.SUCCESS] == 2
    assert report[TestCategory.SKIP] == 1

    executed = runner.get_executed_suites()
    QualityManager.check_class_metrics(executed[0].metrics.get_metrics(),
                                       expected_status="success")
Ejemplo n.º 13
0
def test_runner_with_config():
    """Run ShoppingCartSuite and AuthApiSuite with an explicit config file.

    Expects ShoppingCartSuite to execute 5 successful tests and
    AuthApiSuite to execute 6 tests whose metrics report "skip" after
    2 retries each (2 exceptions / 2 performance entries per test).
    """
    Cmd.run(['python', EXE, 'config', 'restore', '--all'])
    Cmd.run(['python', EXE, 'config', 'update', '-k', 'ui'])
    Cmd.run(['python', EXE, 'config', 'update', '-g', 'sso'])
    runner = Runner(suites=[ShoppingCartSuite, AuthApiSuite],
                    config=Config.get_config_path(CliConstants.TJ_CONFIG_NAME))
    runner.run()

    # Removed leftover debug output (pprint.pprint / print of counts).
    results = runner.get_executed_suites()
    tests_ShoppingCartSuite = results[0].get_test_objects()
    tests_AuthApiSuite = results[1].get_test_objects()

    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")
    assert len(tests_ShoppingCartSuite) == 5, \
        "Expected 5 tests to be executed, Actually executed: {}".format(len(tests_ShoppingCartSuite))
    assert len(tests_AuthApiSuite) == 6, \
        "Expected 6 tests to be executed, Actually executed: {}".format(len(tests_AuthApiSuite))
    for test in tests_ShoppingCartSuite:
        for class_param, class_data in test.metrics.get_metrics().items():
            for param, metrics in class_data.items():
                QualityManager.check_test_metrics(
                    metrics,
                    expected_status="success",
                    expected_param=param,
                    expected_class_param=metrics["class_param"])
    for test in tests_AuthApiSuite:
        for class_param, class_data in test.metrics.get_metrics().items():
            for param, metrics in class_data.items():
                QualityManager.check_test_metrics(
                    metrics,
                    expected_status="skip",
                    expected_param=param,
                    expected_class_param=metrics["class_param"],
                    expected_retry_count=2,
                    expected_exception_count=2,
                    expected_performance_count=2)
Ejemplo n.º 14
0
def test_class_metrics3():
    """Verify the suite in ``results3`` was ignored with zero retries.

    NOTE(review): relies on a module-level ``results3`` defined elsewhere.
    """
    QualityManager.check_class_metrics(results3[0].metrics.get_metrics(),
                                       expected_status="ignore",
                                       expected_retry_count=0)
Ejemplo n.º 15
0
def test_class_metrics():
    """Verify the suite in ``results`` was cancelled with zero retries.

    NOTE(review): relies on a module-level ``results`` defined elsewhere.
    """
    suite_metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(suite_metrics,
                                       expected_status="cancel",
                                       expected_retry_count=0)
Ejemplo n.º 16
0
def test_class_metrics():
    """Run QualityManager's default class-metrics checks on the first
    executed suite.

    NOTE(review): relies on a module-level ``results`` defined elsewhere.
    """
    QualityManager.check_class_metrics(results[0].metrics.get_metrics())