Example #1
def test_execution_sequence2():
    runner = Runner([ExecutionSequence2])
    aggregator = runner.run()
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 0
    assert metrics[TestCategory.FAIL] == 6
    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(
        metrics,
        expected_status="fail",
        expected_retry_count=2,
        expected_beforetest_exception_count=20,
        expected_beforetest_performance_count=20,
        expected_beforetest_exception_object=None,
        expected_aftertest_exception_count=20,
        expected_aftertest_performance_count=20,
        expected_aftertest_exception_object=AssertionError)
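    # Test metrics nest as: class parameter -> test parameter -> per-decorator data.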
    for test in suites[0].get_test_objects():
        test_metrics = test.metrics.get_metrics()
        for class_param, class_param_data in test_metrics.items():
            for param, param_data in class_param_data.items():
                expected_value = 4 if param_data["param"] is not None else 2
                assert len(param_data[DecoratorType.AFTER_TEST]
                           ["exceptions"]) == expected_value
                assert len(param_data[DecoratorType.AFTER_TEST]
                           ["tracebacks"]) == expected_value
                assert len(param_data[DecoratorType.AFTER_TEST]
                           ["performance"]) == expected_value
Example #2
def test_execution_sequence4():
    runner = Runner([ExecutionSequence4])
    aggregator = runner.run()
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 6
    assert metrics[TestCategory.FAIL] == 0
    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")
    for test in suites[0].get_test_objects():
        test_metrics = test.metrics.get_metrics()
        for class_param, class_param_data in test_metrics.items():
            for param, param_data in class_param_data.items():
                expected_value = 0
                assert len(param_data[DecoratorType.AFTER_TEST]
                           ["exceptions"]) == expected_value
                assert len(param_data[DecoratorType.AFTER_TEST]
                           ["tracebacks"]) == expected_value
                assert len(param_data[DecoratorType.AFTER_TEST]
                           ["performance"]) == expected_value
                assert len(param_data[DecoratorType.BEFORE_TEST]
                           ["exceptions"]) == expected_value
                assert len(param_data[DecoratorType.BEFORE_TEST]
                           ["tracebacks"]) == expected_value
                assert len(param_data[DecoratorType.BEFORE_TEST]
                           ["performance"]) == expected_value
Example #3
def test_runner_without_config():

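    # Restore the saved CLI config to defaults, then update two of its settings before running without an explicit config.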
    Cmd.run(['python', EXE, 'config', 'restore', '--all'])
    Cmd.run(['python', EXE, 'config', 'update', '-k', 'ui'])
    Cmd.run(['python', EXE, 'config', 'update', '-g', 'sso'])

    runner = Runner(suites=[NewProductsSuite])
    runner.run()
    results = runner.get_executed_suites()
    tests = results[0].get_test_objects()
    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")

    assert len(tests) == 5, \
        "Expected 5 tests to be executed, Actually executed: {}".format(len(tests))

    for test in tests:
        for class_param, class_data in test.metrics.get_metrics().items():
            for param, metrics in class_data.items():
                QualityManager.check_test_metrics(
                    metrics,
                    expected_status="success",
                    expected_param=param,
                    expected_class_param=metrics["class_param"])
Example #4
def test_parameters_plus_plus():
    runner = Runner([ParametersSuite])
    aggregator = runner.run()
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 36
    assert metrics[TestCategory.IGNORE] == 4
    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="fail")
Example #5
def test_component():
    runner = Runner([SkipFeature])
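    # run() accepts a features filter; only tests under the "Login" feature are expected to execute.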
    aggregator = runner.run(features=["Login"])
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 3
    suites = runner.get_executed_suites()

    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")
Example #6
def test_component():

    runner = Runner([SkipTests])
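    # run() can also target individual tests; anything not listed is reported as skipped.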
    aggregator = runner.run(tests=[SkipTests.test_1, SkipTests.test_2])
    metrics = aggregator.get_basic_report()["tests"]

    assert metrics[TestCategory.SUCCESS] == 2
    assert metrics[TestCategory.SKIP] == 1

    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")
Example #7
def test_reporting():
    f = __file__.replace("test_reporting.py",
                         "test_{}".format(int(time.time())))
    html = "{}.html".format(f)
    xml = "{}.xml".format(f)
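    # Timestamped file names keep repeated runs from overwriting earlier reports.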
    runner = Runner([Login, LoginSessions, Dashboard],
                    monitor_resources=True,
                    html_report=html,
                    xml_report=xml)
    runner.run()

    suites = runner.get_executed_suites()
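    # Average fixture performance is exposed per suite for each decorator type.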
    for suite in suites:
        suite.get_average_performance_of_after_class()
        suite.get_average_performance_of_before_class()
        suite.get_average_performance_of_after_test()
        suite.get_average_performance_of_before_test()
Example #8
def test_runner_with_config():

    Cmd.run(['python', EXE, 'config', 'restore', '--all'])
    Cmd.run(['python', EXE, 'config', 'update', '-k', 'ui'])
    Cmd.run(['python', EXE, 'config', 'update', '-g', 'sso'])
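    # Unlike the "without config" example, the runner is pointed at the saved config file explicitly.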
    runner = Runner(suites=[ShoppingCartSuite, AuthApiSuite],
                    config=Config.get_config_path(CliConstants.TJ_CONFIG_NAME))
    runner.run()

    results = runner.get_executed_suites()
    tests_ShoppingCartSuite = results[0].get_test_objects()
    tests_AuthApiSuite = results[1].get_test_objects()
    pprint.pprint(results[0].metrics.get_metrics())

    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")
    print(len(tests_ShoppingCartSuite))
    assert len(tests_ShoppingCartSuite) == 5, \
        "Expected 5 tests to be executed, Actually executed: {}".format(len(tests_ShoppingCartSuite))
    assert len(tests_AuthApiSuite) == 6, \
        "Expected 6 tests to be executed, Actually executed: {}".format(len(tests_AuthApiSuite))
    for test in tests_ShoppingCartSuite:
        for class_param, class_data in test.metrics.get_metrics().items():
            for param, metrics in class_data.items():
                QualityManager.check_test_metrics(
                    metrics,
                    expected_status="success",
                    expected_param=param,
                    expected_class_param=metrics["class_param"])
    for test in tests_AuthApiSuite:
        for class_param, class_data in test.metrics.get_metrics().items():
            for param, metrics in class_data.items():
                QualityManager.check_test_metrics(
                    metrics,
                    expected_status="skip",
                    expected_param=param,
                    expected_class_param=metrics["class_param"],
                    expected_retry_count=2,
                    expected_exception_count=2,
                    expected_performance_count=2)
Example #9
import pprint

from test_junkie.errors import BadParameters, BadSignature
from test_junkie.runner import Runner
from tests.QualityManager import QualityManager
from tests.junkie_suites.IgnoreSuite import IgnoreSuiteBoundMethod, IgnoreSuiteFunction, IgnoreSuiteClassic, \
    IgnoreSuiteClassic2, IgnoreSuiteClassic3
from tests.junkie_suites.error_handling.ErrorSuite4 import ErrorSuite4
from tests.junkie_suites.error_handling.ErrorSuite5 import ErrorSuite5
from tests.junkie_suites.error_handling.ErrorSuite6 import ErrorSuite6

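# The suite runs at import time; the test functions below assert against these shared results.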
runner1 = Runner([IgnoreSuiteBoundMethod])
runner1.run()
results1 = runner1.get_executed_suites()

pprint.pprint(results1[0].metrics.get_metrics())
for test in results1[0].get_test_objects():
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_class_metrics():

    metrics = results1[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics,
                                       expected_status="ignore",
                                       expected_retry_count=0)


def test_test_metrics():

    # Hedged completion: the original snippet is truncated here. By analogy with
    # the other examples, each executed test's metrics would be checked in turn.
    for test in results1[0].get_test_objects():
        for class_param, class_data in test.metrics.get_metrics().items():
            for param, metrics in class_data.items():
                QualityManager.check_test_metrics(
                    metrics,
                    expected_status="ignore",
                    expected_param=param,
                    expected_class_param=metrics["class_param"])
Example #10
from test_junkie.constants import TestCategory, DecoratorType
from test_junkie.runner import Runner
from tests.QualityManager import QualityManager
from tests.junkie_suites.BasicSuite import BasicSuite
from tests.junkie_suites.ExecutionSquence import ExecutionSequence1, ExecutionSequence2, ExecutionSequence3, \
    ExecutionSequence4
from tests.junkie_suites.ParametersSuite import ParametersSuite

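# As in the previous example, the suite executes at import time and the results are shared module-wide.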
runner = Runner([BasicSuite])
runner.run()
results = runner.get_executed_suites()
tests = results[0].get_test_objects()


def test_class_metrics():

    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(
        metrics,
        expected_status="fail",
        expected_beforeclass_exception_count=1,
        expected_beforeclass_exception_object=None,
        expected_beforeclass_performance_count=1,
        expected_afterclass_exception_count=1,
        expected_afterclass_exception_object=None,
        expected_afterclass_performance_count=1,
        expected_beforetest_exception_count=8,
        expected_beforetest_exception_object=None,
        expected_beforetest_performance_count=8,
        expected_aftertest_exception_count=8,
        expected_aftertest_exception_object=None,
        # Assumed final argument to close the call; the original snippet is truncated here.
        expected_aftertest_performance_count=8)
Example #11
import pprint

from test_junkie.runner import Runner
from tests.QualityManager import QualityManager
from tests.junkie_suites.AfterTestAssertionSuite import AfterTestAssertionSuite
from tests.junkie_suites.AfterTestExceptionSuite import AfterTestExceptionSuite
from tests.junkie_suites.BeforeTestAssertionSuite import BeforeTestAssertionSuite
from tests.junkie_suites.BeforeTestExceptionSuite import BeforeTestExceptionSuite

runner = Runner([BeforeTestAssertionSuite])
runner.run()
results = runner.get_executed_suites()

pprint.pprint(results[0].metrics.get_metrics())
for test in results[0].get_test_objects():
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_class_metrics():

    metrics = results[0].metrics.get_metrics()
    pprint.pprint(metrics)
    QualityManager.check_class_metrics(
        metrics,
        expected_retry_count=2,
        expected_status="fail",
        expected_beforetest_exception_count=8,
        expected_beforetest_exception_object=AssertionError,
        expected_beforetest_performance_count=8)
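
All of the examples above share one basic pattern. A minimal sketch of that pattern, assuming a hypothetical @Suite-decorated class named MySuite (a placeholder, not one of the suites used above):

from test_junkie.runner import Runner

runner = Runner([MySuite])                        # MySuite is a placeholder suite class
aggregator = runner.run()                         # execute and aggregate results
totals = aggregator.get_basic_report()["tests"]   # counts keyed by TestCategory
for suite in runner.get_executed_suites():
    print(suite.metrics.get_metrics())            # per-suite metrics for deeper assertions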