Exemplo n.º 1
0
def test_runner_without_config():
    """Run NewProductsSuite without a TJ config file and verify metrics."""
    # Reset the CLI configuration so the run is not influenced by prior state.
    for cmd_args in (['config', 'restore', '--all'],
                     ['config', 'update', '-k', 'ui'],
                     ['config', 'update', '-g', 'sso']):
        Cmd.run(['python', EXE] + cmd_args)

    suite_runner = Runner(suites=[NewProductsSuite])
    suite_runner.run()
    executed = suite_runner.get_executed_suites()
    test_objects = executed[0].get_test_objects()
    QualityManager.check_class_metrics(executed[0].metrics.get_metrics(),
                                       expected_status="success")

    assert len(test_objects) == 5, \
        "Expected 5 tests to be executed, Actually executed: {}".format(len(test_objects))

    # Every (class_param, param) bucket of every test must pass quality checks.
    for test in test_objects:
        for _class_param, class_data in test.metrics.get_metrics().items():
            for param, test_metrics in class_data.items():
                QualityManager.check_test_metrics(
                    test_metrics,
                    expected_status="success",
                    expected_param=param,
                    expected_class_param=test_metrics["class_param"])
Exemplo n.º 2
0
def test_execution_sequence2():
    """ExecutionSequence2 must fail all 6 tests and record after-test errors."""
    seq_runner = Runner([ExecutionSequence2])
    report = seq_runner.run().get_basic_report()["tests"]
    assert report[TestCategory.SUCCESS] == 0
    assert report[TestCategory.FAIL] == 6

    executed_suite = seq_runner.get_executed_suites()[0]
    QualityManager.check_class_metrics(
        executed_suite.metrics.get_metrics(),
        expected_status="fail",
        expected_retry_count=2,
        expected_beforetest_exception_count=20,
        expected_beforetest_performance_count=20,
        expected_beforetest_exception_object=None,
        expected_aftertest_exception_count=20,
        expected_aftertest_performance_count=20,
        expected_aftertest_exception_object=AssertionError)

    for test in executed_suite.get_test_objects():
        for class_param_data in test.metrics.get_metrics().values():
            for param_data in class_param_data.values():
                # Parameterized buckets accumulate twice as many records.
                expected = 2 if param_data["param"] is None else 4
                after = param_data[DecoratorType.AFTER_TEST]
                assert len(after["exceptions"]) == expected
                assert len(after["tracebacks"]) == expected
                assert len(after["performance"]) == expected
Exemplo n.º 3
0
def test_execution_sequence4():
    """ExecutionSequence4 must pass all 6 tests with clean decorator records."""
    seq_runner = Runner([ExecutionSequence4])
    report = seq_runner.run().get_basic_report()["tests"]
    assert report[TestCategory.SUCCESS] == 6
    assert report[TestCategory.FAIL] == 0

    executed_suite = seq_runner.get_executed_suites()[0]
    QualityManager.check_class_metrics(executed_suite.metrics.get_metrics(),
                                       expected_status="success")

    for test in executed_suite.get_test_objects():
        for class_param_data in test.metrics.get_metrics().values():
            for param_data in class_param_data.values():
                # No before/after-test records should be captured on success.
                for decorator in (DecoratorType.AFTER_TEST,
                                  DecoratorType.BEFORE_TEST):
                    records = param_data[decorator]
                    assert len(records["exceptions"]) == 0
                    assert len(records["tracebacks"]) == 0
                    assert len(records["performance"]) == 0
Exemplo n.º 4
0
def test_bad_suite_inputs():
    """Runner must reject ErrorSuite6 by raising BadParameters."""
    try:
        bad_runner = Runner([ErrorSuite6])
        bad_runner.run()
    except BadParameters:
        return  # expected
    # Reached only when BadParameters was NOT raised by the run above.
    raise Exception("Expected BadParameters error to be thrown")
Exemplo n.º 5
0
def test_component():
    """Running SkipFeature filtered to the 'Login' feature yields 3 passes."""
    feature_runner = Runner([SkipFeature])
    report = feature_runner.run(features=["Login"]).get_basic_report()["tests"]
    assert report[TestCategory.SUCCESS] == 3

    executed = feature_runner.get_executed_suites()
    QualityManager.check_class_metrics(executed[0].metrics.get_metrics(),
                                       expected_status="success")
Exemplo n.º 6
0
def test_parameters_plus_plus():
    """ParametersSuite: 36 successes, 4 ignored, class-level status 'fail'."""
    params_runner = Runner([ParametersSuite])
    report = params_runner.run().get_basic_report()["tests"]
    assert report[TestCategory.SUCCESS] == 36
    assert report[TestCategory.IGNORE] == 4
    executed = params_runner.get_executed_suites()
    QualityManager.check_class_metrics(executed[0].metrics.get_metrics(),
                                       expected_status="fail")
Exemplo n.º 7
0
def test_component():
    """Run only two explicit SkipTests tests; expect 2 passes and 1 skip."""
    skip_runner = Runner([SkipTests])
    aggregated = skip_runner.run(tests=[SkipTests.test_1, SkipTests.test_2])
    report = aggregated.get_basic_report()["tests"]

    assert report[TestCategory.SUCCESS] == 2
    assert report[TestCategory.SKIP] == 1

    executed = skip_runner.get_executed_suites()
    QualityManager.check_class_metrics(executed[0].metrics.get_metrics(),
                                       expected_status="success")
def test_bad_runner_initiation1():
    """Runner(suites=None) must raise BadParameters.

    Fix: the original raised the "must have raised" AssertionError *inside*
    the try block, so it was caught by ``except Exception`` and misreported
    as a wrong exception type.  Raising after the try keeps the two failure
    modes distinct.
    """
    try:
        Runner(suites=None)
    except Exception as error:
        assert isinstance(error,
                          BadParameters), "Type of exception is incorrect"
        return  # expected failure path
    raise AssertionError(
        "Must have raised exception because bad args were passed in")
Exemplo n.º 9
0
def test_reporting():
    """Exercise HTML/XML report generation and performance accessors."""
    # Unique file names per run so repeated executions do not clobber reports.
    base = __file__.replace("test_reporting.py",
                            "test_{}".format(int(time.time())))
    reporting_runner = Runner([Login, LoginSessions, Dashboard],
                              monitor_resources=True,
                              html_report="{}.html".format(base),
                              xml_report="{}.xml".format(base))
    reporting_runner.run()

    # Smoke-check the aggregate performance getters on every executed suite.
    for suite in reporting_runner.get_executed_suites():
        suite.get_average_performance_of_after_class()
        suite.get_average_performance_of_before_class()
        suite.get_average_performance_of_after_test()
        suite.get_average_performance_of_before_test()
Exemplo n.º 10
0
def test_runner_with_config():
    """Run two suites under a TJ config file and verify both suites' metrics."""
    # Reset the CLI configuration to a known state before the run.
    for cmd_args in (['config', 'restore', '--all'],
                     ['config', 'update', '-k', 'ui'],
                     ['config', 'update', '-g', 'sso']):
        Cmd.run(['python', EXE] + cmd_args)

    configured_runner = Runner(
        suites=[ShoppingCartSuite, AuthApiSuite],
        config=Config.get_config_path(CliConstants.TJ_CONFIG_NAME))
    configured_runner.run()

    executed = configured_runner.get_executed_suites()
    shopping_tests = executed[0].get_test_objects()
    auth_tests = executed[1].get_test_objects()
    pprint.pprint(executed[0].metrics.get_metrics())

    QualityManager.check_class_metrics(executed[0].metrics.get_metrics(),
                                       expected_status="success")
    print(len(shopping_tests))
    assert len(shopping_tests) == 5, \
        "Expected 5 tests to be executed, Actually executed: {}".format(len(shopping_tests))
    assert len(auth_tests) == 6, \
        "Expected 6 tests to be executed, Actually executed: {}".format(len(auth_tests))

    # Shopping-cart tests are expected to pass outright.
    for test in shopping_tests:
        for _class_param, class_data in test.metrics.get_metrics().items():
            for param, test_metrics in class_data.items():
                QualityManager.check_test_metrics(
                    test_metrics,
                    expected_status="success",
                    expected_param=param,
                    expected_class_param=test_metrics["class_param"])
    # Auth API tests are expected to be skipped after retries.
    for test in auth_tests:
        for _class_param, class_data in test.metrics.get_metrics().items():
            for param, test_metrics in class_data.items():
                QualityManager.check_test_metrics(
                    test_metrics,
                    expected_status="skip",
                    expected_param=param,
                    expected_class_param=test_metrics["class_param"],
                    expected_retry_count=2,
                    expected_exception_count=2,
                    expected_performance_count=2)
Exemplo n.º 11
0
from test_junkie.runner import Runner

from organization_tut.Header1 import Header1
from organization_tut.Header2 import Header2
from organization_tut.Labels import Feature, Component, Tag

# Run both header suites, restricted to the HEADER feature, SEARCH component,
# and tests tagged either LOGGED_OUT or LOGGED_IN.
runner = Runner(suites=[Header1, Header2])
runner.run(features=[Feature.HEADER],
           components=[Component.SEARCH],
           tag_config={"run_on_match_any": [Tag.LOGGED_OUT, Tag.LOGGED_IN]})
from test_junkie.runner import Runner
from tests.home.login_tests_usingtj import LoginTestsTJ

# Minimal example: run a single suite with default settings.
runner = Runner(suites=[LoginTestsTJ])
runner.run()

Exemplo n.º 13
0
from test_junkie.constants import TestCategory, DecoratorType
from test_junkie.runner import Runner
from tests.QualityManager import QualityManager
from tests.junkie_suites.BasicSuite import BasicSuite
from tests.junkie_suites.ExecutionSquence import ExecutionSequence1, ExecutionSequence2, ExecutionSequence3, \
    ExecutionSequence4
from tests.junkie_suites.ParametersSuite import ParametersSuite

# Execute BasicSuite once at import time; module-level test functions below
# inspect the captured suite and test objects.
runner = Runner([BasicSuite])
runner.run()
results = runner.get_executed_suites()
tests = results[0].get_test_objects()


# NOTE(review): this snippet is truncated mid-call — the closing parenthesis
# of check_class_metrics (and possibly further keyword arguments) is missing,
# so it is not valid Python as captured.
def test_class_metrics():

    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(
        metrics,
        expected_status="fail",
        expected_beforeclass_exception_count=1,
        expected_beforeclass_exception_object=None,
        expected_beforeclass_performance_count=1,
        expected_afterclass_exception_count=1,
        expected_afterclass_exception_object=None,
        expected_afterclass_performance_count=1,
        expected_beforetest_exception_count=8,
        expected_beforetest_exception_object=None,
        expected_beforetest_performance_count=8,
        expected_aftertest_exception_count=8,
        expected_aftertest_exception_object=None,
Exemplo n.º 14
0
import pprint
import threading
import sys

# Make the project root importable when this file is executed directly.
sys.path.insert(1, __file__.split("tests")[0])
from tests.QualityManager import QualityManager
from test_junkie.runner import Runner
from tests.junkie_suites.CancelSuite import CancelSuite

# Cancel the runner before its worker thread starts, so the suite is expected
# to finish with "cancel" status (verified by test_class_metrics below).
runner = Runner([CancelSuite])
thread = threading.Thread(target=runner.run, args=())
runner.cancel()
thread.start()
thread.join()
results = runner.get_executed_suites()

# Dump collected metrics for manual inspection.
pprint.pprint(results[0].metrics.get_metrics())
for test in results[0].get_test_objects():
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_class_metrics():
    """Cancelled suite must report status 'cancel' with no retries."""
    suite_metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(suite_metrics,
                                       expected_status="cancel",
                                       expected_retry_count=0)


# NOTE(review): truncated snippet — the body of this function is missing.
def test_test_metrics():
import pprint

from test_junkie.runner import Runner
from tests.QualityManager import QualityManager
from tests.junkie_suites.AfterClassAssertionSuite import AfterClassAssertionSuite

# Execute the after-class assertion suite; the test below verifies that
# exactly one after-class exception was recorded.
runner = Runner([AfterClassAssertionSuite])
runner.run()
results = runner.get_executed_suites()

# Dump collected metrics for manual inspection.
pprint.pprint(results[0].metrics.get_metrics())
for test in results[0].get_test_objects():
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_class_metrics():
    """Exactly one after-class exception (type Exception) must be recorded."""
    expectations = {
        "expected_beforeclass_exception_count": 1,
        "expected_beforeclass_exception_object": None,
        "expected_beforeclass_performance_count": 1,
        "expected_afterclass_exception_count": 1,
        "expected_afterclass_exception_object": Exception,
        "expected_afterclass_performance_count": 1,
    }
    QualityManager.check_class_metrics(results[0].metrics.get_metrics(),
                                       **expectations)


# NOTE(review): truncated snippet — the body of this function is missing.
def test_test_metrics():
Exemplo n.º 16
0
from test_junkie.runner import Runner
from tests.QualityManager import QualityManager
from tests.junkie_suites.BeforeClassExceptionSuite import BeforeClassExceptionSuite

# Execute the before-class exception suite; tests below expect it to fail
# after retries and ignore its tests.
runner = Runner([BeforeClassExceptionSuite])
runner.run()
results = runner.get_executed_suites()


def test_class_metrics():
    """Before-class exceptions must fail the suite after both retries."""
    expectations = {
        "expected_status": "fail",
        "expected_retry_count": 2,
        "expected_beforeclass_exception_count": 2,
        "expected_beforeclass_exception_object": Exception,
        "expected_beforeclass_performance_count": 2,
    }
    QualityManager.check_class_metrics(results[0].metrics.get_metrics(),
                                       **expectations)


# NOTE(review): this snippet is truncated mid-call — the closing parenthesis
# of check_test_metrics (and possibly further keyword arguments) is missing.
def test_test_metrics():

    assert results[0].get_test_objects()
    for test in results[0].get_test_objects():

        metrics = test.metrics.get_metrics()["None"]["None"]
        QualityManager.check_test_metrics(metrics,
                                          expected_status="ignore",
                                          expected_exception_count=1,
                                          expected_performance_count=1,
                                          expected_retry_count=1,
Exemplo n.º 17
0
import pprint

from test_junkie.constants import TestCategory
from test_junkie.runner import Runner
from tests.QualityManager import QualityManager
from tests.junkie_suites.FeatureAggregations import LoginSessions, Login, Dashboard

# Run three feature-tagged suites and keep the aggregator for the
# feature-level report check below.
runner = Runner([Login, LoginSessions, Dashboard])
runner_metrics = runner.run()
results = runner.get_executed_suites()
tests = results[0].get_test_objects()
# Dump collected metrics for manual inspection.
pprint.pprint(results[0].metrics.get_metrics())
for test in results[0].get_test_objects():
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_class_metrics():
    """Class metrics of the first executed suite must pass default checks."""
    QualityManager.check_class_metrics(results[0].metrics.get_metrics())


def test_test_metrics():
    """Per-test metrics (unparameterized bucket) must pass default checks."""
    for test_object in tests:
        bucket = test_object.metrics.get_metrics()["None"]["None"]
        QualityManager.check_test_metrics(bucket)


def test_advanced_aggregation_metrics():
    # NOTE(review): snippet appears truncated — the report is fetched but
    # never asserted against; presumably the original continued with checks.
    metrics = runner_metrics.get_report_by_features()
Exemplo n.º 18
0
from test_junkie.runner import Runner

from threading_tut.suites.ExampleSuiteA import ExampleSuiteA
from threading_tut.suites.ExampleSuiteB import ExampleSuiteB

# Run two suites in parallel: up to 2 suites and 10 tests at a time.
runner = Runner(suites=[ExampleSuiteA, ExampleSuiteB])
runner.run(suite_multithreading_limit=2, test_multithreading_limit=10)
Exemplo n.º 19
0
import pprint

from test_junkie.runner import Runner
from tests.QualityManager import QualityManager
from tests.junkie_suites.SkipSuite import SkipSuite

# Execute SkipSuite; the tests below expect the whole suite to be skipped.
runner = Runner([SkipSuite])
runner.run()
results = runner.get_executed_suites()

# Dump collected metrics for manual inspection.
pprint.pprint(results[0].metrics.get_metrics())
for test in results[0].get_test_objects():
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_class_metrics():
    """Skipped suite must carry status 'skip' and zero retries."""
    QualityManager.check_class_metrics(results[0].metrics.get_metrics(),
                                       expected_status="skip",
                                       expected_retry_count=0)


def test_test_metrics():
    """Skipped tests exist as objects but collect no metrics at all."""
    test_objects = results[0].get_test_objects()
    assert test_objects
    for test_object in test_objects:
        assert len(test_object.metrics.get_metrics()) == 0
Exemplo n.º 20
0
import pprint
import sys

# Make the project root importable when this file is executed directly.
sys.path.insert(1, __file__.split("tests")[0])
from tests.QualityManager import QualityManager
from test_junkie.debugger import LogJunkie
# Verbose (level-10 / DEBUG) framework logging for this example.
LogJunkie.enable_logging(10)

from test_junkie.runner import Runner
from tests.junkie_suites.AdvancedSuite import AdvancedSuite

# Run AdvancedSuite with suite- and test-level multithreading enabled and a
# tag configuration exercising all four matching rules.
runner = Runner([AdvancedSuite])
runner.run(suite_multithreading=True,
           suite_multithreading_limit=5,
           test_multithreading=True,
           test_multithreading_limit=5,
           tag_config={
               "run_on_match_all": ["critical", "v2"],
               "run_on_match_any": ["critical2"],
               "skip_on_match_all": ["skip", "v2"],
               "skip_on_match_any": ["trivial"]
           })
results = runner.get_executed_suites()
tests = results[0].get_test_objects()

# Dump collected metrics for manual inspection.
pprint.pprint(results[0].metrics.get_metrics())
for test in tests:
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())

Exemplo n.º 21
0
import pprint

from test_junkie.runner import Runner
from tests.QualityManager import QualityManager
from tests.junkie_suites.AfterTestAssertionSuite import AfterTestAssertionSuite
from tests.junkie_suites.AfterTestExceptionSuite import AfterTestExceptionSuite
from tests.junkie_suites.BeforeTestAssertionSuite import BeforeTestAssertionSuite
from tests.junkie_suites.BeforeTestExceptionSuite import BeforeTestExceptionSuite

# Only BeforeTestAssertionSuite is executed here; the other imported suites
# are not referenced in this visible portion of the file.
runner = Runner([BeforeTestAssertionSuite])
runner.run()
results = runner.get_executed_suites()

# Dump collected metrics for manual inspection.
pprint.pprint(results[0].metrics.get_metrics())
for test in results[0].get_test_objects():
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_class_metrics():
    """Before-test assertion failures must fail the class with 8 records."""
    suite_metrics = results[0].metrics.get_metrics()
    pprint.pprint(suite_metrics)
    expectations = {
        "expected_retry_count": 2,
        "expected_status": "fail",
        "expected_beforetest_exception_count": 8,
        "expected_beforetest_exception_object": AssertionError,
        "expected_beforetest_performance_count": 8,
    }
    QualityManager.check_class_metrics(suite_metrics, **expectations)
Exemplo n.º 22
0
import pprint

from test_junkie.errors import BadParameters, BadSignature
from test_junkie.runner import Runner
from tests.QualityManager import QualityManager
from tests.junkie_suites.IgnoreSuite import IgnoreSuiteBoundMethod, IgnoreSuiteFunction, IgnoreSuiteClassic, \
    IgnoreSuiteClassic2, IgnoreSuiteClassic3
from tests.junkie_suites.error_handling.ErrorSuite4 import ErrorSuite4
from tests.junkie_suites.error_handling.ErrorSuite5 import ErrorSuite5
from tests.junkie_suites.error_handling.ErrorSuite6 import ErrorSuite6

# Execute the bound-method ignore suite; the test below expects it to be
# ignored with zero retries.
runner1 = Runner([IgnoreSuiteBoundMethod])
runner1.run()
results1 = runner1.get_executed_suites()

# Dump collected metrics for manual inspection.
pprint.pprint(results1[0].metrics.get_metrics())
for test in results1[0].get_test_objects():
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_class_metrics():
    """IgnoreSuiteBoundMethod must be ignored without any retries."""
    QualityManager.check_class_metrics(results1[0].metrics.get_metrics(),
                                       expected_status="ignore",
                                       expected_retry_count=0)


# NOTE(review): truncated snippet — the body of this function is missing.
def test_test_metrics():
Exemplo n.º 23
0
import pprint

from test_junkie.debugger import LogJunkie
from tests.QualityManager import QualityManager

LogJunkie.enable_logging(10)  # verbose (level-10 / DEBUG) framework logging
from test_junkie.runner import Runner
from tests.junkie_suites.Retry import Retries

# Execute the Retries suite; tests below verify retry accounting.
runner = Runner([Retries])
runner.run()
results = runner.get_executed_suites()
tests = results[0].get_test_objects()
# Dump collected metrics for manual inspection.
pprint.pprint(results[0].metrics.get_metrics())
for test in results[0].get_test_objects():
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_class_metrics():
    """Retries suite must fail after exhausting its 3 retries."""
    QualityManager.check_class_metrics(results[0].metrics.get_metrics(),
                                       expected_status="fail",
                                       expected_retry_count=3)


# NOTE(review): truncated snippet — the body of the for loop (and likely a
# final assertion on `tested`) is missing.
def test_retry_on_exception():

    tested = False
    for test in tests:
Exemplo n.º 24
0
    def run_suites(self, args):
        """Execute the loaded suites with the CLI-provided arguments.

        Builds the tag configuration from the CLI flags, runs the suites via
        a Runner, and — when coverage was started — stops, saves, and prints
        the coverage report in the ``finally`` clause.  Exits the process
        with code 12 on Ctrl+C and 120 on unexpected errors.
        """
        def tags():
            # Return a tag_config dict only if at least one tag filter was
            # actually supplied on the command line; otherwise None.
            config = {
                "run_on_match_all": args.run_on_match_all,
                "run_on_match_any": args.run_on_match_any,
                "skip_on_match_all": args.skip_on_match_all,
                "skip_on_match_any": args.skip_on_match_any
            }
            for prop, value in config.items():
                if value is not None:
                    return config
            return None

        if self.suites:
            print("[{status}] Running tests ...\n".format(
                status=CliUtils.format_color_string(value="INFO",
                                                    color="blue")))
            try:
                runner = Runner(suites=self.suites,
                                html_report=args.html_report,
                                xml_report=args.xml_report,
                                config=self.execution_config)
                runner.run(
                    test_multithreading_limit=args.test_multithreading_limit,
                    suite_multithreading_limit=args.suite_multithreading_limit,
                    tests=args.tests,
                    owners=args.owners,
                    components=args.components,
                    features=args.features,
                    tag_config=tags(),
                    quiet=args.quiet)
            except KeyboardInterrupt:
                print("(Ctrl+C) Exiting!")
                exit(12)
            # Fix: was a bare ``except:`` which also swallowed SystemExit
            # and GeneratorExit raised from within the run.
            except Exception:
                print("[{status}] Unexpected error during test execution.".
                      format(status=CliUtils.format_color_string(value="ERROR",
                                                                 color="red")))
                CliUtils.print_color_traceback()
                exit(120)
            finally:
                if self.coverage is not None:
                    self.coverage.stop()
                    self.coverage.save()
                    import coverage
                    try:
                        print("[{status}] Code coverage report:".format(
                            status=CliUtils.format_color_string(value="INFO",
                                                                color="blue")))
                        self.coverage.report(show_missing=True,
                                             skip_covered=True)
                        print(
                            "[{status}] TJ uses Coverage.py. Control it with --cov-rcfile, "
                            "see {link}".format(
                                status=CliUtils.format_color_string(
                                    value="TIP", color="blue"),
                                link=DocumentationLinks.COVERAGE_CONFIG_FILE))
                    except coverage.misc.CoverageException:
                        CliUtils.print_color_traceback()
                        exit(120)
            return
Exemplo n.º 25
0
import pprint

from test_junkie.runner import Runner
from tests.QualityManager import QualityManager
from tests.junkie_suites.IgnoreSuite import IgnoreSuiteBeforeGroupRule, IgnoreSuiteBeforeGroupRule2

# Run both before-group ignore-rule suites; the test below expects each to
# end up ignored without retries.
runner = Runner([IgnoreSuiteBeforeGroupRule, IgnoreSuiteBeforeGroupRule2])
runner.run()

results = runner.get_executed_suites()
tests = results[0].get_test_objects()
# Dump collected metrics for manual inspection.
pprint.pprint(results[0].metrics.get_metrics())
for test in tests:
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_before_group_exceptions():
    """Every executed before-group-rule suite must be ignored, no retries."""
    for executed_suite in results:
        QualityManager.check_class_metrics(
            executed_suite.metrics.get_metrics(),
            expected_status="ignore",
            expected_retry_count=0)