def test_execution_sequence2():

    runner = Runner([ExecutionSequence2])
    aggregator = runner.run()
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 0
    assert metrics[TestCategory.FAIL] == 6

    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics,
                                       expected_status="fail",
                                       expected_retry_count=2,
                                       expected_beforetest_exception_count=20,
                                       expected_beforetest_performance_count=20,
                                       expected_beforetest_exception_object=None,
                                       expected_aftertest_exception_count=20,
                                       expected_aftertest_performance_count=20,
                                       expected_aftertest_exception_object=AssertionError)

    for test in suites[0].get_test_objects():
        test_metrics = test.metrics.get_metrics()
        for class_param, class_param_data in test_metrics.items():
            for param, param_data in class_param_data.items():
                # Parameterized tests are expected to accumulate twice as many
                # after-test records as non-parameterized ones.
                expected_value = 4 if param_data["param"] is not None else 2
                assert len(param_data[DecoratorType.AFTER_TEST]["exceptions"]) == expected_value
                assert len(param_data[DecoratorType.AFTER_TEST]["tracebacks"]) == expected_value
                assert len(param_data[DecoratorType.AFTER_TEST]["performance"]) == expected_value
def test_execution_sequence4():

    runner = Runner([ExecutionSequence4])
    aggregator = runner.run()
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 6
    assert metrics[TestCategory.FAIL] == 0

    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")

    for test in suites[0].get_test_objects():
        test_metrics = test.metrics.get_metrics()
        for class_param, class_param_data in test_metrics.items():
            for param, param_data in class_param_data.items():
                expected_value = 0
                assert len(param_data[DecoratorType.AFTER_TEST]["exceptions"]) == expected_value
                assert len(param_data[DecoratorType.AFTER_TEST]["tracebacks"]) == expected_value
                assert len(param_data[DecoratorType.AFTER_TEST]["performance"]) == expected_value
                assert len(param_data[DecoratorType.BEFORE_TEST]["exceptions"]) == expected_value
                assert len(param_data[DecoratorType.BEFORE_TEST]["tracebacks"]) == expected_value
                assert len(param_data[DecoratorType.BEFORE_TEST]["performance"]) == expected_value
def test_runner_without_config():

    Cmd.run(['python', EXE, 'config', 'restore', '--all'])
    Cmd.run(['python', EXE, 'config', 'update', '-k', 'ui'])
    Cmd.run(['python', EXE, 'config', 'update', '-g', 'sso'])

    runner = Runner(suites=[NewProductsSuite])
    runner.run()
    results = runner.get_executed_suites()
    tests = results[0].get_test_objects()

    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")

    assert len(tests) == 5, \
        "Expected 5 tests to be executed, Actually executed: {}".format(len(tests))

    for test in tests:
        for class_param, class_data in test.metrics.get_metrics().items():
            for param, metrics in class_data.items():
                QualityManager.check_test_metrics(metrics,
                                                  expected_status="success",
                                                  expected_param=param,
                                                  expected_class_param=metrics["class_param"])
def test_before_group_exceptions():

    for suite in results:
        metrics = suite.metrics.get_metrics()
        QualityManager.check_class_metrics(metrics,
                                           expected_status="ignore",
                                           expected_retry_count=0)
def test_parameters_plus_plus():

    runner = Runner([ParametersSuite])
    aggregator = runner.run()
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 36
    assert metrics[TestCategory.IGNORE] == 4

    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="fail")
def test_class_metrics():

    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics,
                                       expected_status="fail",
                                       expected_retry_count=2,
                                       expected_beforeclass_exception_count=2,
                                       expected_beforeclass_exception_object=Exception,
                                       expected_beforeclass_performance_count=2)
def test_component():

    runner = Runner([SkipFeature])
    aggregator = runner.run(features=["Login"])
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 3

    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")
def test_after_test_error3():

    metrics6 = results6[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics6,
                                       expected_status="fail",
                                       expected_retry_count=1,
                                       expected_aftertest_performance_count=1,
                                       expected_aftertest_exception_object=Exception,
                                       expected_aftertest_exception_count=1)
def test_before_test_error1():

    metrics5 = results5[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics5,
                                       expected_status="fail",
                                       expected_retry_count=1,
                                       expected_beforetest_performance_count=1,
                                       expected_beforetest_exception_object=Exception,
                                       expected_beforetest_exception_count=1)
def test_class_metrics():

    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics,
                                       expected_beforeclass_exception_count=1,
                                       expected_beforeclass_exception_object=None,
                                       expected_beforeclass_performance_count=1,
                                       expected_afterclass_exception_count=1,
                                       expected_afterclass_exception_object=Exception,
                                       expected_afterclass_performance_count=1)
def test_class_metrics3():

    metrics = results3[0].metrics.get_metrics()
    pprint.pprint(metrics)
    QualityManager.check_class_metrics(metrics,
                                       expected_retry_count=2,
                                       expected_status="fail",
                                       expected_aftertest_exception_count=8,
                                       expected_aftertest_exception_object=AssertionError,
                                       expected_aftertest_performance_count=8)
def test_component():

    runner = Runner([SkipTests])
    aggregator = runner.run(tests=[SkipTests.test_1, SkipTests.test_2])
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 2
    assert metrics[TestCategory.SKIP] == 1

    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")
def test_runner_with_config():

    Cmd.run(['python', EXE, 'config', 'restore', '--all'])
    Cmd.run(['python', EXE, 'config', 'update', '-k', 'ui'])
    Cmd.run(['python', EXE, 'config', 'update', '-g', 'sso'])

    runner = Runner(suites=[ShoppingCartSuite, AuthApiSuite],
                    config=Config.get_config_path(CliConstants.TJ_CONFIG_NAME))
    runner.run()
    results = runner.get_executed_suites()
    tests_ShoppingCartSuite = results[0].get_test_objects()
    tests_AuthApiSuite = results[1].get_test_objects()

    pprint.pprint(results[0].metrics.get_metrics())
    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")

    print(len(tests_ShoppingCartSuite))
    assert len(tests_ShoppingCartSuite) == 5, \
        "Expected 5 tests to be executed, Actually executed: {}".format(len(tests_ShoppingCartSuite))
    assert len(tests_AuthApiSuite) == 6, \
        "Expected 6 tests to be executed, Actually executed: {}".format(len(tests_AuthApiSuite))

    for test in tests_ShoppingCartSuite:
        for class_param, class_data in test.metrics.get_metrics().items():
            for param, metrics in class_data.items():
                QualityManager.check_test_metrics(metrics,
                                                  expected_status="success",
                                                  expected_param=param,
                                                  expected_class_param=metrics["class_param"])

    for test in tests_AuthApiSuite:
        for class_param, class_data in test.metrics.get_metrics().items():
            for param, metrics in class_data.items():
                QualityManager.check_test_metrics(metrics,
                                                  expected_status="skip",
                                                  expected_param=param,
                                                  expected_class_param=metrics["class_param"],
                                                  expected_retry_count=2,
                                                  expected_exception_count=2,
                                                  expected_performance_count=2)
def test_class_metrics3():

    metrics = results3[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics,
                                       expected_status="ignore",
                                       expected_retry_count=0)
def test_class_metrics():

    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics,
                                       expected_retry_count=0,
                                       expected_status="cancel")
def test_class_metrics():

    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics)