def test_retry5():
    tested = False
    for test in tests:
        if test.get_function_name() == "retry5":
            for class_param, class_data in test.metrics.get_metrics().items():
                for param, metrics in class_data.items():
                    if metrics["class_param"] == 1:
                        # This class parameter is expected to exhaust its retries
                        QualityManager.check_test_metrics(
                            metrics,
                            expected_status="error",
                            expected_exception=Exception,
                            expected_retry_count=4,
                            expected_exception_count=4,
                            expected_performance_count=4,
                            expected_param=param,
                            expected_class_param=metrics["class_param"])
                        # every recorded attempt must have raised with this exact message
                        for i in metrics["exceptions"]:
                            assert str(i.with_traceback(None)) == "On purpose"
                    else:
                        QualityManager.check_test_metrics(
                            metrics,
                            expected_status="success",
                            expected_param=param,
                            expected_class_param=metrics["class_param"])
            tested = True
    if not tested:
        raise Exception("Test did not run")
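# Illustrative only: the assertions in this module assume test.metrics.get_metrics()
# returns a dict keyed by class parameter, then by test parameter. The inner key
# names sketched below are inferred from the call sites in this file ("exceptions"
# and "class_param" appear literally; "status", "retry", "performance" and "param"
# are assumptions, not the framework's documented schema).
_EXAMPLE_METRICS_SHAPE = {
    "None": {                        # str(class_param); "None" when unparameterized
        "None": {                    # str(param); "None" when unparameterized
            "status": "success",     # final status after all retries
            "retry": 1,              # number of attempts made
            "exceptions": [None],    # one entry per attempt
            "performance": [0.01],   # one runtime measurement per attempt
            "param": None,
            "class_param": None,
        }
    }
}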
def test_test_metrics():
    assert results[0].get_test_objects()
    for test in results[0].get_test_objects():
        metrics = test.metrics.get_metrics()["None"]["None"]
        QualityManager.check_test_metrics(metrics)
def test_runner_without_config():
    Cmd.run(['python', EXE, 'config', 'restore', '--all'])
    Cmd.run(['python', EXE, 'config', 'update', '-k', 'ui'])
    Cmd.run(['python', EXE, 'config', 'update', '-g', 'sso'])
    runner = Runner(suites=[NewProductsSuite])
    runner.run()
    results = runner.get_executed_suites()
    tests = results[0].get_test_objects()
    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")
    assert len(tests) == 5, \
        "Expected 5 tests to be executed, Actually executed: {}".format(len(tests))
    for test in tests:
        for class_param, class_data in test.metrics.get_metrics().items():
            for param, metrics in class_data.items():
                QualityManager.check_test_metrics(
                    metrics,
                    expected_status="success",
                    expected_param=param,
                    expected_class_param=metrics["class_param"])
def test_after_test_error4():
    assert results6[0].get_test_objects()
    for test in results6[0].get_test_objects():
        if test.get_function_name() == "failure":
            metrics = test.metrics.get_metrics()["None"]["None"]
            QualityManager.check_test_metrics(metrics,
                                              expected_status="error",
                                              expected_exception=Exception)
def test_skip_function():
    tested = False
    for test in tests:
        if test.get_function_name() == "skip_function":
            metrics = test.metrics.get_metrics()["None"]["None"]
            QualityManager.check_test_metrics(metrics, expected_status="skip")
            tested = True
    if not tested:
        raise Exception("Test did not run")
def test_failure2():
    tests = results2[0].get_test_objects()
    for test in tests:
        metrics = test.metrics.get_metrics()["None"]["None"]
        QualityManager.check_test_metrics(metrics,
                                          expected_status="error",
                                          expected_retry_count=4,
                                          expected_exception_count=4,
                                          expected_performance_count=4,
                                          expected_exception=Exception)
def test_failure3():
    tests = results3[0].get_test_objects()
    for test in tests:
        metrics = test.metrics.get_metrics()["None"]["None"]
        pprint.pprint(metrics)
        QualityManager.check_test_metrics(metrics,
                                          expected_status="fail",
                                          expected_retry_count=4,
                                          expected_exception_count=4,
                                          expected_performance_count=4,
                                          expected_exception=AssertionError)
def test_test_metrics():
    assert results[0].get_test_objects()
    for test in results[0].get_test_objects():
        metrics = test.metrics.get_metrics()["None"]["None"]
        QualityManager.check_test_metrics(metrics,
                                          expected_status="ignore",
                                          expected_exception_count=1,
                                          expected_performance_count=1,
                                          expected_retry_count=1,
                                          expected_exception=Exception)
def test_no_retry_on_assertion():
    tested = False
    for test in tests:
        if test.get_function_name() == "no_retry_on_assertion":
            metrics = test.metrics.get_metrics()["None"]["None"]
            QualityManager.check_test_metrics(metrics,
                                              expected_status="fail",
                                              expected_exception=AssertionError)
            tested = True
    if not tested:
        raise Exception("Test did not run")
def test_parameters():
    tested = False
    for test in tests:
        if test.get_function_name() == "parameters":
            properties = test.metrics.get_metrics()["None"]
            for param, metrics in properties.items():
                QualityManager.check_test_metrics(metrics,
                                                  expected_status="success",
                                                  expected_param=param)
            tested = True
    if not tested:
        raise Exception("Test did not run")
def test_retry_on_exception():
    tested = False
    for test in tests:
        if test.get_function_name() == "retry_on_exception":
            metrics = test.metrics.get_metrics()["None"]["None"]
            QualityManager.check_test_metrics(metrics,
                                              expected_retry_count=9,
                                              expected_performance_count=9,
                                              expected_exception_count=9,
                                              expected_status="error",
                                              expected_exception=Exception)
            tested = True
    if not tested:
        raise Exception("Test did not run")
def test_no_retry3():
    tested = False
    for test in tests:
        if test.get_function_name() == "no_retry3":
            for class_param, class_data in test.metrics.get_metrics().items():
                for param, metrics in class_data.items():
                    QualityManager.check_test_metrics(
                        metrics,
                        expected_status="success",
                        expected_param=param,
                        expected_class_param=metrics["class_param"])
            tested = True
    if not tested:
        raise Exception("Test did not run")
def test_skip():
    tested = False
    for test in tests:
        if test.get_function_name() == "skip":
            for class_param, class_data in test.metrics.get_metrics().items():
                for param, metrics in class_data.items():
                    QualityManager.check_test_metrics(
                        metrics,
                        expected_status="skip",
                        expected_retry_count=2,
                        expected_exception_count=2,
                        expected_performance_count=2)
            tested = True
    if not tested:
        raise Exception("Test did not run")
def test_retry():
    tested = False
    for test in tests:
        if test.get_function_name() == "retry":
            for class_param, class_data in test.metrics.get_metrics().items():
                for param, metrics in class_data.items():
                    QualityManager.check_test_metrics(
                        metrics,
                        expected_status="fail",
                        expected_exception=AssertionError,
                        expected_retry_count=4,
                        expected_exception_count=4,
                        expected_performance_count=4,
                        expected_class_param=metrics["class_param"])
                    # every recorded attempt should have failed with this exact message
                    for i in metrics["exceptions"]:
                        assert str(i.with_traceback(None)) == "Expected Assertion Error"
            tested = True
    if not tested:
        raise Exception("Test did not run")
def test_runner_with_config():
    Cmd.run(['python', EXE, 'config', 'restore', '--all'])
    Cmd.run(['python', EXE, 'config', 'update', '-k', 'ui'])
    Cmd.run(['python', EXE, 'config', 'update', '-g', 'sso'])
    runner = Runner(suites=[ShoppingCartSuite, AuthApiSuite],
                    config=Config.get_config_path(CliConstants.TJ_CONFIG_NAME))
    runner.run()
    results = runner.get_executed_suites()
    tests_ShoppingCartSuite = results[0].get_test_objects()
    tests_AuthApiSuite = results[1].get_test_objects()
    pprint.pprint(results[0].metrics.get_metrics())
    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")
    print(len(tests_ShoppingCartSuite))
    assert len(tests_ShoppingCartSuite) == 5, \
        "Expected 5 tests to be executed, Actually executed: {}".format(len(tests_ShoppingCartSuite))
    assert len(tests_AuthApiSuite) == 6, \
        "Expected 6 tests to be executed, Actually executed: {}".format(len(tests_AuthApiSuite))
    for test in tests_ShoppingCartSuite:
        for class_param, class_data in test.metrics.get_metrics().items():
            for param, metrics in class_data.items():
                QualityManager.check_test_metrics(
                    metrics,
                    expected_status="success",
                    expected_param=param,
                    expected_class_param=metrics["class_param"])
    for test in tests_AuthApiSuite:
        for class_param, class_data in test.metrics.get_metrics().items():
            for param, metrics in class_data.items():
                QualityManager.check_test_metrics(
                    metrics,
                    expected_status="skip",
                    expected_param=param,
                    expected_class_param=metrics["class_param"],
                    expected_retry_count=2,
                    expected_exception_count=2,
                    expected_performance_count=2)
def test_test_metrics():
    for test in tests:
        metrics = test.metrics.get_metrics()["None"]["None"]
        QualityManager.check_test_metrics(metrics)
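# A minimal sketch of the checker these tests lean on, assuming the metrics
# shape illustrated near the top of this module. This is NOT the project's
# QualityManager implementation; the helper name is hypothetical, and the
# parameter names and defaults simply mirror the call sites above.
def _check_test_metrics_sketch(metrics,
                               expected_status="success",
                               expected_retry_count=1,
                               expected_exception_count=1,
                               expected_performance_count=1,
                               expected_exception=None,
                               expected_param=None,
                               expected_class_param=None):
    assert metrics["status"] == expected_status
    assert metrics["retry"] == expected_retry_count
    assert len(metrics["exceptions"]) == expected_exception_count
    assert len(metrics["performance"]) == expected_performance_count
    if expected_exception is not None:
        # every recorded attempt should have raised the expected type
        for error in metrics["exceptions"]:
            assert isinstance(error, expected_exception)
    assert metrics["param"] == expected_param
    assert metrics["class_param"] == expected_class_param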