def test_runner_handle_exceptions_in_module_teardown(with_dec_classpath, local_config, tmpdir, mock_pm):
    """
    Check that if we got exception in the module teardown tests will not be failed.
    :return:
    """
    var_dir = _ensure_var_dir(tmpdir)
    xunit_file = _ensure_xunit_file_empty(var_dir)
    suite_var_dir = str(var_dir.mkdir('suite-mock'))
    config_path = str(var_dir.join('config.yaml'))
    config = deepcopy(local_config)
    config.update({
        'artifacts': {},
        'suite_var_dir': suite_var_dir,
        'suite_dir': join(dirname(__file__), 'res', 'decorators', 'suites'),
        'remote': {
            'suite_var_dir': suite_var_dir,
        },
        'config_path': config_path,
    })
    ssh_pool = LocalPool(local_config['ssh'])
    ssh_pool.connect()
    for class_name in [
        'mock_test_module_with_generic_exceptions_in_teardown',
        'mock_test_module_with_tiden_exceptions_in_teardown'
    ]:
        # FIX: the module key and source path must be derived from class_name.
        # Previously both were hard-coded to
        # 'mock_test_module_with_exceptions_in_teardown', so each loop
        # iteration pointed at the same (mismatched) module file and the
        # per-variant suites were never actually loaded.
        modules = {
            'mock3.%s' % class_name: {
                'path': '%s/mock3/%s.py' % (config['suite_dir'], class_name),
                'module_short_name': class_name,
            }
        }
        tr = TidenRunner(config, modules=modules, ssh_pool=ssh_pool, plugin_manager=mock_pm, xunit_path=xunit_file)
        tr.process_tests()
        res = tr.get_tests_results()
        _tests = res.get_tests()
        print(_tests)
        # teardown exceptions must not fail the tests themselves
        assert len(_tests) == 2
        assert res.get_tests_num('pass') == 2
def test_java_app(with_java_app_classpath, local_config, tmpdir, mock_pm):
    """
    Smoke-run TidenRunner over the mock java application suite and flush
    both the xunit and TestRail reports without raising.
    """
    from tiden.result import Result
    from tiden.localpool import LocalPool
    from tiden.tidenfabric import TidenFabric
    from copy import deepcopy
    from datetime import datetime
    var_dir = str(tmpdir.mkdir('var'))
    xunit_file = str(tmpdir.join('var').join('xunit.xml'))
    tmpdir.join('var').join('xunit.xml').write('', ensure=True)
    report_path = 'report.yaml'
    config = deepcopy(local_config)
    # base suite settings; dir_prefix is timestamped so reruns don't collide
    config.update({
        'suite_name': 'mock',
        'test_name': '*',
        'suite_dir': join(dirname(__file__), 'res', 'java_app', 'suites'),
        'dir_prefix': f'mock-{datetime.now().strftime("%y%m%d-%H%M%S")}',
    })
    # second update depends on dir_prefix / environment values set above,
    # so it cannot be merged into the first one
    config.update({
        'suite_var_dir': str(tmpdir.join('var').mkdir(config['dir_prefix'])),
        'remote': {
            'artifacts': join(config['environment']['home'], 'artifacts'),
            'suite_var_dir': join(config['environment']['home'], config['dir_prefix']),
        },
        'config_path': str(tmpdir.join('var').join('config.yaml')),
    })
    # register the mock application artifact (local path + remote deploy path)
    config.update({
        'artifacts': {
            'mockapp': {
                'type': 'mockapp',
                'path': join(var_dir, 'artifacts', 'mockapp'),
                'remote_path': join(config['remote']['artifacts'], 'mockapp'),
            }
        },
    })
    ssh_pool = LocalPool(local_config['ssh'])
    # NOTE(review): this Result instance is shadowed by the runner's own
    # results below; presumably kept for xunit file initialization — confirm
    res = Result(xunit_path=xunit_file)
    modules = {
        'mock.mock_test_app': {
            'path': '%s/mock/mock_test_app.py' % config['suite_dir'],
            'module_short_name': 'mock_test_app',
        },
    }
    from tiden.tidenrunner import TidenRunner
    ssh_pool.connect()
    # publish pool and config through the fabric singleton for code that
    # resolves them globally
    TidenFabric().setSshPool(ssh_pool)
    TidenFabric().setConfig(config)
    tr = TidenRunner(config, modules=modules, ssh_pool=ssh_pool, plugin_manager=mock_pm, xunit_path=xunit_file)
    tr.process_tests()
    res = tr.get_tests_results()
    res.flush_xunit()
    res.create_testrail_report(config, report_file=str(report_path))
def test_runner_handle_exception_in_module_setup(with_dec_classpath, local_config, tmpdir, mock_pm):
    """
    Check that if we got exception in the module setup no one test executed.
    :return:
    """
    import pytest

    var_dir = _ensure_var_dir(tmpdir)
    xunit_file = _ensure_xunit_file_empty(var_dir)
    suite_var_dir = str(var_dir.mkdir('suite-mock'))
    config_path = str(var_dir.join('config.yaml'))

    config = deepcopy(local_config)
    config.update({
        'artifacts': {},
        'suite_var_dir': suite_var_dir,
        'suite_dir': join(dirname(__file__), 'res', 'decorators', 'suites'),
        'remote': {
            'suite_var_dir': suite_var_dir,
        },
        'config_path': config_path,
    })

    ssh_pool = LocalPool(local_config['ssh'])
    ssh_pool.connect()

    short_name = 'mock_test_module_with_exceptions_in_setup'
    modules = {
        'mock3.' + short_name: {
            'path': '{}/mock3/{}.py'.format(config['suite_dir'], short_name),
            'module_short_name': short_name,
        }
    }

    runner = TidenRunner(config, modules=modules, ssh_pool=ssh_pool, plugin_manager=mock_pm, xunit_path=xunit_file)

    # a broken module setup must abort the whole run
    with pytest.raises(SystemExit):
        runner.process_tests()

    results = runner.get_tests_results()
    collected = results.get_tests()
    print(collected)

    # nothing may have been executed or even recorded
    assert len(collected) == 0
    assert all(results.get_tests_num(status) == 0 for status in results.statuses)
def test_priority_decorator_run_tests(with_dec_classpath, local_config, tmpdir, mock_pm):
    """Run the mock2 suite that exercises test-priority ordering end to end."""
    from tiden.result import Result
    from tiden.localpool import LocalPool
    from tiden.tidenrunner import TidenRunner
    from copy import deepcopy

    var_dir = str(tmpdir.mkdir('var'))
    var_path = tmpdir.join('var')
    suite_var_dir = str(var_path.mkdir('suite-mock2'))
    remote_suite_var_dir = str(var_path.mkdir('remote').mkdir('suite-mock2'))
    xunit_file = str(var_path.join('xunit.xml'))
    var_path.join('xunit.xml').write('', ensure=True)

    res = Result(xunit_path=xunit_file)

    config = deepcopy(local_config)
    config.update({
        'suite_name': 'mock2',
        'test_name': '*',
        'suite_var_dir': suite_var_dir,
        'config_path': f'{suite_var_dir}/config.yaml',
        'suite_dir': join(dirname(__file__), 'res', 'decorators', 'suites'),
        'remote': {
            'suite_var_dir': remote_suite_var_dir,
        }
    })

    pool = LocalPool(local_config['ssh'])
    pool.connect()

    short_name = 'mock_test_module_with_test_priorities'
    modules = {
        f'mock2.{short_name}': {
            'path': join(config['suite_dir'], 'mock2', f'{short_name}.py'),
            'module_short_name': short_name,
        },
    }

    runner = TidenRunner(config, modules=modules, ssh_pool=pool, plugin_manager=mock_pm, xunit_path=xunit_file)
    runner.process_tests()
def main(): """ Run Tiden tests """ log_print("*** Initialization ***", color='blue') log_print('(c) 2017-{} GridGain Systems. All Rights Reserved'.format( max(datetime.now().year, 2019))) log_print(version) exit_code = None # parse arguments, # load configuration, # initialize working directories config = TidenFabric().setConfig(setup_test_environment( process_args())).obj log_print('The configuration stored in %s' % config['config_path']) logger = _get_default_logger(config) sys.path.insert(0, abspath(getcwd())) pm = PluginManager(config) # prepare artifacts, artifact information is updated into config # this must be done before tests collections, # because some tests are applicable for specific artifacts only log_print('*** Prepare artifacts ***', color='blue') pm.do('before_prepare_artifacts', config) remote_unzip_files, config = prepare(config) if collect_only: # we don't run any test, so no ssh pool nor plugin manager required ssh_pool = None pm = None else: # otherwise, create ssh pool, # and prepare plugins to use it ssh_pool = init_ssh_pool(config) if pm.plugins: log_print('*** Plugins ***', color='blue') for name, plugin in pm.plugins.items(): log_print("%s, version %s" % (name, plugin['TIDEN_PLUGIN_VERSION'])) pm.set(ssh=ssh_pool) # initialize tests runner log_print('*** Runner ***', color='blue') tr = TidenRunner(config, collect_only=collect_only, ssh_pool=ssh_pool, plugin_manager=pm) if len(tr.modules.keys()) == 0: log_print("Error: no test modules found") exit(1) log_print( "%s module(s) matched %s.%s" % (len(tr.modules.keys()), config['suite_name'], config['test_name'])) if collect_only: tr.collect_tests() else: pm.do('before_hosts_setup') init_remote_hosts(ssh_pool, config) pm.do('after_hosts_setup') upload_artifacts(ssh_pool, config, remote_unzip_files) if pm.do_check('before_tests_run'): tr.process_tests() else: exit_code = -1 pm.do('after_tests_run') result = tr.get_tests_results() result.flush_xunit() result.print_summary() 
result.create_testrail_report(config, report_file=config.get('testrail_report')) print_blue("Execution time %d:%02d:%02d " % hms(int(time()) - result.get_started())) if exit_code: exit(exit_code)
def test_runner_skipped_configurations(with_dec_classpath, local_config, tmpdir, mock_pm):
    """
    Test configurations correctly passed to TestRail report for skipped tests
    :return:
    """
    var_dir = _ensure_var_dir(tmpdir)
    xunit_file = _ensure_xunit_file_empty(var_dir)
    testrail_report_file = _ensure_tr_report_file_empty(var_dir)
    suite_var_dir = str(var_dir.mkdir('suite-mock'))
    config_path = str(var_dir.join('config.yaml'))
    source = 'mock_test_module_with_test_configuration'
    suite = 'mock'
    # fully-qualified class path as it appears in test run names
    module_name = 'suites.%s.%s.MockTestModuleWithTestConfiguration' % (suite, source)
    test_prefix = module_name + '.'
    config = deepcopy(local_config)
    # configuration flags below drive which tests get skipped
    config.update({
        'artifacts': {},
        # 'attrib': 'test_runner',
        # 'attr_match': 'any',
        'suite_var_dir': suite_var_dir,
        'suite_dir': join(dirname(__file__), 'res', 'decorators', 'suites'),
        'remote': {
            'suite_var_dir': suite_var_dir,
        },
        'config_path': config_path,
        'zookeeper_enabled': False,
        'pitr_enabled': False,
        'compaction_enabled': True,
    })
    ssh_pool = LocalPool(local_config['ssh'])
    test_module_source_file_name = '%s/%s/%s.py' % (config['suite_dir'], suite, source)
    modules = {
        '%s.%s' % (suite, source): {
            'path': test_module_source_file_name,
            'module_short_name': source,
        }
    }
    # suffix appended to test names for this particular configuration
    test_configuration = '(pitr_enabled=false, compaction_enabled=true, zookeeper_enabled=false)'
    expected_configuration_options = ['pitr_enabled', 'compaction_enabled', 'zookeeper_enabled']
    # expected per-test outcome: status, error type, message substring
    expected_result = {
        'test_main': {'status': 'pass', 'type': None, 'message': None},
        'test_zookeeper_only': {'status': 'skipped',
                                'type': 'skipped cause of config.zookeeper_enabled is False',
                                'message': None},
    }
    expected_statuses_count = {'pass': 1, 'fail': 0, 'error': 0, 'skip': 1, 'total': len(expected_result)}
    from tiden.tidenfabric import TidenFabric
    TidenFabric().reset().setConfig(config)
    tr = TidenRunner(config, modules=modules, ssh_pool=ssh_pool, plugin_manager=mock_pm, xunit_path=xunit_file)
    tr.process_tests()
    res = tr.get_tests_results()
    res.create_testrail_report(config, report_file=basename(testrail_report_file))
    _tests = res.get_tests()
    print(_tests)

    # validate raw test results
    assert len(_tests) == len(expected_result)
    for test_to_check in expected_result.keys():
        status, error_type, message, test_name = \
            res.get_test_details('{}{}{}'.format(test_prefix, test_to_check, test_configuration))
        assert expected_result[test_to_check].get('status') == status
        assert expected_result[test_to_check].get('type') == error_type
        if expected_result[test_to_check].get('message') is None:
            assert message is None
        else:
            # accept either exact match or substring containment
            assert expected_result[test_to_check].get('message') == message \
                   or expected_result[test_to_check].get('message') in message
    for status, count in expected_statuses_count.items():
        assert res.get_tests_num(status) == count

    # validate generated TestRail .yaml report
    tr_report = read_yaml_file(testrail_report_file)
    assert type({}) == type(tr_report)
    assert len(_tests) == len(tr_report)
    for test_run, test in tr_report.items():
        assert 'suite_run_id' in test
        assert 'test_run_id' in test
        assert test_run == test['test_run_id']
        assert 'module' in test
        assert test['module'] == module_name
        assert 'test_configuration_options' in test
        assert expected_configuration_options == test['test_configuration_options']
        assert 'function' in test
        assert test['function'] in expected_result.keys()
        expected_test_result = expected_result[test['function']]
        expected_status = res.util_status_to_testrail_status(expected_test_result['status'])
        assert 'last_status' in test
        assert expected_status == test['last_status']

        # a test message will be either in 'message' or 'type' if 'message' is None
        assert 'asserts' in test
        assert type([]) == type(test['asserts'])

        # currently Tiden generates only one assert per test
        assert len(test['asserts']) == 1
        assert type({}) == type(test['asserts'][0])
        assert 'status' in test['asserts'][0]
        assert expected_status == test['asserts'][0]['status']
        expected_assert_message = expected_test_result['message'] if expected_test_result['message'] is not None else \
            expected_test_result['type']
        if expected_assert_message is not None:
            assert res.util_filter_escape_seqs(expected_assert_message) in test['asserts'][0]['message']

    # check all test run id's are unique
    test_run_ids = [test['test_run_id'] for test in tr_report.values()]
    assert len(test_run_ids) == len(set(test_run_ids))

    # check all suite run id is the same
    suite_run_ids = set([test['suite_run_id'] for test in tr_report.values()])
    assert 1 == len(suite_run_ids)
def test_runner_repeated_test_continue_on_fail(with_dec_classpath, local_config, tmpdir, mock_pm):
    """
    This test is for testing test option repeated_test_continue_on_fail. It should have higher priority than decorator.

    If it is passed through test options (like this: -to=repeated_test_continue_on_fail=True)
    then a test with repeated-test decorators will be executed even if some iteration fails.
    :return:
    """
    var_dir = _ensure_var_dir(tmpdir)
    xunit_file = _ensure_xunit_file_empty(var_dir)
    suite_var_dir = str(var_dir.mkdir('suite-mock'))
    config_path = str(var_dir.join('config.yaml'))
    test_prefix = 'suites.mock3.mock_test_module_with_decorators.MockTestModuleWithDecorators.'
    # every test must run exactly this many iterations, even after a failure
    iterations = 5
    config = deepcopy(local_config)
    config.update({
        'artifacts': {},
        'suite_var_dir': suite_var_dir,
        'suite_dir': join(dirname(__file__), 'res', 'decorators', 'suites'),
        'remote': {
            'suite_var_dir': suite_var_dir,
        },
        'config_path': str(config_path),
        'repeated_test': iterations,
        'repeated_test_continue_on_fail': True
    })
    ssh_pool = LocalPool(local_config['ssh'])
    modules = {
        'mock3.mock_test_module_with_decorators': {
            'path': '%s/mock3/mock_test_module_with_decorators.py' % config['suite_dir'],
            'module_short_name': 'mock_test_module_with_decorators',
        }
    }
    # per-test expectation: final status, exception info, and per-iteration
    # remote directory names (decorator may give custom iteration names)
    expected_result = {
        'test_not_repeated_test': {
            'status': 'pass', 'type': None, 'message': None,
            'remote_dirs': [
                'test_not_repeated_test_1',
                'test_not_repeated_test_2',
                'test_not_repeated_test_3',
                'test_not_repeated_test_4',
                'test_not_repeated_test_5',
            ],
        },
        'test_repeated_test': {
            'status': 'pass', 'type': None, 'message': None,
            'remote_dirs': [
                'test_repeated_test_1',
                'test_repeated_test_2',
                'test_repeated_test_3',
                'test_repeated_test_4',
                'test_repeated_test_5',
            ],
        },
        'test_with_repeated_test_and_full_test_names': {
            'status': 'pass', 'type': None, 'message': None,
            'remote_dirs': [
                'test_with_repeated_test_and_full_test_names_first',
                'test_with_repeated_test_and_full_test_names_second',
                'test_with_repeated_test_and_full_test_names_3',
                'test_with_repeated_test_and_full_test_names_4',
                'test_with_repeated_test_and_full_test_names_5',
            ],
        },
        'test_with_repeated_test_and_not_full_test_names': {
            'status': 'pass', 'type': None, 'message': None,
            'remote_dirs': [
                'test_with_repeated_test_and_not_full_test_names_example',
                'test_with_repeated_test_and_not_full_test_names_2',
                'test_with_repeated_test_and_not_full_test_names_3',
                'test_with_repeated_test_and_not_full_test_names_4',
                'test_with_repeated_test_and_not_full_test_names_5',
            ],
        },
        # even the failing test must complete all 5 iterations because
        # repeated_test_continue_on_fail=True overrides the decorator
        'test_with_repeated_test_and_fail_on_iteration_3': {
            'status': 'fail',
            'type': 'TidenException',
            'message': 'TidenException(\'Exception on iteration 3\')',
            'test_name': 'test_with_repeated_test_and_fail_on_iteration_3',
            'remote_dirs': ['test_with_repeated_test_and_fail_on_iteration_3_first',
                            'test_with_repeated_test_and_fail_on_iteration_3_second',
                            'test_with_repeated_test_and_fail_on_iteration_3_3',
                            'test_with_repeated_test_and_fail_on_iteration_3_4',
                            'test_with_repeated_test_and_fail_on_iteration_3_5'
                            ]
        },
    }
    expected_statuses_count = {'pass': len(expected_result) - 1,
                               'fail': 1,
                               'error': 0,
                               'skip': 0,
                               'total': len(expected_result)}
    tr = TidenRunner(config, modules=modules, ssh_pool=ssh_pool, plugin_manager=mock_pm, xunit_path=xunit_file)
    tr.process_tests()
    res = tr.get_tests_results()
    _tests = res.get_tests()
    print(_tests)

    # to test tests execution we check:
    # 1. correct result in the Results.
    # 2. to test correct test execution we check correct directory creation + correct logs files generation.
    assert len(_tests) == len(expected_result)
    for test_to_check in expected_result.keys():
        status, error_type, message, test_name = res.get_test_details('{}{}'.format(test_prefix, test_to_check))
        assert expected_result[test_to_check].get('status') == status
        assert expected_result[test_to_check].get('type') == error_type
        assert expected_result[test_to_check].get('message') == message \
               or expected_result[test_to_check].get('message') in message
        assert test_name is not None
        assert test_name in expected_result[test_to_check].get('test_name', test_to_check)

        # Also check directory and log file exist
        iteration = 0
        for remote_directory in expected_result[test_to_check].get('remote_dirs'):
            iteration += 1
            log_file = '{}/{}/{}/{}_iteration_{}.log'.format(config['rt']['remote']['test_module_dir'],
                                                             config['rt']['test_class'],
                                                             remote_directory,
                                                             test_name,
                                                             iteration)
            assert exists(log_file)
    for status, count in expected_statuses_count.items():
        assert res.get_tests_num(status) == count
def test_runner_collect(with_dec_classpath, local_config, tmpdir, mock_pm):
    """
    Compare collect-only mode against a real run on the same module:
    collect_tests() must report every test of every configuration (12),
    while process_tests() with the fixed config flags runs only 2.
    """
    var_dir = _ensure_var_dir(tmpdir)
    # separate xunit/TestRail files for the collect and process phases
    xunit_file_collect = _ensure_xunit_file_empty(var_dir, '-collect')
    xunit_file_process = _ensure_xunit_file_empty(var_dir, '-process')
    testrail_report_file_collect = _ensure_tr_report_file_empty(var_dir, '-collect')
    testrail_report_file_process = _ensure_tr_report_file_empty(var_dir, '-process')
    suite_var_dir = str(var_dir.mkdir('suite-mock'))
    config_path = str(var_dir.join('config.yaml'))
    source = 'mock_test_module_with_test_configuration'
    suite = 'mock'
    module_name = 'suites.%s.%s.MockTestModuleWithTestConfiguration' % (suite, source)
    test_prefix = module_name + '.'
    config = deepcopy(local_config)
    config.update({
        'artifacts': {},
        'suite_var_dir': suite_var_dir,
        'suite_dir': join(dirname(__file__), 'res', 'decorators', 'suites'),
        'remote': {
            'suite_var_dir': suite_var_dir,
        },
        'config_path': config_path,
        'zookeeper_enabled': False,
        'pitr_enabled': False,
        'compaction_enabled': True,
    })
    ssh_pool = LocalPool(local_config['ssh'])
    test_module_source_file_name = '%s/%s/%s.py' % (config['suite_dir'], suite, source)
    modules = {
        '%s.%s' % (suite, source): {
            'path': test_module_source_file_name,
            'module_short_name': source,
        }
    }
    test_configuration = '(pitr_enabled=false, compaction_enabled=true, zookeeper_enabled=false)'
    expected_configuration_options = ['pitr_enabled', 'compaction_enabled', 'zookeeper_enabled']
    # NOTE(review): this dict is never compared against below (only raw
    # counts are asserted); also the skip reason says 'is None' while the
    # sibling test asserts 'is False' — presumably stale, verify
    expected_result = {
        'test_main': {'status': 'pass', 'type': None, 'message': None},
        'test_zookeeper_only': {'status': 'skipped',
                                'type': 'skipped cause of config.zookeeper_enabled is None',
                                'message': None},
    }
    from tiden.tidenfabric import TidenFabric
    TidenFabric().reset().setConfig(config)

    # phase 1: collect only — all tests of all configurations are reported
    tr = TidenRunner(config, modules=modules, ssh_pool=ssh_pool, plugin_manager=mock_pm, xunit_path=xunit_file_collect)
    tr.collect_tests()
    res = tr.get_tests_results()
    res.update_xunit()
    res.create_testrail_report(config, report_file=basename(testrail_report_file_collect))
    _tests = res.get_tests()
    assert 12 == len(_tests)
    print(_tests)

    # phase 2: real run — only the tests matching the current configuration
    TidenFabric().reset().setConfig(config)
    tr = TidenRunner(config, modules=modules, ssh_pool=ssh_pool, plugin_manager=mock_pm, xunit_path=xunit_file_process)
    tr.process_tests()
    res = tr.get_tests_results()
    res.create_testrail_report(config, report_file=basename(testrail_report_file_process))
    _tests = res.get_tests()
    assert 2 == len(_tests)
    print(_tests)
def test_runner_repeated_decorator(with_dec_classpath, local_config, tmpdir, mock_pm):
    """
    This test is for repeated_test decorator. Checks that reporting and execution correspond to repeated_test
    decorator logic:
    1. Test executes as many times as mentioned in decorator or if it fails execution stops.
    2. If test passed during all it's iterations it marks as pass and shows as one test in results.
    3. Test uses it's unique remote directory (this is the decorator logic).
    4. If test fails in some iteration it shows as one failed test in test results and it's name changed to
       one that contains iteration maker (ex. test_one -> test_one_iteration_5).
    :return:
    """
    var_dir = _ensure_var_dir(tmpdir)
    xunit_file = _ensure_xunit_file_empty(var_dir)
    suite_var_dir = str(var_dir.mkdir('suite-mock'))
    config_path = str(var_dir.join('config.yaml'))
    module_name = 'suites.mock3.mock_test_module_with_decorators.MockTestModuleWithDecorators'
    test_prefix = module_name + '.'
    config = deepcopy(local_config)
    config.update({
        'artifacts': {},
        'suite_var_dir': suite_var_dir,
        'suite_dir': join(dirname(__file__), 'res', 'decorators', 'suites'),
        'remote': {
            'suite_var_dir': suite_var_dir,
        },
        'config_path': config_path,
    })
    ssh_pool = LocalPool(local_config['ssh'])
    modules = {
        'mock3.mock_test_module_with_decorators': {
            'path': '%s/mock3/mock_test_module_with_decorators.py' % config['suite_dir'],
            'module_short_name': 'mock_test_module_with_decorators',
        }
    }
    # per-test expectation: final status, exception info, and the remote
    # directories created per iteration (decorator may give custom names)
    expected_result = {
        'test_not_repeated_test': {
            'status': 'pass', 'type': None, 'message': None,
            'remote_dirs': ['test_not_repeated_test'],
        },
        'test_repeated_test': {
            'status': 'pass', 'type': None, 'message': None,
            'remote_dirs': ['test_repeated_test_1', 'test_repeated_test_2'],
        },
        'test_with_repeated_test_and_full_test_names': {
            'status': 'pass', 'type': None, 'message': None,
            'remote_dirs': ['test_with_repeated_test_and_full_test_names_first',
                            'test_with_repeated_test_and_full_test_names_second'],
        },
        'test_with_repeated_test_and_not_full_test_names': {
            'status': 'pass', 'type': None, 'message': None,
            'remote_dirs': ['test_with_repeated_test_and_not_full_test_names_example',
                            'test_with_repeated_test_and_not_full_test_names_2'],
        },
        # failure stops the repetition at iteration 3, and the reported test
        # name carries the iteration suffix
        'test_with_repeated_test_and_fail_on_iteration_3_iteration_3': {
            'status': 'fail',
            'type': 'TidenException',
            'message': 'TidenException(\'Exception on iteration 3\')',
            'test_name': 'test_with_repeated_test_and_fail_on_iteration_3',
            'remote_dirs': ['test_with_repeated_test_and_fail_on_iteration_3_first',
                            'test_with_repeated_test_and_fail_on_iteration_3_second',
                            'test_with_repeated_test_and_fail_on_iteration_3_3'],
        },
    }
    expected_statuses_count = {'pass': len(expected_result) - 1,
                               'fail': 1,
                               'error': 0,
                               'skip': 0,
                               'total': len(expected_result)}
    tr = TidenRunner(config, modules=modules, ssh_pool=ssh_pool, plugin_manager=mock_pm, xunit_path=xunit_file)
    tr.process_tests()
    res = tr.get_tests_results()
    _tests = res.get_tests()
    print(_tests)

    # to test tests execution we check:
    # 1. correct result in the Results.
    # 2. to test correct test execution we check correct directory creation + correct logs files generation.
    assert len(_tests) == len(expected_result)
    for test_to_check in expected_result.keys():
        status, error_type, message, test_name = res.get_test_details('{}{}'.format(test_prefix, test_to_check))
        assert expected_result[test_to_check].get('status') == status
        assert expected_result[test_to_check].get('type') == error_type
        assert expected_result[test_to_check].get('message') == message \
               or expected_result[test_to_check].get('message') in message
        assert test_name is not None
        assert test_name in expected_result[test_to_check].get('test_name', test_to_check)

        # Also check directory and log file exist
        iteration = 0
        for remote_directory in expected_result[test_to_check].get('remote_dirs'):
            iteration += 1
            # test_name = expected_result[test_to_check].get('test_name', test_to_check)
            log_file = '{}/{}/{}/{}_iteration_{}.log'.format(config['rt']['remote']['test_module_dir'],
                                                             config['rt']['test_class'],
                                                             remote_directory,
                                                             test_name,
                                                             iteration)
            assert exists(log_file)
    for status, count in expected_statuses_count.items():
        assert res.get_tests_num(status) == count
def test_runner_basic(with_dec_classpath, local_config, tmpdir, mock_pm):
    """
    Just test that after TidenRunner execution we've got correct test results
    and correct exceptions in the failed tests.
    :return:
    """
    var_dir = _ensure_var_dir(tmpdir)
    xunit_file = _ensure_xunit_file_empty(var_dir)
    testrail_report_file = _ensure_tr_report_file_empty(var_dir)
    suite_var_dir = str(var_dir.mkdir('suite-mock'))
    config_path = str(var_dir.join('config.yaml'))
    suite = 'mock3'
    module_short_name = 'mock_test_module_with_exceptions'
    module_class_name = 'MockTestModuleWithExceptions'
    # fully-qualified class path as it appears in test run names
    module_name = 'suites.%s.%s.%s' % (suite, module_short_name, module_class_name)
    test_prefix = module_name + '.'
    config = deepcopy(local_config)
    # attrib/attr_match drive the 'skipped cause of attrib mismatch' case
    config.update({
        'artifacts': {},
        'attrib': 'test_runner',
        'attr_match': 'any',
        'suite_var_dir': suite_var_dir,
        'suite_dir': join(dirname(__file__), 'res', 'decorators', 'suites'),
        'remote': {
            'suite_var_dir': suite_var_dir,
        },
        'config_path': config_path,
    })
    ssh_pool = LocalPool(local_config['ssh'])
    test_module_source_file_name = '%s/%s/%s.py' % (config['suite_dir'], suite, module_short_name)
    modules = {
        '%s.%s' % (suite, module_short_name): {
            'path': test_module_source_file_name,
            'module_short_name': module_short_name,
        }
    }
    # expected per-test outcome: status, error type, message substring
    expected_result = {
        'test_should_pass':
            {'status': 'pass', 'type': None, 'message': None},
        'test_passed_with_result_message':
            {'status': 'pass', 'type': None, 'message': 'WOO-HOO'},
        'test_should_fail':
            {'status': 'fail', 'type': 'TidenException', 'message': 'TidenException(\'Fake exception in test\')'},
        'test_should_be_skipped':
            {'status': 'skipped',
             'type': 'skipped cause of expression evaluates to False at %s:45' % test_module_source_file_name,
             'message': None},
        'test_should_be_not_started':
            {'status': 'skipped_no_start', 'type': 'skipped cause of attrib mismatch', 'message': None},
        'test_with_exception_in_setup':
            {'status': 'fail', 'type': 'TidenException', 'message': 'TidenException(\'Exception in test setup\')'},
        'test_pass_with_exception_in_teardown':
            {'status': 'pass', 'type': None, 'message': None},
        'test_fail_with_exception_in_teardown':
            {'status': 'fail', 'type': 'TidenException', 'message': 'TidenException(\'Fake exception in test\')'},
        # IOError is an alias of OSError, hence the 'OSError' type here
        'test_should_fail_with_error':
            {'status': 'error', 'type': 'OSError', 'message': 'IOError(\'Fake IO exception in test\')'},
    }
    expected_statuses_count = {'pass': 3, 'fail': 3, 'error': 1, 'skip': 2, 'total': len(expected_result)}
    tr = TidenRunner(config, modules=modules, ssh_pool=ssh_pool, plugin_manager=mock_pm, xunit_path=xunit_file)
    tr.process_tests()
    res = tr.get_tests_results()
    _tests = res.get_tests()
    print(_tests)

    # validate raw test results
    assert len(_tests) == len(expected_result)
    for test_to_check in expected_result.keys():
        status, error_type, message, test_name = res.get_test_details('{}{}'.format(test_prefix, test_to_check))
        assert expected_result[test_to_check].get('status') == status
        assert expected_result[test_to_check].get('type') == error_type
        if expected_result[test_to_check].get('message') is None:
            assert message is None
        else:
            # accept either exact match or substring containment
            assert expected_result[test_to_check].get('message') == message \
                   or expected_result[test_to_check].get('message') in message
    for status, count in expected_statuses_count.items():
        assert res.get_tests_num(status) == count

    # validate generated TestRail .yaml report
    res.create_testrail_report(config, report_file=basename(testrail_report_file))
    tr_report = read_yaml_file(testrail_report_file)
    assert type({}) == type(tr_report)
    assert len(_tests) == len(tr_report)
    for test_run, test in tr_report.items():
        assert 'suite_run_id' in test
        assert 'test_run_id' in test
        assert test_run == test['test_run_id']
        assert 'module' in test
        assert test['module'] == module_name
        assert 'test_configuration_options' in test
        assert [] == test['test_configuration_options']
        assert 'function' in test
        assert test['function'] in expected_result.keys()
        expected_test_result = expected_result[test['function']]
        expected_status = res.util_status_to_testrail_status(expected_test_result['status'])
        assert 'last_status' in test
        assert expected_status == test['last_status']

        # a test message will be either in 'message' or 'type' if 'message' is None
        assert 'asserts' in test
        assert type([]) == type(test['asserts'])

        # currently Tiden generates only one assert per test
        assert len(test['asserts']) == 1
        assert type({}) == type(test['asserts'][0])
        assert 'status' in test['asserts'][0]
        assert expected_status == test['asserts'][0]['status']
        expected_assert_message = expected_test_result['message'] if expected_test_result['message'] is not None else \
            expected_test_result['type']
        if expected_assert_message is not None:
            assert res.util_filter_escape_seqs(expected_assert_message) in test['asserts'][0]['message']

    # check all test run id's are unique
    test_run_ids = [test['test_run_id'] for test in tr_report.values()]
    assert len(test_run_ids) == len(set(test_run_ids))

    # check all suite run id is the same
    suite_run_ids = set([test['suite_run_id'] for test in tr_report.values()])
    assert 1 == len(suite_run_ids)
def test_class_decorator_process_tests(with_dec_classpath, local_config, tmpdir, mock_pm):
    """
    Run the mock suite modules that carry class-level configuration
    decorators and flush xunit/TestRail reports without raising.
    """
    from tiden.result import Result
    from tiden.localpool import LocalPool
    from tiden.util import cfg
    from tiden.tidenrunner import TidenRunner
    from copy import deepcopy

    var_dir = str(tmpdir.mkdir('var'))
    var_path = tmpdir.join('var')
    suite_var_dir = str(var_path.mkdir('suite-mock'))
    xunit_file = str(var_path.join('xunit.xml'))
    var_path.join('xunit.xml').write('', ensure=True)
    config_path = var_path.join('config.yaml')
    report_path = 'report.yaml'

    config = deepcopy(local_config)
    config.update({
        'artifacts': {},
        'suite_name': 'mock',
        'test_name': '*',
        'suite_var_dir': suite_var_dir,
        'suite_dir': join(dirname(__file__), 'res', 'decorators', 'suites'),
        'remote': {
            'suite_var_dir': '',
        },
        'config_path': str(config_path),
    })
    # configuration options consumed by the decorated test classes
    cfg(config, 'pitr_enabled', 'True')
    cfg(config, 'load_factor', '0.1')

    pool = LocalPool(local_config['ssh'])
    res = Result(xunit_path=xunit_file)

    # all three mock modules live under <suite_dir>/mock/<short_name>.py
    short_names = [
        'mock_test_module',
        'mock_test_module_with_test_configuration',
        'mock_test_module_with_test_configuration_subset',
    ]
    modules = {
        'mock.' + short_name: {
            'path': '{}/mock/{}.py'.format(config['suite_dir'], short_name),
            'module_short_name': short_name,
        }
        for short_name in short_names
    }

    pool.connect()
    runner = TidenRunner(config, modules=modules, ssh_pool=pool, plugin_manager=mock_pm, xunit_path=xunit_file)
    runner.process_tests()

    res = runner.get_tests_results()
    res.flush_xunit()
    res.create_testrail_report(config, report_file=str(report_path))