sys.exit(1) # Login the test client test_utils.login_tester_account(test_client) servers_info = test_utils.get_config_data() node_name = "all" if args['pkg'] is not None: node_name = args['pkg'].split('.')[-1] try: for server in servers_info: print( "\n=============Running the test cases for '%s'=============" % server['name'], file=sys.stderr) # Create test server server_information = test_utils.create_parent_server_node(server) # Create test database with random number to avoid conflict in # parallel execution on different platforms. This database will be # used across all feature tests. test_db_name = "acceptance_test_db" + \ str(random.randint(10000, 65535)) connection = test_utils.get_db_connection( server['db'], server['username'], server['db_password'], server['host'], server['port'], server['sslmode']) # Add the server version in server information server_information['server_version'] = connection.server_version server_information['type'] = server['type'] # Drop the database if already exists.
def execute_test(test_module_list_passed, server_passed, driver_passed):
    """
    Run the given test modules against one configured server.

    Creates the parent server node and a uniquely-named scratch database,
    runs the unittest suite, merges the results into the module-level
    ``test_result`` dict (aggregating with any earlier sequential results),
    then drops the scratch database and deletes the test server.

    :param test_module_list_passed: iterable of test modules to execute
    :param server_passed: dict describing the server under test
                          (db, username, db_password, host, port, sslmode,
                          type, name, default_binary_paths)
    :param driver_passed: selenium web-driver instance for feature tests
    :return: None; results are recorded in ``test_result``
    """
    try:
        print("\n=============Running the test cases for '%s' ============="
              % server_passed['name'], file=sys.stderr)

        # Create test server
        server_information = \
            test_utils.create_parent_server_node(server_passed)

        # Create test database with random number to avoid conflict in
        # parallel execution on different platforms. This database will be
        # used across all feature tests.
        test_db_name = "acceptance_test_db" + \
            str(random.randint(10000, 65535))

        connection = test_utils.get_db_connection(
            server_passed['db'],
            server_passed['username'],
            server_passed['db_password'],
            server_passed['host'],
            server_passed['port'],
            server_passed['sslmode']
        )

        # Add the server version in server information
        server_information['server_version'] = connection.server_version
        server_information['type'] = server_passed['type']

        # Drop the database if already exists.
        test_utils.drop_database(connection, test_db_name)
        # Create database
        test_utils.create_database(server_passed, test_db_name)
        # Configure preferences for the test cases
        test_utils.configure_preferences(
            default_binary_path=server_passed['default_binary_paths'])

        # Get unit test suit
        suite = get_suite(test_module_list_passed, server_passed,
                          test_client, server_information, test_db_name,
                          driver_passed)

        # Run unit test suit created
        tests = unittest.TextTestRunner(stream=sys.stderr,
                                        descriptions=True,
                                        verbosity=2).run(suite)

        # processing results
        ran_tests, failed_cases, skipped_cases, passed_cases = \
            get_tests_result(tests)

        # This is required when some tests are running parallel
        # & some sequential in case of parallel ui tests.
        # NOTE: threading.current_thread().name replaces the camelCase
        # aliases currentThread()/getName(), which were removed in
        # Python 3.12.
        if threading.current_thread().name == "sequential_tests":
            try:
                # Aggregate with results recorded earlier for this server
                # by the parallel test phase, if any.
                if test_result[server_passed['name']][0] is not None:
                    ran_tests = test_result[server_passed['name']][0] + \
                        ran_tests
                    failed_cases.update(test_result[server_passed['name']][1])
                    skipped_cases.update(
                        test_result[server_passed['name']][2])
                    passed_cases.update(test_result[server_passed['name']][3])
                    test_result[server_passed['name']] = [ran_tests,
                                                          failed_cases,
                                                          skipped_cases,
                                                          passed_cases]
            except KeyError:
                # No earlier results for this server — fall through to the
                # unconditional assignment below.
                pass

        # Add final results server wise in test_result dict
        test_result[server_passed['name']] = [ran_tests, failed_cases,
                                              skipped_cases, passed_cases]

        # Drop the testing database created initially
        if connection:
            test_utils.drop_database(connection, test_db_name)
            connection.close()

        # Delete test server
        test_utils.delete_server(test_client, server_information)
    except Exception as exc:
        traceback.print_exc(file=sys.stderr)
        print(str(exc))
        print("Exception in {0} {1}".format(
            threading.current_thread().ident,
            threading.current_thread().name))
    finally:
        # Delete web-driver instance only in the parallel thread that owns
        # it; the sleep gives the browser time to shut down cleanly before
        # the next run starts.
        thread_name = "parallel_tests" + server_passed['name']
        if threading.current_thread().name == thread_name:
            test_utils.quit_webdriver(driver_passed)
            time.sleep(20)

        # Print info about completed tests
        print(
            "\n=============Completed the test cases for '%s'============="
            % server_passed['name'], file=sys.stderr)
# NOTE(review): top-level driver code; formatting reconstructed, tokens
# unchanged. The second fragment after the except-clause appears to be a
# duplicated variant of the same driver loop and is cut off at the end of
# this chunk.
test_module_list = get_test_modules(args)

# Login the test client
test_utils.login_tester_account(test_client)

# Load the per-server test configuration entries.
servers_info = test_utils.get_config_data()

# Node name defaults to "all"; a specific package selection narrows it to
# the last dotted component of the package path.
node_name = "all"
if args['pkg'] is not None:
    node_name = args['pkg'].split('.')[-1]

try:
    for server in servers_info:
        print(
            "\n=============Running the test cases for '%s'============="
            % server['name'], file=sys.stderr)

        # Create test server
        test_utils.create_parent_server_node(server)

        # Build and run the suite for this server.
        # NOTE(review): `unit_test` is presumably an alias for the stdlib
        # unittest module imported elsewhere — confirm at the file header.
        suite = get_suite(test_module_list, server, test_client)
        tests = unit_test.TextTestRunner(stream=sys.stderr,
                                         descriptions=True,
                                         verbosity=2).run(suite)

        ran_tests, failed_cases, skipped_cases, passed_cases = \
            get_tests_result(tests)

        # Record results per server name.
        test_result[server['name']] = [
            ran_tests, failed_cases, skipped_cases, passed_cases
        ]

        # Set empty list for 'passed' parameter for each testRun.
        # So that it will not append same test case name
        unit_test.result.TestResult.passed = []
except Exception as e:
    # Top-level boundary: report and abort the whole run.
    print(str(e))
    sys.exit(1)

# NOTE(review): second (duplicated) fragment begins here.
# Login the test client
test_utils.login_tester_account(test_client)

servers_info = test_utils.get_config_data()
node_name = "all"
if args['pkg'] is not None:
    node_name = args['pkg'].split('.')[-1]

try:
    for server in servers_info:
        print("\n=============Running the test cases for '%s'============="
              % server['name'], file=sys.stderr)

        # Create test server
        server_information = test_utils.create_parent_server_node(server)

        # Create test database with random number to avoid conflict in
        # parallel execution on different platforms. This database will be
        # used across all feature tests.
        test_db_name = "acceptance_test_db" + \
            str(random.randint(10000, 65535))

        connection = test_utils.get_db_connection(
            server['db'],
            server['username'],
            server['db_password'],
            server['host'],
            server['port'],
            server['sslmode']
        )
        # NOTE(review): chunk ends here — the remainder of this try-block
        # is not visible in this view.