# Example 1
def run_sequential_tests(url_client, servers_details, sequential_tests_lists,
                         name_of_browser, version_of_browser):
    """
    Run the given test modules one server at a time, sharing a single
    remote web-driver session across all servers.

    Each server's tests run on a worker thread named "sequential_tests",
    but the thread is joined immediately after starting, so execution is
    strictly sequential.

    :param url_client: client URL to open in the browser
    :param servers_details: iterable of server configuration dicts
    :param sequential_tests_lists: test modules to execute per server
    :param name_of_browser: browser name for the remote web-driver
    :param version_of_browser: browser version for the remote web-driver
    :return: None
    """
    driver_object = None
    try:
        # One shared remote web-driver session for the whole sequential run.
        driver_object = test_utils.get_remote_webdriver(hub_url,
                                                        name_of_browser,
                                                        version_of_browser,
                                                        "Sequential_Tests")
        # Open the application under test.
        test_utils.launch_url_in_browser(driver_object, url_client)

        # Run each server's tests on a short-lived worker thread; joining
        # right after start() keeps the overall order sequential.
        for server in servers_details:
            worker = threading.Thread(target=execute_test,
                                      name="sequential_tests",
                                      args=(sequential_tests_lists, server,
                                            driver_object))
            worker.start()
            worker.join()
    except Exception as exc:
        # Dump the stack trace, then echo the exception message.
        traceback.print_exc(file=sys.stderr)
        print(str(exc))
    finally:
        # Always release the web-driver session, even on failure.
        test_utils.quit_webdriver(driver_object)
# Example 2
def execute_test(test_module_list_passed, server_passed, driver_passed):
    """
    Run the given test modules against one server and record the results.

    Creates the parent server node, opens a DB connection, provisions a
    uniquely-named test database, runs the unittest suite, merges the
    outcome into the module-level ``test_result`` dict, and tears the
    database/server down again.

    :param test_module_list_passed: test modules to include in the suite
    :param server_passed: server configuration dict (name, db, host, ...)
    :param driver_passed: selenium web-driver instance used by the tests
    :return: None
    """
    try:
        print("\n=============Running the test cases for '%s' ============="
              % server_passed['name'], file=sys.stderr)
        # Create the parent server node for this test run.
        server_information = \
            test_utils.create_parent_server_node(server_passed)

        # Random suffix avoids name clashes when runs for different
        # platforms execute in parallel. This database is shared by all
        # feature tests of this run.
        test_db_name = "acceptance_test_db" + \
                       str(random.randint(10000, 65535))
        connection = test_utils.get_db_connection(
            server_passed['db'],
            server_passed['username'],
            server_passed['db_password'],
            server_passed['host'],
            server_passed['port'],
            server_passed['sslmode']
        )

        # Record server version/type for the suite.
        server_information['server_version'] = connection.server_version
        server_information['type'] = server_passed['type']

        # Start from a clean slate: drop any leftover DB, then recreate it.
        test_utils.drop_database(connection, test_db_name)
        test_utils.create_database(server_passed, test_db_name)

        # Configure preferences for the test cases.
        test_utils.configure_preferences(
            default_binary_path=server_passed['default_binary_paths'])

        # Build and run the unittest suite.
        suite = get_suite(test_module_list_passed,
                          server_passed,
                          test_client,
                          server_information, test_db_name, driver_passed)
        tests = unittest.TextTestRunner(stream=sys.stderr,
                                        descriptions=True,
                                        verbosity=2).run(suite)

        # Summarise the run.
        ran_tests, failed_cases, skipped_cases, passed_cases = \
            get_tests_result(tests)

        # When some tests ran in parallel and the rest run here
        # sequentially, merge this run's numbers into the totals already
        # stored for this server. (`.name` replaces the deprecated
        # `.getName()` accessor.)
        if threading.current_thread().name == "sequential_tests":
            try:
                if test_result[server_passed['name']][0] is not None:
                    ran_tests = test_result[server_passed['name']][0] + \
                        ran_tests
                    failed_cases.update(test_result[server_passed['name']][1])
                    skipped_cases.update(test_result[server_passed['name']][2])
                    passed_cases.update(test_result[server_passed['name']][3])
            except KeyError:
                # First run for this server: nothing to merge yet.
                pass

        # Store the (possibly merged) final results for this server.
        test_result[server_passed['name']] = [ran_tests, failed_cases,
                                              skipped_cases, passed_cases]

        # Drop the testing database created above and close the connection.
        if connection:
            test_utils.drop_database(connection, test_db_name)
            connection.close()
        # Delete the test server created for this run.
        test_utils.delete_server(test_client, server_information)
    except Exception as exc:
        # Keep all diagnostics together on stderr (the traceback already
        # goes there).
        traceback.print_exc(file=sys.stderr)
        print(str(exc), file=sys.stderr)
        print("Exception in {0} {1}".format(
            threading.current_thread().ident,
            threading.current_thread().name), file=sys.stderr)
    finally:
        # Parallel-test threads own their driver instance; release it here.
        thread_name = "parallel_tests" + server_passed['name']
        if threading.current_thread().name == thread_name:
            test_utils.quit_webdriver(driver_passed)
            time.sleep(20)

        # Print info about completed tests.
        print(
            "\n=============Completed the test cases for '%s'============="
            % server_passed['name'], file=sys.stderr)