def test_project_custom_browsers(self, project_function, test_utils):
    """custom_browsers() lists the functions defined in the project's
    browsers.py file, excluding names with a leading underscore.

    Writes a browsers.py containing two public browser functions and one
    private helper, then asserts only the public ones are returned.
    """
    testdir, project_name = project_function.activate()
    project = Project(project_name)
    # Path of the custom browsers module inside the activated project
    path = os.path.join(project.path, 'browsers.py')
    # Two valid custom browsers plus one underscore-prefixed function
    # that must be ignored by custom_browsers()
    content = 'def browser1(settings):\n' \
              ' pass\n' \
              'def browser2(settings):\n' \
              ' pass\n' \
              'def _not_a_browser(settings):\n' \
              ' pass'
    with open(path, 'w') as f:
        f.write(content)
    custom_browsers = project.custom_browsers()
    assert custom_browsers == ['browser1', 'browser2']
def open_browser(browser_name=None, capabilities=None, remote_url=None, browser_id=None):
    """Open a browser.

    When no arguments are provided the browser is selected from
    the CLI -b|--browsers argument, the suite `browsers` list,
    or the `default_browser` setting.

    This can be overridden in two ways:
    - a local webdriver instance or
    - a remote Selenium Grid driver instance.

    To open a local Webdriver instance pass browser_name with a valid value:
    chrome, chrome-remote, chrome-headless, chrome-remote-headless,
    edge, edge-remote, firefox, firefox-headless, firefox-remote,
    firefox-remote-headless, ie, ie-remote, opera, opera-remote

    To open a remote Selenium Grid driver pass a capabilities dictionary and
    a remote_url.
    The minimum capabilities required is:
        {
            browserName: 'chrome'
            version: ''
            platform: ''
        }
    More info here:
    https://github.com/SeleniumHQ/selenium/wiki/DesiredCapabilities

    If remote_url is None it will be taken from the `remote_url` setting.

    When opening more than one browser instance per test
    provide a browser_id to switch between browsers later on

    :Raises:
     - InvalidBrowserIdError: The browser Id is already in use

    :Returns:
     the opened browser
    """
    @contextmanager
    def validate_exec_path(browser_name, exec_path_setting, settings):
        # Resolve and validate the local webdriver executable path.
        # Raises with a descriptive, logged message on any failure.
        executable_path = settings[exec_path_setting]
        if not executable_path:
            msg = f'{exec_path_setting} setting is not defined'
            execution.logger.error(msg)
            raise Exception(msg)
        matched_executable_path = utils.match_latest_executable_path(
            executable_path, execution.testdir)
        if not matched_executable_path:
            msg = f'No executable file found using path {executable_path}'
            execution.logger.error(msg)
            raise Exception(msg)
        try:
            yield matched_executable_path
        except Exception:
            # fix: was a bare `except:` which also swallowed
            # KeyboardInterrupt / SystemExit / GeneratorExit
            msg = f"Could not start {browser_name} driver using the path '{executable_path}'\n" \
                  f"verify that the {exec_path_setting} setting points to a valid webdriver executable."
            execution.logger.error(msg)
            execution.logger.info(traceback.format_exc())
            raise Exception(msg)

    @contextmanager
    def validate_remote_url(remote_url):
        # Ensure a remote_url is available before creating a remote driver
        if not remote_url:
            msg = 'remote_url setting is required'
            execution.logger.error(msg)
            raise Exception(msg)
        yield remote_url

    project = Project(execution.project_name)
    browser_definition = execution.browser_definition
    settings = execution.settings
    # Fall back to the execution-level browser definition / settings
    # for any argument that was not explicitly provided
    if browser_name is None:
        browser_name = browser_definition['name']
    if capabilities is None:
        capabilities = browser_definition['capabilities']
    if remote_url is None:
        remote_url = settings['remote_url']
    is_custom = False
    # Default browser id: 'main' for the first browser, 'browserN' after
    if not browser_id:
        if len(execution.browsers) == 0:
            browser_id = 'main'
        else:
            browser_id = f'browser{len(execution.browsers)}'
    if browser_id in execution.browsers:
        raise InvalidBrowserIdError(
            f"browser id '{browser_id}' is already in use")
    # remote
    if capabilities:
        with validate_remote_url(remote_url) as remote_url:
            driver = GolemRemoteDriver(command_executor=remote_url,
                                       desired_capabilities=capabilities)
    # Chrome
    elif browser_name == 'chrome':
        with validate_exec_path('chrome', 'chromedriver_path', settings) as ex_path:
            chrome_options = webdriver.ChromeOptions()
            if settings['start_maximized']:
                chrome_options.add_argument('start-maximized')
            driver = GolemChromeDriver(executable_path=ex_path,
                                       chrome_options=chrome_options)
    # Chrome headless
    elif browser_name == 'chrome-headless':
        with validate_exec_path('chrome', 'chromedriver_path', settings) as ex_path:
            chrome_options = webdriver.ChromeOptions()
            chrome_options.add_argument('headless')
            chrome_options.add_argument('--window-size=1600,1600')
            driver = GolemChromeDriver(executable_path=ex_path,
                                       chrome_options=chrome_options)
    # Chrome remote
    elif browser_name == 'chrome-remote':
        with validate_remote_url(remote_url) as remote_url:
            driver = GolemRemoteDriver(
                command_executor=remote_url,
                desired_capabilities=DesiredCapabilities.CHROME)
    # Chrome remote headless
    elif browser_name == 'chrome-remote-headless':
        with validate_remote_url(remote_url) as remote_url:
            chrome_options = webdriver.ChromeOptions()
            chrome_options.add_argument('headless')
            desired_capabilities = chrome_options.to_capabilities()
            driver = GolemRemoteDriver(
                command_executor=remote_url,
                desired_capabilities=desired_capabilities)
    # Edge
    elif browser_name == 'edge':
        with validate_exec_path('edge', 'edgedriver_path', settings) as ex_path:
            driver = GolemEdgeDriver(executable_path=ex_path)
    # Edge remote
    elif browser_name == 'edge-remote':
        with validate_remote_url(remote_url) as remote_url:
            driver = GolemRemoteDriver(
                command_executor=remote_url,
                desired_capabilities=DesiredCapabilities.EDGE)
    # Firefox
    elif browser_name == 'firefox':
        with validate_exec_path('firefox', 'geckodriver_path', settings) as ex_path:
            driver = GolemGeckoDriver(executable_path=ex_path)
    # Firefox headless
    elif browser_name == 'firefox-headless':
        with validate_exec_path('firefox', 'geckodriver_path', settings) as ex_path:
            firefox_options = webdriver.FirefoxOptions()
            firefox_options.headless = True
            driver = GolemGeckoDriver(executable_path=ex_path,
                                      firefox_options=firefox_options)
    # Firefox remote
    elif browser_name == 'firefox-remote':
        with validate_remote_url(remote_url) as remote_url:
            driver = GolemRemoteDriver(
                command_executor=remote_url,
                desired_capabilities=DesiredCapabilities.FIREFOX)
    # Firefox remote headless
    elif browser_name == 'firefox-remote-headless':
        with validate_remote_url(remote_url) as remote_url:
            firefox_options = webdriver.FirefoxOptions()
            firefox_options.headless = True
            desired_capabilities = firefox_options.to_capabilities()
            driver = GolemRemoteDriver(
                command_executor=remote_url,
                desired_capabilities=desired_capabilities)
    # IE
    elif browser_name == 'ie':
        with validate_exec_path('internet explorer', 'iedriver_path', settings) as ex_path:
            driver = GolemIeDriver(executable_path=ex_path)
    # IE remote
    elif browser_name == 'ie-remote':
        with validate_remote_url(remote_url) as remote_url:
            driver = GolemRemoteDriver(
                command_executor=remote_url,
                desired_capabilities=DesiredCapabilities.INTERNETEXPLORER)
    # Opera
    elif browser_name == 'opera':
        with validate_exec_path('opera', 'operadriver_path', settings) as ex_path:
            opera_options = webdriver.ChromeOptions()
            if 'opera_binary_path' in settings:
                opera_options.binary_location = settings['opera_binary_path']
            driver = GolemOperaDriver(executable_path=ex_path,
                                      options=opera_options)
    # Opera remote
    elif browser_name == 'opera-remote':
        with validate_remote_url(remote_url) as remote_url:
            driver = GolemRemoteDriver(
                command_executor=remote_url,
                desired_capabilities=DesiredCapabilities.OPERA)
    # Custom browser defined in the project's browsers.py
    elif browser_name in project.custom_browsers():
        is_custom = True
        module, _ = project.custom_browser_module()
        custom_browser_func = getattr(module, browser_name)
        driver = custom_browser_func(settings)
    else:
        # fix: report the browser name actually requested instead of
        # always echoing the execution-level browser definition
        raise Exception(f'Error: {browser_name} is not a valid driver')

    if settings['start_maximized'] and not is_custom:
        # currently there is no way to maximize chrome window on OSX
        # (chromedriver 2.43), adding workaround
        # https://bugs.chromium.org/p/chromedriver/issues/detail?id=2389
        # https://bugs.chromium.org/p/chromedriver/issues/detail?id=2522
        # TODO: assess if this work-around is still needed when
        # chromedriver 2.44 is released
        is_mac = 'mac' in driver.capabilities.get('platform', '').lower()
        if not ('chrome' in browser_definition['name'] and is_mac):
            driver.maximize_window()

    execution.browsers[browser_id] = driver
    # Set the new browser as the active browser
    execution.browser = driver
    return execution.browser
class ExecutionRunner:
    """Executes tests or suites.

    Three points of entry:
      run_test
      run_suite
      run_directory
    """

    def __init__(self, project_name, browsers=None, processes=1,
                 environments=None, interactive=False, timestamp=None,
                 reports=None, report_folder=None, report_name=None, tags=None,
                 test_functions=None):
        # Guard against mutable default arguments
        if reports is None:
            reports = []
        if tags is None:
            tags = []
        self.project = Project(project_name)
        self.cli_args = SimpleNamespace(browsers=browsers, processes=processes,
                                        envs=environments, tags=tags)
        self.interactive = interactive
        self.timestamp = timestamp
        self.reports = reports
        self.report_folder = report_folder
        self.report_name = report_name
        self.report = {}
        self.tests = []
        self.is_suite = False
        self.suite_name = None
        self.test_name = None
        self.execution_name = None
        self.selected_browsers = None
        self.start_time = None
        self.test_functions = test_functions
        # Suite-level attributes, filled in by run_suite()
        self.suite = SimpleNamespace(processes=None, browsers=None, envs=None,
                                     before=None, after=None, tags=None)
        has_failed_tests = self._create_execution_has_failed_tests_flag()
        # Execution-level attributes, resolved in _prepare()
        self.execution = SimpleNamespace(processes=1, browsers=[], envs=[],
                                         tests=[], reportdir=None, tags=[],
                                         has_failed_tests=has_failed_tests)

    @staticmethod
    def _create_execution_has_failed_tests_flag():
        """Multiprocessing safe flag to track if any test has failed or
        errored during the execution

        Returns a multiprocessing.managers.ValueProxy
        """
        return multiprocessing.Manager().Value('error', False)

    def _select_environments(self, project_envs):
        """Define the environments to use for the test.

        The test can have a list of environments set from 2 places:
          - using the -e|--environments CLI argument
          - suite `environments` variable

        If both of these are empty try using the first env if there
        are any envs defined for the project. Otherwise just return an
        empty list, meaning: no envs will be used.
        """
        if self.cli_args.envs:
            # use the environments passed through command line
            envs = self.cli_args.envs
        elif self.suite.envs:
            # use the environments defined in the suite
            envs = self.suite.envs
        elif project_envs:
            # if there are available envs, use the first by default
            envs = [sorted(project_envs)[0]]
        else:
            envs = []
        return envs

    def _create_execution_directory(self):
        """Generate the execution report directory"""
        return exec_report.create_execution_directory(
            self.project.name, self.execution_name, self.timestamp)

    def _define_execution_list(self):
        """Generate the execution list

        Generates a list with the required combinations for each of the
        following elements:
          - tests
          - data sets
          - environments
          - browsers
        """
        execution_list = []
        envs = self.execution.envs or [None]
        envs_data = environment_manager.get_environment_data(self.project.name)
        secrets = secrets_manager.get_secrets(self.project.name)
        for test in self.tests:
            data_sets = test_data.get_parsed_test_data(self.project.name, test)
            # A unique set id (set_name) is only needed when the same test
            # runs more than once: multiple data sets, envs or browsers.
            # Otherwise it's just one set with set_name = ''
            multiple_data_sets = (len(data_sets) > 1 or len(envs) > 1 or
                                  len(self.execution.browsers) > 1)
            for data_set in data_sets:
                for env in envs:
                    data_set_env = dict(data_set)
                    if env in envs_data:
                        # add env_data to data_set
                        data_set_env['env'] = envs_data[env]
                        data_set_env['env']['name'] = env
                    for browser in self.execution.browsers:
                        if multiple_data_sets:
                            set_name = str(uuid.uuid4())[:6]
                        else:
                            set_name = ''
                        testdef = SimpleNamespace(
                            name=test,
                            data_set=data_set_env,
                            secrets=secrets,
                            browser=browser,
                            reportdir=self.execution.reportdir,
                            env=env,
                            set_name=set_name)
                        execution_list.append(testdef)
        return execution_list

    def _print_number_of_tests_found(self):
        """Print number of tests and test sets to console"""
        test_number = len(self.tests)
        set_number = len(self.execution.tests)
        if test_number > 0:
            msg = f'Tests found: {test_number}'
            if test_number != set_number:
                msg = f'{msg} ({set_number} sets)'
            print(msg)

    def _filter_tests_by_tags(self):
        """Return the subset of self.tests matching self.execution.tags.

        On an invalid tag expression the execution is flagged as failed
        and an empty list is returned.
        """
        tests = []
        try:
            tests = tags_manager.filter_tests_by_tags(
                self.project.name, self.tests, self.execution.tags)
        except tags_manager.InvalidTagExpression as e:
            print(f'{e.__class__.__name__}: {e}')
            self.execution.has_failed_tests.value = True
        else:
            if len(tests) == 0:
                print("No tests found with tag(s): {}".format(
                    ', '.join(self.execution.tags)))
        return tests

    def _get_elapsed_time(self, start_time):
        """Seconds elapsed since start_time, rounded to 2 decimals;
        0 when start_time is falsy (execution never started)."""
        # fix: use the start_time parameter instead of always reading
        # self.start_time; the only call site passes self.start_time,
        # so behavior is unchanged
        elapsed_time = 0
        if start_time:
            elapsed_time = round(time.time() - start_time, 2)
        return elapsed_time

    def run_test(self, test):
        """Run a single test.

        `test` can be a path to a Python file or an import path.
        Both relative to the tests folder.
        Example:
          test = 'folder/test.py'
          test = 'folder.test'
        """
        if test.endswith('.py'):
            filename, _ = os.path.splitext(test)
            test = '.'.join(os.path.normpath(filename).split(os.sep))
        self.tests = [test]
        self.test_name = test
        self.suite_name = test
        self.execution_name = test
        self._prepare()

    def run_suite(self, suite):
        """Run a suite.

        `suite` can be a path to a Python file or an import path.
        Both relative to the suites folder.
        Example:
          suite = 'folder/suite.py'
          suite = 'folder.suite'
        """
        # TODO
        if suite.endswith('.py'):
            filename, _ = os.path.splitext(suite)
            suite = '.'.join(os.path.normpath(filename).split(os.sep))

        suite_obj = suite_module.Suite(self.project.name, suite)

        self.tests = suite_obj.tests
        if len(self.tests) == 0:
            print(f'No tests found for suite {suite}')

        self.suite.processes = suite_obj.processes
        self.suite.browsers = suite_obj.browsers
        self.suite.envs = suite_obj.environments
        self.suite.tags = suite_obj.tags
        module = suite_obj.get_module()
        self.suite.before = getattr(module, 'before', None)
        self.suite.after = getattr(module, 'after', None)
        self.suite_name = suite
        self.execution_name = suite
        self.is_suite = True
        self._prepare()

    def run_directory(self, directory):
        """Run every test inside a directory.

        `directory` has to be a relative path from the tests folder.
        To run every test in tests folder use: directory=''
        """
        self.tests = self.project.tests(directory=directory)
        if len(self.tests) == 0:
            print(f'No tests were found in {os.path.join("tests", directory)}')
        self.is_suite = True
        if directory == '':
            suite_name = 'all'
        else:
            suite_name = '.'.join(os.path.normpath(directory).split(os.sep))
        self.suite_name = suite_name
        self.execution_name = suite_name
        self._prepare()

    def _prepare(self):
        """Resolve every execution attribute (timestamp, report dir, tags,
        processes, browsers, envs and the final execution list) and then
        run the tests."""
        # Generate timestamp if needed.
        # A timestamp is passed when the test is executed from the GUI.
        # The gui uses this timestamp to fetch the test execution status
        # later on. Otherwise, a new timestamp should be generated at
        # this point.
        if not self.timestamp:
            self.timestamp = utils.get_timestamp()

        # create the execution report directory
        # The directory takes this structure:
        #   reports/<execution_name>/<timestamp>/
        self.execution.reportdir = self._create_execution_directory()

        # Filter tests by tags
        self.execution.tags = self.cli_args.tags or self.suite.tags or []
        if self.execution.tags:
            self.tests = self._filter_tests_by_tags()

        if not self.tests:
            self._finalize()
        else:
            # get amount of processes (parallel executions), default is 1
            if self.cli_args.processes > 1:
                # the processes arg passed through cli has higher priority
                self.execution.processes = self.cli_args.processes
            elif self.suite.processes:
                self.execution.processes = self.suite.processes

            # select the browsers to use in this execution
            # the order of precedence is:
            # 1. browsers defined by CLI
            # 2. browsers defined inside a suite
            # 3. 'default_browser' setting key
            # 4. default default is 'chrome'
            self.selected_browsers = utils.choose_browser_by_precedence(
                cli_browsers=self.cli_args.browsers,
                suite_browsers=self.suite.browsers,
                settings_default_browser=session.settings['default_browser'])

            # Define the attributes for each browser.
            # A browser name can be predefined ('chrome, 'chrome-headless',
            # 'firefox', etc) or it can be defined by the user with the
            # 'remote_browsers' setting.
            # Remote browsers have extra details such as capabilities
            #
            # Each defined browser must have the following attributes:
            #   'name': real name,
            #   'capabilities': full capabilities defined in the
            #                   remote_browsers setting
            remote_browsers = settings_manager.get_remote_browsers(
                session.settings)
            default_browsers = gui_utils.get_supported_browsers_suggestions()
            custom_browsers = self.project.custom_browsers()
            self.execution.browsers = define_browsers(
                self.selected_browsers, remote_browsers, default_browsers,
                custom_browsers)

            # The user can define environments in the environments.json file.
            # The suite/test can be executed in one or more of these
            # environments.
            # Which environments will be used is defined by this order of
            # preference:
            # 1. envs passed by CLI
            # 2. envs defined inside the suite
            # 3. The first env defined for the project
            # 4. no envs at all
            #
            # Note, in the case of 4, the test might fail if it tries
            # to use env variables
            project_envs = environment_manager.get_envs(self.project.name)
            self.execution.envs = self._select_environments(project_envs)
            invalid_envs = [e for e in self.execution.envs
                            if e not in project_envs]
            if invalid_envs:
                print('ERROR: the following environments do not exist for '
                      f'project {self.project.name}: {", ".join(invalid_envs)}')
                self.execution.has_failed_tests.value = True
                self._finalize()
                return

            # Generate the execution list
            # Each test must be executed for each:
            #   * data set
            #   * environment
            #   * browser
            # The result is a list that contains all the requested
            # combinations
            self.execution.tests = self._define_execution_list()

            # Initialize reports with status 'pending'
            initialize_reports_for_test_files(self.project.name,
                                              self.execution.tests)

            self._print_number_of_tests_found()
            try:
                self._execute()
            except KeyboardInterrupt:
                self.execution.has_failed_tests.value = True
                self._finalize()

    def _execute(self):
        """Run the suite `before` hook, the tests (serially or in
        parallel), the suite `after` hook, and finalize."""
        self.start_time = time.time()
        suite_error = False

        # run suite `before` function
        if self.suite.before:
            try:
                self.suite.before()
            except Exception:
                # fix: was a bare `except:` which also swallowed
                # KeyboardInterrupt
                print('ERROR: suite before function failed')
                print(traceback.format_exc())
                # fix: this flag was never set, leaving the guard below
                # dead code; a failed `before` now skips the tests
                suite_error = True

        if not suite_error:
            if self.interactive and self.execution.processes != 1:
                print('WARNING: to run in debug mode, processes must '
                      'equal one')

            if self.execution.processes == 1:
                # run tests serially
                for test in self.execution.tests:
                    run_test(session.testdir, self.project.name, test.name,
                             test.data_set, test.secrets, test.browser,
                             test.env, session.settings, test.reportdir,
                             test.set_name, self.test_functions,
                             self.execution.has_failed_tests,
                             self.execution.tags, self.is_suite)
            else:
                # run tests using multiprocessing
                multiprocess_executor(self.project.name, self.execution.tests,
                                      self.execution.has_failed_tests,
                                      self.test_functions,
                                      self.execution.processes,
                                      self.execution.tags, self.is_suite)

        # run suite `after` function
        if self.suite.after:
            try:
                self.suite.after()
            except Exception:
                # fix: error message previously said "before"
                print('ERROR: suite after function failed')
                print(traceback.format_exc())

        self._finalize()

    def _finalize(self):
        """Generate report.json plus any requested reports (junit, json,
        html, html-no-images) and exit with status 1 on failures."""
        elapsed_time = self._get_elapsed_time(self.start_time)

        # generate report.json
        self.report = exec_report.generate_execution_report(
            self.execution.reportdir, elapsed_time, self.execution.browsers,
            self.execution.processes, self.execution.envs,
            self.execution.tags, session.settings['remote_url'])
        cli_report.report_to_cli(self.report)
        cli_report.print_totals(self.report)

        # generate requested reports
        report_name = self.report_name or 'report'
        report_folder = self.report_folder or self.execution.reportdir
        # NOTE(review): the junit and html generators receive
        # self.report_folder (possibly None) rather than the resolved
        # `report_folder`; presumably they fall back internally — confirm
        if 'junit' in self.reports:
            junit_report.generate_junit_report(
                self.project.name, self.execution_name, self.timestamp,
                self.report_folder, report_name)
        if 'json' in self.reports and (self.report_folder or self.report_name):
            exec_report.save_execution_json_report(self.report, report_folder,
                                                   report_name)
        if 'html' in self.reports:
            html_report.generate_html_report(
                self.project.name, self.suite_name, self.timestamp,
                self.report_folder, report_name)
        if 'html-no-images' in self.reports:
            if 'html' in self.reports:
                # avoid clobbering the plain html report generated above
                report_name = report_name + '-no-images'
            html_report.generate_html_report(
                self.project.name, self.suite_name, self.timestamp,
                self.report_folder, report_name, no_images=True)

        # exit to the console with exit status code 1 in case a test fails
        if self.execution.has_failed_tests.value:
            sys.exit(1)