def _select_environments(self):
    """Define the environments to use for the test.

    The test can have a list of environments set from 2 places:
      - using the -e|--environments CLI argument
      - suite `environments` variable
    If both of these are empty, try using the first env if there are
    any envs defined for the project. Otherwise just return ['']
    meaning: no envs will be used.
    """
    all_project_envs = environment_manager.get_envs(test_execution.root_path,
                                                    self.project)
    if self.cli_args.envs:
        # use the environments passed through the command line
        envs = self.cli_args.envs
    elif self.suite.envs:
        # use the environments defined in the suite
        envs = self.suite.envs
    elif all_project_envs:
        # if there are available envs, use the first one by default
        envs = [sorted(all_project_envs)[0]]
    else:
        envs = ['']
    return envs
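# A minimal standalone sketch of the precedence documented above, kept outside
# the runner for illustration; the helper name and the env names below are
# hypothetical, not part of the framework.
def select_environments_sketch(cli_envs, suite_envs, project_envs):
    """Return the envs to run with: CLI > suite > first project env > ''."""
    if cli_envs:
        return cli_envs
    if suite_envs:
        return suite_envs
    if project_envs:
        return [sorted(project_envs)[0]]
    return ['']


assert select_environments_sketch(['qa'], ['stg'], ['dev']) == ['qa']
assert select_environments_sketch([], ['stg'], ['dev']) == ['stg']
assert select_environments_sketch([], [], ['stg', 'dev']) == ['dev']
assert select_environments_sketch([], [], []) == ['']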
def test_get_envs_empty_file(self, project_session):
    testdir = project_session.testdir
    project = project_session.name
    env_json_path = os.path.join(project_session.path, 'environments.json')
    with open(env_json_path, 'w') as env_json_file:
        env_json_file.write('')
    envs = environment_manager.get_envs(testdir, project)
    assert envs == []
def test_get_envs(self, project_session):
    _, project = project_session.activate()
    env_json_path = os.path.join(project_session.path, 'environments.json')
    with open(env_json_path, 'w') as env_json_file:
        env_json_file.write(ENV_DATA)
    envs = environment_manager.get_envs(project)
    assert len(envs) == 2
    assert 'test' in envs
    assert 'development' in envs
def test_get_envs_invalid_json(self, project_session):
    testdir = project_session.testdir
    project = project_session.name
    env_json_path = os.path.join(project_session.path, 'environments.json')
    with open(env_json_path, 'w') as env_json_file:
        env_json_file.write(ENV_DATA_INVALID_JSON)
    envs = environment_manager.get_envs(testdir, project)
    expected_envs = []
    assert envs == expected_envs
def test_get_envs_file_not_exist(self, project_session):
    testdir = project_session.testdir
    project = project_session.name
    env_json_path = os.path.join(project_session.path, 'environments.json')
    if os.path.isfile(env_json_path):
        os.remove(env_json_path)
    envs = environment_manager.get_envs(testdir, project)
    expected_envs = []
    assert envs == expected_envs
def test_get_envs_invalid_json(self, permanent_project_fixture):
    project = permanent_project_fixture['name']
    testdir = permanent_project_fixture['testdir']
    env_json_path = os.path.join(testdir, 'projects', project, 'environments.json')
    with open(env_json_path, 'w') as env_json_file:
        env_json_file.write(ENV_DATA_INVALID_JSON)
    envs = environment_manager.get_envs(testdir, project)
    expected_envs = []
    assert envs == expected_envs
def test_get_envs_file_not_exist(self, permanent_project_fixture):
    project = permanent_project_fixture['name']
    testdir = permanent_project_fixture['testdir']
    env_json_path = os.path.join(testdir, 'projects', project, 'environments.json')
    if os.path.isfile(env_json_path):
        os.remove(env_json_path)
    envs = environment_manager.get_envs(testdir, project)
    expected_envs = []
    assert envs == expected_envs
def test__select_environments_all_envs_empty(self, project_function):
    """Verify that _select_environments uses the correct order of
    precedence when cli environments, suite environments and project
    environments are empty"""
    _, project = project_function.activate()
    execution_runner = exc_runner.ExecutionRunner(project)
    execution_runner.cli_args.envs = []
    execution_runner.suite.envs = []
    project_envs = environment_manager.get_envs(project)
    result_envs = execution_runner._select_environments(project_envs)
    assert result_envs == []
def test__select_environments(self, project_session):
    """Verify that _select_environments uses the correct order of precedence"""
    _, project = project_session.activate()
    cli_envs = ['cli_env_1', 'cli_env_2']
    execution_runner = exc_runner.ExecutionRunner(project)
    execution_runner.cli_args.envs = cli_envs
    execution_runner.suite.envs = ['suite_env_1', 'suite_env_2']
    project_envs = environment_manager.get_envs(project)
    result_envs = execution_runner._select_environments(project_envs)
    assert result_envs == cli_envs
def test_get_envs(self, permanent_project_fixture):
    project = permanent_project_fixture['name']
    testdir = permanent_project_fixture['testdir']
    env_json_path = os.path.join(testdir, 'projects', project, 'environments.json')
    with open(env_json_path, 'w') as env_json_file:
        env_json_file.write(ENV_DATA)
    envs = environment_manager.get_envs(testdir, project)
    assert len(envs) == 2
    assert 'test' in envs
    assert 'development' in envs
def test__select_environments_cli_envs_empty_suite_envs_empty(
        self, project_function):
    """Verify that _select_environments uses the correct order of
    precedence when cli environments and suite environments are empty"""
    testdir, project = project_function.activate()
    execution_runner = exc_runner.ExecutionRunner(project)
    execution_runner.cli_args.envs = []
    execution_runner.suite.envs = []
    path = os.path.join(testdir, 'projects', project, 'environments.json')
    with open(path, 'w+') as f:
        f.write('{"env3": {}, "env4": {}}')
    project_envs = environment_manager.get_envs(project)
    result_envs = execution_runner._select_environments(project_envs)
    assert result_envs == ['env3']
def test__select_environments_cli_envs_empty(self, project_function):
    """Verify that _select_environments uses the correct order of
    precedence when cli environments is empty"""
    testdir, project = project_function.activate()
    cli_envs = []
    suite_envs = ['suite_env_1', 'suite_env_2']
    execution_runner = exc_runner.ExecutionRunner()
    execution_runner.project = project
    execution_runner.cli_args.envs = cli_envs
    execution_runner.suite.envs = suite_envs
    path = os.path.join(testdir, 'projects', project, 'environments.json')
    with open(path, 'w+') as f:
        f.write('{"env1": {}, "env2": {}}')
    project_envs = environment_manager.get_envs(project)
    result_envs = execution_runner._select_environments(project_envs)
    assert result_envs == suite_envs
def get_environments():
    project = request.form['project']
    return json.dumps(environment_manager.get_envs(project))
def get_environments():
    project = request.args['project']
    return json.dumps(environment_manager.get_envs(root_path, project))
def project_environments():
    project = request.args['project']
    _verify_permissions(Permissions.READ_ONLY, project)
    return jsonify(environment_manager.get_envs(project))
def _prepare(self):
    # Generate timestamp if needed.
    # A timestamp is passed when the test is executed from the GUI.
    # The GUI uses this timestamp to fetch the test execution status later on.
    # Otherwise, a new timestamp should be generated at this point.
    if not self.timestamp:
        self.timestamp = utils.get_timestamp()

    # Create the execution report directory.
    # If this is a suite, the directory takes this structure:
    #   reports/<suite_name>/<timestamp>/
    # If this is a single test, the directory takes this structure:
    #   reports/single_tests/<test_name>/<timestamp>/
    self.execution.reportdir = self._create_execution_directory()

    # Filter tests by tags
    self.execution.tags = self.cli_args.tags or self.suite.tags or []
    if self.execution.tags:
        self.tests = self._filter_tests_by_tags()

    if not self.tests:
        self._finalize()
    else:
        # Get the amount of processes (parallel executions), default is 1.
        if self.cli_args.processes > 1:
            # the processes arg passed through the CLI has higher priority
            self.execution.processes = self.cli_args.processes
        elif self.suite.processes:
            self.execution.processes = self.suite.processes

        # Select the browsers to use in this execution.
        # The order of precedence is:
        #   1. browsers defined by CLI
        #   2. browsers defined inside a suite
        #   3. 'default_browser' setting key
        #   4. the fallback default, 'chrome'
        self.selected_browsers = utils.choose_browser_by_precedence(
            cli_browsers=self.cli_args.browsers,
            suite_browsers=self.suite.browsers,
            settings_default_browser=session.settings['default_browser'])

        # Define the attributes for each browser.
        # A browser name can be predefined ('chrome', 'chrome-headless',
        # 'firefox', etc.) or it can be defined by the user with the
        # 'remote_browsers' setting. Remote browsers have extra details
        # such as capabilities.
        # Each defined browser must have the following attributes:
        #   'name': real name
        #   'full_name': the remote_browser name defined by the user
        #   'remote': whether this is a remote_browser or not
        #   'capabilities': full capabilities defined in the remote_browsers setting
        remote_browsers = settings_manager.get_remote_browsers(session.settings)
        default_browsers = gui_utils.get_supported_browsers_suggestions()
        self.execution.browsers = define_browsers(self.selected_browsers,
                                                  remote_browsers,
                                                  default_browsers)

        # Select which environments to use.
        # The user can define environments in the environments.json file.
        # The suite/test can be executed in one or more of these environments.
        # Which environments will be used is defined by this order of precedence:
        #   1. envs passed by CLI
        #   2. envs defined inside the suite
        #   3. the first env defined for the project
        #   4. no envs at all
        # Note: in the case of 4, the test might fail if it tries
        # to use env variables.
        project_envs = environment_manager.get_envs(self.project)
        self.execution.envs = self._select_environments(project_envs)
        invalid_envs = [e for e in self.execution.envs if e not in project_envs]
        if invalid_envs:
            print('ERROR: the following environments do not exist for project {}: {}'
                  .format(self.project, ', '.join(invalid_envs)))
            self.execution.has_failed_tests.value = True
            self._finalize()
            return

        # Generate the execution list.
        # Each test must be executed for each:
        #   * data set
        #   * environment
        #   * browser
        # The result is a list that contains all the requested combinations.
        self.execution.tests = self._define_execution_list()
        self._print_number_of_tests_found()

        # For each test, create the test report directory.
        # For example, in a suite 'suite1' with a 'test1':
        #   reports/suite1/2017.07.02.19.22.20.001/test1/set_00001/
        for test in self.execution.tests:
            test.reportdir = report.create_report_directory(
                self.execution.reportdir, test.name, self.is_suite)
        try:
            self._execute()
        except KeyboardInterrupt:
            self.execution.has_failed_tests.value = True
            self._finalize()
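# A standalone sketch of how the execution list described above can be
# expanded: one entry per (data set, environment, browser) combination.
# The data below is hypothetical; the real _define_execution_list builds
# richer test objects.
from itertools import product

data_sets = [{'user': 'u1'}, {'user': 'u2'}]
envs = ['staging']
browsers = ['chrome', 'firefox']

execution_list = [{'data_set': d, 'env': e, 'browser': b}
                  for d, e, b in product(data_sets, envs, browsers)]
assert len(execution_list) == len(data_sets) * len(envs) * len(browsers)  # 4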
def run_test_or_suite(workspace, project, test=None, suite=None, directory=None):
    """Run a suite, test, or directory containing tests."""
    execution = {
        'tests': [],
        'workers': 1,
        'drivers': [],
        'environments': [],
        'suite_before': None,
        'suite_after': None
    }
    suite_amount_workers = None
    suite_drivers = None
    suite_envs = []
    suite_name = None
    is_suite = False
    if test:
        execution['tests'] = [test]
        suite_name = 'single_tests'
    elif suite:
        execution['tests'] = suite_module.get_suite_test_cases(workspace, project, suite)
        suite_amount_workers = suite_module.get_suite_amount_of_workers(workspace, project, suite)
        suite_drivers = suite_module.get_suite_browsers(workspace, project, suite)
        suite_envs = suite_module.get_suite_environments(workspace, project, suite)
        suite_imported_module = suite_module.get_suite_module(workspace, project, suite)
        execution['suite_before'] = getattr(suite_imported_module, 'before', None)
        execution['suite_after'] = getattr(suite_imported_module, 'after', None)
        suite_name = suite
        is_suite = True
    elif directory:
        execution['tests'] = utils.get_directory_test_cases(workspace, project, directory)
        suite_name = directory
        is_suite = True
    else:
        sys.exit("ERROR: invalid arguments for run_test_or_suite()")

    # warn if no tests were found
    if len(execution['tests']) == 0:
        print('Warning: no tests were found')

    # get the amount of workers (parallel executions), default is 1
    if test_execution.thread_amount:
        # the thread count passed through the CLI has higher priority
        execution['workers'] = test_execution.thread_amount
    elif suite_amount_workers:
        execution['workers'] = suite_amount_workers

    # Select the drivers to use in this execution.
    # The order of precedence is:
    #   1. drivers defined by CLI
    #   2. drivers defined inside a suite
    #   3. 'default_driver' setting
    #   4. the fallback default, 'chrome'
    settings_default_driver = test_execution.settings['default_browser']
    selected_drivers = utils.choose_driver_by_precedence(
        cli_drivers=test_execution.cli_drivers,
        suite_drivers=suite_drivers,
        settings_default_driver=settings_default_driver)

    # Define the attributes for each driver.
    # A driver can be predefined ('chrome', 'chrome-headless', 'firefox', etc.)
    # or it can be defined by the user with the 'remote_browsers' setting.
    # Remote browsers have extra details such as capabilities.
    # Each driver must have the following attributes:
    #   'name': real name
    #   'full_name': the remote_browser name defined by the user
    #   'remote': whether this is a remote_browser or not
    #   'capabilities': full capabilities defined in the remote_browsers setting
    remote_browsers = settings_manager.get_remote_browsers(test_execution.settings)
    default_browsers = gui_utils.get_supported_browsers_suggestions()
    execution['drivers'] = _define_drivers(selected_drivers, remote_browsers,
                                           default_browsers)

    # Generate timestamp if needed.
    # A timestamp is passed when the test is executed from the GUI.
    # The GUI uses this timestamp to fetch the test execution status later on.
    # Otherwise, a new timestamp should be generated at this point.
    if not test_execution.timestamp:
        test_execution.timestamp = utils.get_timestamp()

    # Select which envs to use.
    # The user can define environments in the environments.json file.
    # The suite/test can be executed in one or more of these environments.
    # Which environments to use is defined by this order of precedence:
    #   1. envs passed by CLI
    #   2. envs defined inside the suite
    #   3. the first env defined
    #   4. no envs at all
    # Note: in the case of 4, the test might fail if it tries
    # to use env variables.
    cli_envs = test_execution.cli_environments
    project_envs = environment_manager.get_envs(workspace, project)
    execution['environments'] = _select_environments(cli_envs, suite_envs,
                                                     project_envs)

    # Generate the execution list.
    # Each test must be executed for each:
    #   * data set
    #   * environment
    #   * driver
    # The result is a list that contains all the requested combinations.
    execution_list = _define_execution_list(workspace, project, execution)

    # Create the execution directory.
    # If this is a suite, the directory takes this structure:
    #   reports/<suite_name>/<timestamp>/
    # If this is a single test, the directory takes this structure:
    #   reports/single_tests/<test_name>/<timestamp>/
    execution_directory = _create_execution_directory(workspace, project,
                                                      test_execution.timestamp,
                                                      test_name=test,
                                                      suite_name=suite_name,
                                                      is_suite=is_suite)

    # For each test, create the test directory.
    # For example, in a suite 'suite1' with a 'test1':
    #   reports/suite1/2017.07.02.19.22.20.001/test1/set_00001/
    for test in execution_list:
        report_directory = report.create_report_directory(execution_directory,
                                                          test['test_name'],
                                                          is_suite)
        test['report_directory'] = report_directory

    # EXECUTION
    start_time = time.time()
    suite_error = False

    # run suite `before` function
    if execution['suite_before']:
        try:
            execution['suite_before'].__call__()
        except:
            suite_error = True
            print('ERROR: suite before function failed')
            print(traceback.format_exc())

    if not suite_error:
        if test_execution.interactive and execution['workers'] != 1:
            print('WARNING: to run in debug mode, workers must equal one')
        if execution['workers'] == 1:
            # run tests serially
            for test in execution_list:
                run_test(workspace, project, test['test_name'],
                         test['data_set'], test['driver'],
                         test_execution.settings, test['report_directory'])
        else:
            # run tests using multiprocessing
            multiprocess_executor(execution_list, execution['workers'])

    # run suite `after` function
    if execution['suite_after']:
        try:
            execution['suite_after'].__call__()
        except:
            print('ERROR: suite after function failed')
            print(traceback.format_exc())

    # generate execution_result.json
    elapsed_time = round(time.time() - start_time, 2)
    report_parser.generate_execution_report(execution_directory, elapsed_time)
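# An illustrative sketch of the driver-definition step above, with hypothetical
# settings values; the real _define_drivers reads 'remote_browsers' from
# settings.json and exits with an error message for unknown names.
def define_drivers_sketch(selected, remote_browsers, default_browsers):
    drivers = []
    for name in selected:
        if name in remote_browsers:
            # remote browser: keep the user-defined name and its capabilities
            caps = remote_browsers[name]
            drivers.append({'name': caps['browserName'], 'full_name': name,
                            'remote': True, 'capabilities': caps})
        elif name in default_browsers:
            # predefined local browser
            drivers.append({'name': name, 'full_name': '',
                            'remote': False, 'capabilities': None})
        else:
            raise ValueError('the browser {} is not defined'.format(name))
    return drivers


remote = {'chrome_60_win': {'browserName': 'chrome', 'version': '60'}}
drivers = define_drivers_sketch(['chrome_60_win', 'firefox'],
                                remote, ['chrome', 'firefox'])
assert drivers[0]['remote'] is True and drivers[0]['name'] == 'chrome'
assert drivers[1] == {'name': 'firefox', 'full_name': '',
                      'remote': False, 'capabilities': None}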
def test_get_envs_file_not_exist(self, project_session):
    _, project = project_session.activate()
    env_json_path = os.path.join(project_session.path, 'environments.json')
    if os.path.isfile(env_json_path):
        os.remove(env_json_path)
    assert environment_manager.get_envs(project) == []
def test_get_envs_invalid_json(self, project_session):
    _, project = project_session.activate()
    env_json_path = os.path.join(project_session.path, 'environments.json')
    with open(env_json_path, 'w') as env_json_file:
        env_json_file.write(ENV_DATA_INVALID_JSON)
    assert environment_manager.get_envs(project) == []
def test_get_envs_empty_file(self, project_session):
    _, project = project_session.activate()
    env_json_path = os.path.join(project_session.path, 'environments.json')
    with open(env_json_path, 'w') as env_json_file:
        env_json_file.write('')
    assert environment_manager.get_envs(project) == []
def run_test_or_suite(workspace, project, test=None, suite=None, directory_suite=None):
    '''Run a test, a suite or a "directory suite".'''
    tests = []
    threads = 1
    suite_amount_workers = None
    suite_drivers = None
    suite_envs = []
    drivers = []
    suite_module = None
    report_suite_name = None
    is_suite = False
    # get the test list
    if test:
        tests = [test]
        report_suite_name = 'single_tests'
    elif suite:
        tests = utils.get_suite_test_cases(workspace, project, suite)
        suite_amount_workers = utils.get_suite_amount_of_workers(workspace, project, suite)
        suite_drivers = utils.get_suite_browsers(workspace, project, suite)
        suite_envs = utils.get_suite_environments(workspace, project, suite)
        suite_module = utils.get_suite_module(test_execution.root_path,
                                              test_execution.project, suite)
        report_suite_name = suite
        is_suite = True
    elif directory_suite:
        tests = utils.get_directory_suite_test_cases(workspace, project, directory_suite)
        report_suite_name = directory_suite
        is_suite = True
    else:
        sys.exit("ERROR: invalid arguments for run_test_or_suite()")

    # get threads
    if test_execution.thread_amount:
        # the thread count passed through the CLI has higher priority
        threads = test_execution.thread_amount
    elif suite_amount_workers:
        threads = suite_amount_workers

    settings_default_driver = test_execution.settings['default_browser']
    drivers = utils.choose_driver_by_precedence(
        cli_drivers=test_execution.cli_drivers,
        suite_drivers=suite_drivers,
        settings_default_driver=settings_default_driver)

    # check if drivers are remote
    remote_browsers = settings_manager.get_remote_browsers(test_execution.settings)
    default_browsers = gui_utils.get_supported_browsers_suggestions()
    drivers_temp = []
    for driver in drivers:
        if driver in remote_browsers:
            remote_browser = test_execution.settings['remote_browsers'][driver]
            drivers_temp.append({
                'name': remote_browser['browserName'],
                'full_name': driver,
                'remote': True,
                'capabilities': remote_browser
            })
        elif driver in default_browsers:
            drivers_temp.append({
                'name': driver,
                'full_name': '',
                'remote': False,
                'capabilities': None
            })
        else:
            msg = ['Error: the browser {} is not defined\n'.format(driver),
                   'available options are:\n',
                   '\n'.join(default_browsers),
                   '\n'.join(remote_browsers)]
            sys.exit(''.join(msg))
    drivers = drivers_temp

    # A timestamp is passed when the test is executed from the GUI;
    # otherwise, a timestamp should be generated at this point.
    # The timestamp is used to identify this unique execution of the test or suite.
    if not test_execution.timestamp:
        test_execution.timestamp = utils.get_timestamp()

    # select which environments to use
    project_envs = environment_manager.get_envs(project)
    envs = []
    if test_execution.cli_environments:
        # use the environments passed through the command line if available
        envs = test_execution.cli_environments
    elif suite_envs:
        # use the environments defined in the suite
        envs = suite_envs
    elif project_envs:
        # if there are available envs, use the first one by default
        envs = [project_envs[0]]
    else:
        # execute using a blank environment
        envs = ['']

    envs_data = environment_manager.get_environment_data(project)

    # Get the test data for each test present in the list of tests.
    # For each test in the list, for each data set, env and driver
    # combination, append an entry to the execution_list.
    execution_list = []
    for test_case in tests:
        data_sets = utils.get_test_data(workspace, project, test_case)
        for data_set in data_sets:
            for env in envs:
                data_set_env = dict(data_set)
                if env in envs_data:
                    env_data = envs_data[env]
                    # add env_data to the data set
                    data_set_env['env'] = env_data
                    data_set_env['env']['name'] = env
                for driver in drivers:
                    execution_list.append({
                        'test_name': test_case,
                        'data_set': data_set_env,
                        'driver': driver,
                        'report_directory': None
                    })

    if is_suite:
        execution_directory = report.create_suite_execution_directory(
            test_execution.root_path, test_execution.project,
            report_suite_name, test_execution.timestamp)
    else:
        execution_directory = report.create_test_execution_directory(
            test_execution.root_path, test_execution.project,
            test, test_execution.timestamp)

    for test in execution_list:
        # generate a report directory for this test
        report_directory = report.create_report_directory(execution_directory,
                                                          test['test_name'],
                                                          is_suite)
        test['report_directory'] = report_directory

    # run suite `before` function
    if suite:
        if hasattr(suite_module, 'before'):
            suite_module.before()

    if test_execution.interactive and threads != 1:
        print('Error: to run in debug mode, threads must equal one')
    elif threads == 1:
        # run tests serially
        for test in execution_list:
            run_test(test_execution.root_path, test_execution.project,
                     test['test_name'], test['data_set'], test['driver'],
                     test_execution.settings, test['report_directory'])
    else:
        # run the list of tests using multiprocessing
        multiprocess_executor(execution_list, is_suite, execution_directory, threads)

    # run suite `after` function
    if suite:
        if hasattr(suite_module, 'after'):
            suite_module.after()
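# A standalone sketch of the env-data merge above, with hypothetical values:
# each data set is copied, and the selected environment's data is attached
# under the 'env' key together with the env name (the env data is copied here
# to avoid mutating the shared envs_data dict).
envs_data = {'staging': {'url': 'https://staging.example.com'}}
data_set = {'user': 'u1'}
env = 'staging'

data_set_env = dict(data_set)
if env in envs_data:
    data_set_env['env'] = dict(envs_data[env])
    data_set_env['env']['name'] = env

assert data_set_env == {'user': 'u1',
                        'env': {'url': 'https://staging.example.com',
                                'name': 'staging'}}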
def get_environments():
    project = request.form['project']
    return json.dumps(environment_manager.get_envs(root_path, project))