def _execute(self):
    self.start_time = time.time()
    suite_error = False

    # run suite `before` function
    if self.suite.before:
        try:
            self.suite.before.__call__()
        except:
            print('ERROR: suite before function failed')
            print(traceback.format_exc())

    if not suite_error:
        if self.interactive and self.execution.processes != 1:
            print('WARNING: to run in debug mode, processes must equal one')
        if self.execution.processes == 1:
            # run tests serially
            for test in self.execution.tests:
                run_test(session.testdir, self.project, test.name,
                         test.data_set, test.secrets, test.browser, test.env,
                         session.settings, test.reportdir,
                         self.execution.has_failed_tests,
                         self.execution.tags, self.is_suite)
        else:
            # run tests using multiprocessing
            multiprocess_executor(self.project, self.execution.tests,
                                  self.execution.has_failed_tests,
                                  self.execution.processes,
                                  self.execution.tags, self.is_suite)

    # run suite `after` function
    if self.suite.after:
        try:
            self.suite.after.__call__()
        except:
            print('ERROR: suite after function failed')
            print(traceback.format_exc())

    self._finalize()
def execute(self):
    start_time = time.time()
    suite_error = False

    # run suite `before` function
    if self.suite.before:
        try:
            self.suite.before.__call__()
        except:
            print('ERROR: suite before function failed')
            print(traceback.format_exc())

    if not suite_error:
        if self.interactive and self.execution.processes != 1:
            print('WARNING: to run in debug mode, threads must equal one')
        if self.execution.processes == 1:
            # run tests serially
            for test in self.execution.tests:
                run_test(test_execution.root_path, self.project, test.name,
                         test.data_set, test.browser, test_execution.settings,
                         test.reportdir)
        else:
            # run tests using multiprocessing
            multiprocess_executor(self.project, self.execution.tests,
                                  self.execution.processes)

    # run suite `after` function
    if self.suite.after:
        try:
            self.suite.after.__call__()
        except:
            print('ERROR: suite after function failed')
            print(traceback.format_exc())

    # generate execution_result.json
    elapsed_time = round(time.time() - start_time, 2)
    report_parser.generate_execution_report(self.execution.reportdir, elapsed_time)
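# NOTE (illustrative): both methods above hand the parallel branch to
# `multiprocess_executor`, whose implementation is not shown in this section.
# The sketch below assumes such an executor simply fans `run_test`-style calls
# out over a multiprocessing.Pool; the `run_test_func` parameter and the
# argument tuple mirror the execution_list dictionaries used further below and
# are placeholders, not the real signature.
from multiprocessing import Pool


def multiprocess_executor_sketch(run_test_func, execution_list, workers=1):
    """Run every entry of execution_list in a pool of worker processes."""
    pool = Pool(processes=workers)
    async_results = []
    for test in execution_list:
        args = (test['test_name'], test['data_set'],
                test['driver'], test['report_directory'])
        # apply_async returns immediately; the pool runs the call in a child process
        async_results.append(pool.apply_async(run_test_func, args=args))
    pool.close()
    pool.join()  # block until every scheduled test has finished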
def run_test_or_suite(workspace, project, test=None, suite=None,
                      directory_suite=None):
    '''run a test, a suite or a "directory suite"'''
    tests = []
    threads = 1
    suite_amount_workers = None
    suite_drivers = None
    suite_envs = []
    drivers = []
    suite_module = None
    report_suite_name = None
    is_suite = False

    # get test list
    if test:
        tests = [test]
        report_suite_name = 'single_tests'
    elif suite:
        tests = utils.get_suite_test_cases(workspace, project, suite)
        suite_amount_workers = utils.get_suite_amount_of_workers(workspace, project, suite)
        suite_drivers = utils.get_suite_browsers(workspace, project, suite)
        suite_envs = utils.get_suite_environments(workspace, project, suite)
        suite_module = utils.get_suite_module(test_execution.root_path,
                                              test_execution.project, suite)
        report_suite_name = suite
        is_suite = True
    elif directory_suite:
        tests = utils.get_directory_suite_test_cases(workspace, project, directory_suite)
        report_suite_name = directory_suite
        is_suite = True
    else:
        sys.exit("ERROR: invalid arguments for run_test_or_suite()")

    # get threads
    if test_execution.thread_amount:
        # the thread count passed through cli has higher priority
        threads = test_execution.thread_amount
    elif suite_amount_workers:
        threads = suite_amount_workers

    settings_default_driver = test_execution.settings['default_browser']
    drivers = utils.choose_driver_by_precedence(
        cli_drivers=test_execution.cli_drivers,
        suite_drivers=suite_drivers,
        settings_default_driver=settings_default_driver)

    # check if drivers are remote
    remote_browsers = settings_manager.get_remote_browsers(test_execution.settings)
    default_browsers = gui_utils.get_supported_browsers_suggestions()
    drivers_temp = []
    for driver in drivers:
        if driver in remote_browsers:
            remote_browser = test_execution.settings['remote_browsers'][driver]
            drivers_temp.append({
                'name': remote_browser['browserName'],
                'full_name': driver,
                'remote': True,
                'capabilities': remote_browser
            })
        elif driver in default_browsers:
            drivers_temp.append({
                'name': driver,
                'full_name': '',
                'remote': False,
                'capabilities': None
            })
        else:
            msg = ['Error: the browser {} is not defined\n'.format(driver),
                   'available options are:\n',
                   '\n'.join(default_browsers),
                   '\n'.join(remote_browsers)]
            sys.exit(''.join(msg))
    drivers = drivers_temp

    # timestamp is passed when the test is executed from the GUI,
    # otherwise, a timestamp should be generated at this point
    # the timestamp is used to identify this unique execution of the test or suite
    if not test_execution.timestamp:
        test_execution.timestamp = utils.get_timestamp()

    # select which environments to use for this execution
    project_envs = environment_manager.get_envs(project)
    envs = []
    if test_execution.cli_environments:
        # use the environments passed through command line if available
        envs = test_execution.cli_environments
    elif suite_envs:
        # use the environments defined in the suite
        envs = suite_envs
    elif project_envs:
        # if there are available envs, try to use the first by default
        envs = [project_envs[0]]
    else:
        # execute using a blank environment
        envs = ['']
    envs_data = environment_manager.get_environment_data(project)

    # get test data for each test present in the list of tests
    # for each test in the list, for each data set, environment and driver
    # combination, append an entry to the execution_list
    execution_list = []
    for test_case in tests:
        data_sets = utils.get_test_data(workspace, project, test_case)
        for data_set in data_sets:
            for env in envs:
                data_set_env = dict(data_set)
                if env in envs_data:
                    # add env_data to data_set
                    env_data = envs_data[env]
                    data_set_env['env'] = env_data
                    data_set_env['env']['name'] = env
                for driver in drivers:
                    execution_list.append({
                        'test_name': test_case,
                        'data_set': data_set_env,
                        'driver': driver,
                        'report_directory': None
                    })

    if is_suite:
        execution_directory = report.create_suite_execution_directory(
            test_execution.root_path, test_execution.project,
            report_suite_name, test_execution.timestamp)
    else:
        execution_directory = report.create_test_execution_directory(
            test_execution.root_path, test_execution.project, test,
            test_execution.timestamp)

    for test in execution_list:
        # generate a report directory for this test
        report_directory = report.create_report_directory(
            execution_directory, test['test_name'], is_suite)
        test['report_directory'] = report_directory

    # run suite `before` function
    if suite:
        if hasattr(suite_module, 'before'):
            suite_module.before()

    if test_execution.interactive:
        if threads == 1:
            # run tests serially
            for test in execution_list:
                run_test(test_execution.root_path, test_execution.project,
                         test['test_name'], test['data_set'], test['driver'],
                         test_execution.settings, test['report_directory'])
        else:
            print('Error: to run in debug mode, threads must equal one')
    else:
        # run list of tests using multiprocessing
        multiprocess_executor(execution_list, is_suite, execution_directory, threads)

    # run suite `after` function
    if suite:
        if hasattr(suite_module, 'after'):
            suite_module.after()
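# NOTE (illustrative): run_test_or_suite imports the suite as a module, reads
# its test list, worker count, browsers and environments through the utils
# getters, and calls its optional `before`/`after` hooks. A suite module along
# the following lines would satisfy that contract; the attribute names and
# values are illustrative, not a spec of the suite file format.

# suites/smoke.py (hypothetical)
browsers = ['chrome']
environments = []
workers = 2
tests = ['login_test', 'checkout_test']


def before():
    # runs once before any test of the suite is executed
    print('starting smoke suite')


def after():
    # runs once after every test of the suite has finished
    print('smoke suite finished')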
def _execute(self):
    start_time = time.time()
    suite_error = False

    # run suite `before` function
    if self.suite.before:
        try:
            self.suite.before.__call__()
        except:
            print('ERROR: suite before function failed')
            print(traceback.format_exc())

    if not suite_error:
        if self.interactive and self.execution.processes != 1:
            print('WARNING: to run in debug mode, threads must equal one')
        if self.execution.processes == 1:
            # run tests serially
            for test in self.execution.tests:
                run_test(test_execution.root_path, self.project, test.name,
                         test.data_set, test.secrets, test.browser,
                         test_execution.settings, test.reportdir,
                         self.execution.has_failed_tests)
        else:
            # run tests using multiprocessing
            multiprocess_executor(self.project, self.execution.tests,
                                  self.execution.has_failed_tests,
                                  self.execution.processes)

    # run suite `after` function
    if self.suite.after:
        try:
            self.suite.after.__call__()
        except:
            print('ERROR: suite after function failed')
            print(traceback.format_exc())

    # generate report.json
    elapsed_time = round(time.time() - start_time, 2)
    self.report = report_parser.generate_execution_report(self.execution.reportdir,
                                                          elapsed_time)
    if self.is_suite:
        self._print_results()

    # generate requested reports
    if self.is_suite:
        report_name = self.report_name or 'report'
        report_folder = self.report_folder or self.execution.reportdir
        if 'junit' in self.reports:
            report_parser.generate_junit_report(self.execution.reportdir,
                                                self.suite_name, self.timestamp,
                                                self.report_folder, report_name)
        if 'json' in self.reports and (self.report_folder or self.report_name):
            report_parser.save_execution_json_report(self.report, report_folder,
                                                     report_name)
        if 'html' in self.reports:
            gui_utils.generate_html_report(self.project, self.suite_name,
                                           self.timestamp, self.report_folder,
                                           report_name)
        if 'html-no-images' in self.reports:
            if 'html' in self.reports:
                report_name = report_name + '-no-images'
            gui_utils.generate_html_report(self.project, self.suite_name,
                                           self.timestamp, self.report_folder,
                                           report_name, no_images=True)

    # exit to the console with exit status code 1 in case a test fails
    if self.execution.has_failed_tests.value:
        sys.exit(1)
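# NOTE (illustrative): `_execute` shares `has_failed_tests` with both the
# serial and the multiprocessing paths and later reads `.value` to pick the
# exit code, which points to a process-safe flag. A minimal sketch of that
# pattern, assuming a multiprocessing.Value; the helper names are illustrative
# and not part of the code above.
from multiprocessing import Value


def make_failure_flag():
    """Create an int flag that can be shared with worker processes."""
    return Value('i', 0)


def mark_failed(has_failed_tests):
    """Set the shared flag from any process when a test fails."""
    has_failed_tests.value = 1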
def run_test_or_suite(workspace, project, test=None, suite=None, directory=None):
    """Run a suite or test or directory containing tests."""
    execution = {
        'tests': [],
        'workers': 1,
        'drivers': [],
        'environments': [],
        'suite_before': None,
        'suite_after': None
    }
    suite_amount_workers = None
    suite_drivers = None
    suite_envs = []
    suite_name = None
    is_suite = False

    if test:
        execution['tests'] = [test]
        suite_name = 'single_tests'
    elif suite:
        execution['tests'] = suite_module.get_suite_test_cases(workspace, project, suite)
        suite_amount_workers = suite_module.get_suite_amount_of_workers(workspace, project, suite)
        suite_drivers = suite_module.get_suite_browsers(workspace, project, suite)
        suite_envs = suite_module.get_suite_environments(workspace, project, suite)
        suite_imported_module = suite_module.get_suite_module(workspace, project, suite)
        execution['suite_before'] = getattr(suite_imported_module, 'before', None)
        execution['suite_after'] = getattr(suite_imported_module, 'after', None)
        suite_name = suite
        is_suite = True
    elif directory:
        execution['tests'] = utils.get_directory_test_cases(workspace, project, directory)
        suite_name = directory
        is_suite = True
    else:
        sys.exit("ERROR: invalid arguments for run_test_or_suite()")

    # warn if no tests were found
    if len(execution['tests']) == 0:
        print('Warning: no tests were found')

    # get amount of workers (parallel executions), default is 1
    if test_execution.thread_amount:
        # the thread count passed through cli has higher priority
        execution['workers'] = test_execution.thread_amount
    elif suite_amount_workers:
        execution['workers'] = suite_amount_workers

    # select the drivers to use in this execution
    # the order of precedence is:
    # 1. drivers defined by CLI
    # 2. drivers defined inside a suite
    # 3. 'default_driver' setting
    # 4. the default is 'chrome'
    settings_default_driver = test_execution.settings['default_browser']
    selected_drivers = utils.choose_driver_by_precedence(
        cli_drivers=test_execution.cli_drivers,
        suite_drivers=suite_drivers,
        settings_default_driver=settings_default_driver)

    # Define the attributes for each driver
    #
    # A driver can be predefined ('chrome', 'chrome-headless', 'firefox', etc)
    # or it can be defined by the user with the 'remote_browsers' setting.
    # Remote browsers have extra details such as capabilities
    #
    # Each driver must have the following attributes:
    # 'name': real name,
    # 'full_name': the remote_browser name defined by the user,
    # 'remote': is this a remote_browser or not,
    # 'capabilities': full capabilities defined in the remote_browsers setting
    remote_browsers = settings_manager.get_remote_browsers(test_execution.settings)
    default_browsers = gui_utils.get_supported_browsers_suggestions()
    execution['drivers'] = _define_drivers(selected_drivers, remote_browsers,
                                           default_browsers)

    # Generate timestamp if needed
    # A timestamp is passed when the test is executed from the GUI.
    # The gui uses this timestamp to fetch the test execution status later on.
    # Otherwise, a new timestamp should be generated at this point
    if not test_execution.timestamp:
        test_execution.timestamp = utils.get_timestamp()

    # Select which envs to use
    # The user can define environments in the environments.json file.
    # The suite/test can be executed in one or more of these environments.
    # Which environments to use is defined by this order of preference:
    # 1. envs passed by CLI
    # 2. envs defined inside the suite
    # 3. the first env defined for the project
    # 4. no envs at all
    #
    # Note: in the case of 4, the test might fail if it tries
    # to use env variables
    cli_envs = test_execution.cli_environments
    project_envs = environment_manager.get_envs(workspace, project)
    execution['environments'] = _select_environments(cli_envs, suite_envs, project_envs)

    # Generate the execution list
    #
    # Each test must be executed for each:
    # * data set
    # * environment
    # * driver
    #
    # The result is a list that contains all the requested combinations
    execution_list = _define_execution_list(workspace, project, execution)

    # create the execution directory
    #
    # if this is a suite, the directory takes this structure:
    #   reports/<suite_name>/<timestamp>/
    #
    # if this is a single test, the directory takes this structure:
    #   reports/single_tests/<test_name>/<timestamp>/
    execution_directory = _create_execution_directory(workspace, project,
                                                      test_execution.timestamp,
                                                      test_name=test,
                                                      suite_name=suite_name,
                                                      is_suite=is_suite)

    # for each test, create the test report directory
    # for example, in a suite 'suite1' with a 'test1':
    #   reports/suite1/2017.07.02.19.22.20.001/test1/set_00001/
    for test in execution_list:
        report_directory = report.create_report_directory(execution_directory,
                                                          test['test_name'],
                                                          is_suite)
        test['report_directory'] = report_directory

    # EXECUTION
    start_time = time.time()
    suite_error = False

    # run suite `before` function
    if execution['suite_before']:
        try:
            execution['suite_before'].__call__()
        except:
            print('ERROR: suite before function failed')
            print(traceback.format_exc())

    if not suite_error:
        if test_execution.interactive and execution['workers'] != 1:
            print('WARNING: to run in debug mode, threads must equal one')
        if execution['workers'] == 1:
            # run tests serially
            for test in execution_list:
                run_test(workspace, project, test['test_name'], test['data_set'],
                         test['driver'], test_execution.settings,
                         test['report_directory'])
        else:
            # run tests using multiprocessing
            multiprocess_executor(execution_list, execution['workers'])

    # run suite `after` function
    if execution['suite_after']:
        try:
            execution['suite_after'].__call__()
        except:
            print('ERROR: suite after function failed')
            print(traceback.format_exc())

    # generate execution_result.json
    elapsed_time = round(time.time() - start_time, 2)
    report_parser.generate_execution_report(execution_directory, elapsed_time)
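# NOTE (illustrative): `_select_environments` and `_define_drivers` are called
# in the refactored version above but are not shown in this section. The
# sketches below reconstruct them from the documented order of preference and
# from the inline driver loop in the earlier version of run_test_or_suite;
# they are approximations, and the shape of `remote_browsers` (assumed here to
# map names to capability dicts) is an assumption.
import sys


def _select_environments(cli_envs, suite_envs, project_envs):
    """Return the envs to use: CLI > suite > first project env > blank env."""
    if cli_envs:
        return cli_envs
    elif suite_envs:
        return suite_envs
    elif project_envs:
        return [project_envs[0]]
    else:
        return ['']


def _define_drivers(drivers, remote_browsers, default_browsers):
    """Attach name/full_name/remote/capabilities attributes to each driver."""
    drivers_def = []
    for driver in drivers:
        if driver in remote_browsers:
            remote_browser = remote_browsers[driver]
            drivers_def.append({'name': remote_browser['browserName'],
                                'full_name': driver,
                                'remote': True,
                                'capabilities': remote_browser})
        elif driver in default_browsers:
            drivers_def.append({'name': driver,
                                'full_name': '',
                                'remote': False,
                                'capabilities': None})
        else:
            msg = ['Error: the browser {} is not defined\n'.format(driver),
                   'available options are:\n',
                   '\n'.join(default_browsers),
                   '\n'.join(remote_browsers)]
            sys.exit(''.join(msg))
    return drivers_def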
def run_test_or_suite(workspace, project, test=None, suite=None,
                      directory_suite=None):
    '''run a test, a suite or a "directory suite"'''
    tests = []
    threads = 1
    suite_amount_workers = None
    suite_drivers = None
    drivers = []
    suite_module = None
    report_suite_name = None
    is_suite = False

    # get test list
    if test:
        tests = [test]
        report_suite_name = 'single_tests'
    elif suite:
        tests = utils.get_suite_test_cases(workspace, project, suite)
        suite_amount_workers = utils.get_suite_amount_of_workers(
            workspace, project, suite)
        suite_drivers = utils.get_suite_browsers(workspace, project, suite)
        suite_module = utils.get_suite_module(test_execution.root_path,
                                              test_execution.project, suite)
        report_suite_name = suite
        is_suite = True
    elif directory_suite:
        tests = utils.get_directory_suite_test_cases(workspace, project,
                                                     directory_suite)
        report_suite_name = directory_suite
        is_suite = True
    else:
        sys.exit("ERROR: invalid arguments for run_test_or_suite()")

    # get threads
    if test_execution.thread_amount:
        # the thread count passed through cli has higher priority
        threads = test_execution.thread_amount
    elif suite_amount_workers:
        threads = suite_amount_workers

    drivers = utils.choose_driver_by_precedence(
        cli_drivers=test_execution.cli_drivers,
        suite_drivers=suite_drivers,
        settings_default_driver=test_execution.settings['default_driver'])

    # timestamp is passed when the test is executed from the GUI,
    # otherwise, a timestamp should be generated at this point
    # the timestamp is used to identify this unique execution of the test or suite
    if not test_execution.timestamp:
        test_execution.timestamp = utils.get_timestamp()

    # get test data for each test present in the list of tests
    # for each test in the list, for each data set and driver combination
    # append an entry to the execution_list
    execution_list = []
    for test_case in tests:
        data_sets = utils.get_test_data(workspace, project, test_case)
        for data_set in data_sets:
            for driver in drivers:
                execution_list.append({
                    'test_name': test_case,
                    'data_set': vars(data_set),
                    'driver': driver,
                    'report_directory': None
                })

    if is_suite:
        execution_directory = report.create_suite_execution_directory(
            test_execution.root_path, test_execution.project,
            report_suite_name, test_execution.timestamp)
    else:
        execution_directory = report.create_test_execution_directory(
            test_execution.root_path, test_execution.project, test,
            test_execution.timestamp)

    for test in execution_list:
        # generate a report directory for this test
        report_directory = report.create_report_directory(
            execution_directory, test['test_name'], is_suite)
        test['report_directory'] = report_directory

    # run suite `before` function
    if suite:
        if hasattr(suite_module, 'before'):
            suite_module.before()

    if test_execution.interactive:
        if threads == 1:
            # run tests serially
            for test in execution_list:
                run_test(test_execution.root_path, test_execution.project,
                         test['test_name'], test['data_set'], test['driver'],
                         test_execution.settings, test['report_directory'])
        else:
            print('Error: to run in debug mode, threads must equal one')
    else:
        # run list of tests using multiprocessing
        multiprocess_executor(execution_list, is_suite, execution_directory, threads)

    # run suite `after` function
    if suite:
        if hasattr(suite_module, 'after'):
            suite_module.after()
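# NOTE (illustrative): based on the signature alone, run_test_or_suite takes a
# workspace and project plus exactly one of test, suite or directory_suite;
# the names below are placeholders, not real project contents.
# run_test_or_suite(workspace, 'my_project', test='login_test')
# run_test_or_suite(workspace, 'my_project', suite='smoke')
# run_test_or_suite(workspace, 'my_project', directory_suite='regression')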