def run_command(project='', test_query='', browsers=None, processes=1,
                environments=None, interactive=False, timestamp=None,
                reports=None, report_folder=None, report_name=None, tags=None):
    execution_runner = ExecutionRunner(browsers, processes, environments,
                                       interactive, timestamp, reports,
                                       report_folder, report_name, tags)
    if project:
        if utils.project_exists(project):
            execution_runner.project = project
            session.settings = settings_manager.get_project_settings(project)
            # add --interactive value to settings to make
            # it available from inside a test
            session.settings['interactive'] = interactive
            if test_query:
                if suite_module.suite_exists(project, test_query):
                    execution_runner.run_suite(test_query)
                elif test_case.test_case_exists(project, test_query):
                    execution_runner.run_test(test_query)
                else:
                    if test_query == '.':
                        test_query = ''
                    path = os.path.join(session.testdir, 'projects', project,
                                        'tests', test_query)
                    if os.path.isdir(path):
                        execution_runner.run_directory(test_query)
                    else:
                        msg = ('golem run: error: the value {} does not match '
                               'an existing test, suite or directory'.format(test_query))
                        sys.exit(msg)
            else:
                print(messages.RUN_USAGE_MSG)
                test_cases = utils.get_test_cases(project)
                print('Test Cases:')
                utils.display_tree_structure_command_line(test_cases['sub_elements'])
                test_suites = utils.get_suites(project)
                print('\nTest Suites:')
                # TODO print suites in structure
                for suite in test_suites['sub_elements']:
                    print(' ' + suite['name'])
        else:
            msg = ('golem run: error: the project {} does not exist'.format(project))
            sys.exit(msg)
    elif interactive:
        interactive_module.interactive(session.settings, browsers)
    else:
        print(messages.RUN_USAGE_MSG)
        print('Projects:')
        for project in utils.get_projects():
            print(' {}'.format(project))
def test_get_internal_test_data(self, testdir_fixture, project_fixture):
    test_name = 'test_get_internal_test_data'
    input_data = [
        {
            'col1': "'a'",
            'col2': "'b'"
        },
        {
            'col1': "'c'",
            'col2': "'d'",
        }
    ]
    test_case.new_test_case(testdir_fixture['path'], project_fixture['name'],
                            [], test_name)
    test_steps = {
        'setup': [],
        'test': [],
        'teardown': []
    }
    test_execution.settings = settings_manager.get_project_settings(
        testdir_fixture['path'], project_fixture['name'])
    test_execution.settings['test_data'] = 'infile'
    test_case.save_test_case(testdir_fixture['path'], project_fixture['name'],
                             test_name, '', [], test_steps, input_data)
    internal_data = test_data.get_internal_test_data(testdir_fixture['path'],
                                                     project_fixture['name'],
                                                     test_name)
    assert internal_data == input_data
def test_run_test__failure_in_test(self, project_function_clean, caplog, test_utils):
    """test() throws AssertionError
    teardown() is run
    """
    testdir, project = project_function_clean.activate()
    test_name = test_utils.random_numeric_string(10)
    content = """
description = 'desc'
def setup(data):
    step('setup step')
def test(data):
    step('test step')
    fail('test fail')
def teardown(data):
    step('teardown step')
"""
    self._create_test(testdir, project, test_name, content)
    report_directory = self._mock_report_directory(testdir, project, test_name)
    settings = settings_manager.get_project_settings(project)
    browser = _define_browsers_mock(['chrome'])[0]
    test_runner.run_test(testdir=testdir, project=project, test_name=test_name,
                         test_data={}, secrets={}, browser=browser,
                         settings=settings, report_directory=report_directory)
    # verify console logs
    records = caplog.records
    assert records[6].message == 'Test Result: FAILURE'
    # verify report.json
    report = self._read_report_json(report_directory)
    assert report['result'] == 'failure'
    assert len(report['steps']) == 4
    assert len(report['errors']) == 1
    assert report['errors'][0]['message'] == 'AssertionError: test fail'
def test_get_test_data(self, testdir_fixture, project_fixture):
    input_data = [
        {
            'col1': 'a',
            'col2': 'b'
        },
        {
            'col1': 'c',
            'col2': 'd',
        }
    ]
    test_execution.settings = settings_manager.get_project_settings(
        testdir_fixture['path'], project_fixture['name'])
    test_execution.settings['test_data'] = 'csv'
    test_case.new_test_case(testdir_fixture['path'], project_fixture['name'],
                            [], 'test_get_data')
    test_data.save_external_test_data_file(testdir_fixture['path'],
                                           project_fixture['name'],
                                           'test_get_data', input_data)
    returned_data = test_data.get_test_data(testdir_fixture['path'],
                                            project_fixture['name'],
                                            'test_get_data')
    assert returned_data == input_data
def test_get_project_settings_default(self, project_function_clean):
    testdir = project_function_clean.testdir
    project = project_function_clean.name
    project_settings = settings_manager.get_project_settings(testdir, project)
    expected = {
        'console_log_level': 'INFO',
        'default_browser': 'chrome',
        'chromedriver_path': './drivers/chromedriver*',
        'edgedriver_path': './drivers/edgedriver*',
        'geckodriver_path': './drivers/geckodriver*',
        'iedriver_path': './drivers/iedriver*',
        'operadriver_path': './drivers/operadriver*',
        'search_timeout': 20,
        'wait_displayed': False,
        'log_all_events': True,
        'remote_browsers': {},
        'remote_url': 'http://localhost:4444/wd/hub',
        'screenshot_on_end': False,
        'screenshot_on_error': True,
        'screenshot_on_step': False,
        'test_data': 'csv',
        'wait_hook': None,
        'start_maximized': True
    }
    assert project_settings == expected
def test_run_single_test_with_two_sets(self, project_class, test_utils, capsys):
    """Run a single test with two data sets.
    It should display the number of tests and test sets found.
    """
    testdir, project = project_class.activate()
    test_name = 'foo002'
    timestamp = utils.get_timestamp()
    session.settings = settings_manager.get_project_settings(project)
    content = ('data = [{"foo": 1}, {"foo": 2}]\n'
               'def test(data):\n'
               ' pass\n')
    test_utils.create_test(project, test_name, content=content)
    execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                  timestamp=timestamp)
    execution_runner.project = project
    execution_runner.run_test(test_name)
    out, err = capsys.readouterr()
    # number of tests is displayed
    assert 'Tests found: 1 (2 sets)' in out
    test_report_dir = os.path.join(testdir, 'projects', project, 'reports',
                                   'single_tests', test_name, timestamp)
    assert os.path.isdir(test_report_dir)
    items = os.listdir(test_report_dir)
    # two test set dirs + report.json
    assert len(items) == 3
def test_run_test__success_with_data(self, project_function_clean, caplog, test_utils):
    """Test runs successfully with test data"""
    testdir = project_function_clean.testdir
    project = project_function_clean.name
    test_name = test_utils.random_numeric_string(10)
    content = """
description = 'some description'
def setup(data):
    step('setup step')
def test(data):
    step('test step')
def teardown(data):
    step('teardown step')
"""
    self._create_test(testdir, project, test_name, content)
    report_directory = self._mock_report_directory(testdir, project, test_name)
    settings = settings_manager.get_project_settings(testdir, project)
    browser = _define_browsers_mock(['chrome'])[0]
    test_data = dict(username='******', password='******')
    secrets = dict(very='secret')
    # run test
    test_runner.run_test(workspace=testdir, project=project,
                         test_name=test_name, test_data=test_data,
                         secrets=secrets, browser=browser, settings=settings,
                         report_directory=report_directory)
    # verify console logs
    records = caplog.records
    assert records[0].message == 'Test execution started: {}'.format(test_name)
    assert records[1].message == 'Browser: chrome'
    # Python 3.4 results not in order TODO
    value_a = 'Using data:\n username: username1\n password: password1\n'
    value_b = 'Using data:\n password: password1\n username: username1\n'
    assert records[2].message in [value_a, value_b]
    assert records[3].message == 'setup step'
    assert records[4].message == 'test step'
    assert records[5].message == 'teardown step'
    assert records[6].message == 'Test Result: SUCCESS'
    # verify report.json
    report = self._read_report_json(report_directory)
    assert report['browser'] == 'chrome'
    assert report['description'] == 'some description'
    assert report['environment'] == ''
    assert report['errors'] == []
    assert report['result'] == 'success'
    # Python 3.4 TODO
    assert report['set_name'] in ['username1', 'password1']
    assert report['steps'] == [
        {'message': 'setup step', 'screenshot': None, 'error': None},
        {'message': 'test step', 'screenshot': None, 'error': None},
        {'message': 'teardown step', 'screenshot': None, 'error': None},
    ]
    assert report['test_case'] == test_name
    assert report['test_data'] == {'username': "******", 'password': "******"}
    assert 'test_elapsed_time' in report
    assert 'test_timestamp' in report
    assert len(report.keys()) == 11
def test_run_test__exception_in_test(self, project_function_clean, caplog, test_utils):
    """test() throws exception"""
    testdir = project_function_clean.testdir
    project = project_function_clean.name
    test_name = test_utils.random_numeric_string(10)
    content = """
description = 'desc'
def setup(data):
    step('setup step')
def test(data):
    foo = bar
def teardown(data):
    step('teardown step')
"""
    self._create_test(testdir, project, test_name, content)
    report_directory = self._mock_report_directory(testdir, project, test_name)
    settings = settings_manager.get_project_settings(testdir, project)
    browser = _define_browsers_mock(['chrome'])[0]
    test_runner.run_test(workspace=testdir, project=project,
                         test_name=test_name, test_data={}, secrets={},
                         browser=browser, settings=settings,
                         report_directory=report_directory)
    # verify console logs
    records = caplog.records
    assert records[5].message == 'Test Result: CODE ERROR'
    # verify report.json
    report = self._read_report_json(report_directory)
    assert report['result'] == 'code error'
    assert len(report['steps']) == 3
    assert len(report['errors']) == 1
    assert report['errors'][0]['message'] == "NameError: name 'bar' is not defined"
def activate(self):
    if not self.settings:
        self.settings = settings_manager.get_project_settings(self.testdir, self.name)
    test_execution.root_path = self.testdir
    test_execution.settings = self.settings
    return self
def project_supported_browsers():
    project = request.args['project']
    _verify_permissions(Permissions.READ_ONLY, project)
    settings = settings_manager.get_project_settings(project)
    remote_browsers = settings_manager.get_remote_browser_list(settings)
    default_browsers = gui_utils.get_supported_browsers_suggestions()
    return jsonify(remote_browsers + default_browsers)
def test_get_project_settings_default(self, project_class):
    testdir = project_class['testdir']
    project = project_class['name']
    project_settings = settings_manager.get_project_settings(testdir, project)
    expected = {
        'console_log_level': 'INFO',
        'default_browser': 'chrome',
        'chromedriver_path': './drivers/chromedriver*',
        'edgedriver_path': './drivers/edgedriver*',
        'geckodriver_path': './drivers/geckodriver*',
        'iedriver_path': './drivers/iedriver*',
        'operadriver_path': './drivers/operadriver*',
        # 'safari_path': './drivers/safari*',
        'search_timeout': 20,
        'wait_displayed': False,
        'log_all_events': True,
        'remote_browsers': {},
        'remote_url': 'http://localhost:4444/wd/hub',
        'screenshot_on_end': False,
        'screenshot_on_error': True,
        'screenshot_on_step': False,
        'test_data': 'csv',
        'wait_hook': None,
        'results_to_db': False,
        'db_string': '',
        'db_type': 'TINYDB',
        'db_name': 'testdb'
    }
    assert project_settings == expected
def save_test_case(project, full_test_case_name, description, page_objects,
                   test_steps, test_data, tags):
    """Save test case contents to file.
    full_test_case_name is a relative dot path to the test
    """
    test_case_path = test_file_path(project, full_test_case_name)
    formatted_description = _format_description(description)
    with open(test_case_path, 'w', encoding='utf-8') as f:
        # write description
        f.write('\n')
        f.write(formatted_description)
        f.write('\n')
        # write tags
        f.write('tags = {}\n'.format(_format_tags_string(tags)))
        f.write('\n')
        # write the list of pages
        f.write('pages = {}\n'.format(_format_page_object_string(page_objects)))
        f.write('\n')
        # write test data if required or save test data to external file
        settings = settings_manager.get_project_settings(project)
        if settings['test_data'] == 'infile':
            if test_data:
                f.write('data = {}'.format(_format_data(test_data)))
            test_data_module.remove_csv_if_exists(project, full_test_case_name)
        else:
            test_data_module.save_external_test_data_file(
                project, full_test_case_name, test_data)
        # write the setup function
        f.write('def setup(data):\n')
        if test_steps['setup']:
            for step in test_steps['setup']:
                step_action = step['action'].replace(' ', '_')
                param_str = ', '.join(step['parameters'])
                f.write(' {0}({1})\n'.format(step_action, param_str))
        else:
            f.write(' pass\n')
        f.write('\n')
        # write the test function
        f.write('def test(data):\n')
        if test_steps['test']:
            for step in test_steps['test']:
                step_action = step['action'].replace(' ', '_')
                param_str = ', '.join(step['parameters'])
                f.write(' {0}({1})\n'.format(step_action, param_str))
        else:
            f.write(' pass\n')
        f.write('\n')
        # write the teardown function
        f.write('def teardown(data):\n')
        if test_steps['teardown']:
            for step in test_steps['teardown']:
                step_action = step['action'].replace(' ', '_')
                param_str = ', '.join(step['parameters'])
                f.write(' {0}({1})\n'.format(step_action, param_str))
        else:
            f.write(' pass\n')
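# Illustrative usage sketch for save_test_case() above (not part of the source).
# The project, test and page names, step actions and data values below are
# hypothetical; the shapes follow what the function reads: each step is a dict
# with an 'action' string and a 'parameters' list of raw code strings, and
# test_data is a list of row dicts whose values are written verbatim into the
# generated test file (or to an external data file when 'test_data' != 'infile').
example_steps = {
    'setup': [{'action': 'navigate', 'parameters': ["'http://localhost'"]}],
    'test': [{'action': 'click', 'parameters': ['page1.submit_button']}],
    'teardown': []
}
example_data = [{'username': "'user1'", 'password': "'pass1'"}]
save_test_case('my_project', 'folder.my_test', 'Example description',
               ['page1'], example_steps, example_data, tags=['smoke'])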
def runfix(project_module, test_utils):
    """A fixture that
    Uses a project fix with module scope,
    Creates a random test
    Creates a report directory for a future execution
    Gets the settings and browser values required to run test
    Can run the test provided the test code
    Can read the json report
    """
    testdir, project = project_module.activate()
    test_name = test_utils.create_random_test(project)
    timestamp = utils.get_timestamp()
    exec_dir = _mock_report_directory(project, execution_name=test_name,
                                      timestamp=timestamp)
    settings = settings_manager.get_project_settings(project)
    browser = _define_browsers_mock(['chrome'])[0]
    env_name = None

    def set_content(test_content):
        test_module.edit_test_code(project, test_name, test_content)

    def run_test(code, test_data={}, secrets={}, from_suite=False, set_name=''):
        set_content(code)
        test_runner.run_test(testdir, project, test_name, test_data, secrets,
                             browser, env_name, settings, exec_dir,
                             set_name=set_name, test_functions=[],
                             from_suite=from_suite)

    def read_report(set_name=''):
        return _read_report_json(exec_dir, test_name, set_name=set_name)

    fix = SimpleNamespace(testdir=testdir, project=project, test_name=test_name,
                          report_directory=exec_dir, settings=settings,
                          browser=browser, set_content=set_content,
                          run_test=run_test, read_report=read_report)
    return fix
def test_run_test__import_error_page_object(self, project_function_clean, caplog, test_utils):
    """The test fails with 'code error' when an imported page
    has a syntax error
    """
    testdir = project_function_clean.testdir
    project = project_function_clean.name
    test_name = test_utils.random_numeric_string(10)
    content = """
pages = ['page1']
def setup(data):
    step('this step wont be run')
def test(data):
    step('this step wont be run')
def teardown(data):
    step('this step wont be run')
"""
    self._create_test(testdir, project, test_name, content)
    page_content = """
element1 = ('id', 'someId'
element2 = ('css', '.oh.no')
"""
    self._create_page(testdir, project, 'page1', page_content)
    report_directory = self._mock_report_directory(testdir, project, test_name)
    settings = settings_manager.get_project_settings(testdir, project)
    browser = _define_browsers_mock(['chrome'])[0]
    test_runner.run_test(workspace=testdir, project=project,
                         test_name=test_name, test_data={}, secrets={},
                         browser=browser, settings=settings,
                         report_directory=report_directory)
    # verify console logs
    records = caplog.records
    assert records[0].message == 'Test execution started: {}'.format(test_name)
    assert records[1].message == 'Browser: chrome'
    assert records[2].levelname == 'ERROR'
    error_contains = "element2 = ('css', '.oh.no')\n ^\nSyntaxError: invalid syntax"
    assert error_contains in records[2].message
    assert records[3].message == 'Test Result: CODE ERROR'
    # verify report.json
    report = self._read_report_json(report_directory)
    assert report['browser'] == 'chrome'
    assert report['description'] is None  # description could not be read
    assert report['environment'] == ''
    assert len(report['errors']) == 1
    assert 'SyntaxError: invalid syntax' in report['errors'][0]['message']
    assert error_contains in report['errors'][0]['description']
    assert report['result'] == 'code error'
    assert report['set_name'] == ''
    assert report['steps'] == []
    assert report['test_case'] == test_name
    assert report['test_data'] == {}
    assert 'test_elapsed_time' in report
    assert 'test_timestamp' in report
    assert len(report.keys()) == 11
def edit_test_code(project, test_name, content, table_test_data):
    path = Test(project, test_name).path
    with open(path, 'w', encoding='utf-8') as f:
        f.write(content)
    # save test data
    settings = settings_manager.get_project_settings(project)
    if settings['test_data'] == 'csv':
        # save csv data
        test_data_module.save_external_test_data_file(project, test_name,
                                                      table_test_data)
    elif settings['test_data'] == 'infile':
        # remove csv files
        test_data_module.remove_csv_if_exists(project, test_name)
def save_settings():
    if request.method == 'POST':
        projectname = request.json['project']
        project_settings = request.json['projectSettings']
        global_settings = request.json['globalSettings']
        result = {'result': 'ok', 'errors': []}
        settings_manager.save_settings(projectname, project_settings,
                                       global_settings)
        # re-read settings
        test_execution.settings = settings_manager.get_project_settings(
            root_path, projectname)
        return json.dumps(result)
def get_actions(self, project_name=None):
    if self.actions is None:
        self._get_actions()
    if project_name:
        settings = settings_manager.get_project_settings(project_name)
    else:
        settings = settings_manager.get_global_settings()
    if settings['implicit_actions_import']:
        return self.actions
    else:
        return self.explicit_actions
def test_run_test__success(self, project_function_clean, caplog, test_utils):
    """Test runs successfully"""
    testdir = project_function_clean.testdir
    project = project_function_clean.name
    test_name = test_utils.random_numeric_string(10)
    content = """
description = 'some description'
def setup(data):
    step('setup step')
def test(data):
    step('test step')
def teardown(data):
    step('teardown step')
"""
    self._create_test(testdir, project, test_name, content)
    report_directory = self._mock_report_directory(testdir, project, test_name)
    settings = settings_manager.get_project_settings(testdir, project)
    browser = _define_browsers_mock(['chrome'])[0]
    # run test
    test_runner.run_test(workspace=testdir, project=project,
                         test_name=test_name, test_data={}, secrets={},
                         browser=browser, settings=settings,
                         report_directory=report_directory)
    # verify console logs
    records = caplog.records
    assert records[0].message == 'Test execution started: {}'.format(test_name)
    assert records[1].message == 'Browser: chrome'
    assert records[2].message == 'setup step'
    assert records[3].message == 'test step'
    assert records[4].message == 'teardown step'
    assert records[5].message == 'Test Result: SUCCESS'
    # verify report.json
    report = self._read_report_json(report_directory)
    assert report['browser'] == 'chrome'
    assert report['description'] == 'some description'
    assert report['environment'] == ''
    assert report['errors'] == []
    assert report['result'] == 'success'
    assert report['set_name'] == ''
    assert report['steps'] == [
        {'message': 'setup step', 'screenshot': None, 'error': None},
        {'message': 'test step', 'screenshot': None, 'error': None},
        {'message': 'teardown step', 'screenshot': None, 'error': None},
    ]
    assert report['test_case'] == test_name
    assert report['test_data'] == {}
    assert 'test_elapsed_time' in report
    assert 'test_timestamp' in report
    assert len(report.keys()) == 11
def save_settings():
    if request.method == 'POST':
        projectname = request.json['project']
        project_settings = request.json['projectSettings']
        global_settings = request.json['globalSettings']
        result = {
            'result': 'ok',
            'errors': []
        }
        settings_manager.save_settings(projectname, project_settings,
                                       global_settings)
        # re-read settings
        test_execution.settings = settings_manager.get_project_settings(root_path, projectname)
        return json.dumps(result)
def test_run_single_test(self, project_class, test_utils):
    testdir, project = project_class.activate()
    test_name = 'foo001'
    timestamp = utils.get_timestamp()
    session.settings = settings_manager.get_project_settings(project)
    test_utils.create_test(project, [], test_name)
    execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                  timestamp=timestamp)
    execution_runner.project = project
    execution_runner.run_test(test_name)
    test_report_dir = os.path.join(testdir, 'projects', project, 'reports',
                                   'single_tests', test_name, timestamp)
    assert os.path.isdir(test_report_dir)
    items = os.listdir(test_report_dir)
    # test set dir + report.json
    assert len(items) == 2
def save_settings():
    if request.method == 'POST':
        project = request.json['project']
        project_settings = request.json['projectSettings']
        global_settings = request.json['globalSettings']
        result = {
            'result': 'ok',
            'errors': []
        }
        settings_manager.save_global_settings(global_settings)
        session.settings = settings_manager.get_global_settings()
        if project_settings:
            settings_manager.save_project_settings(project, project_settings)
            # re-read project settings
            session.settings = settings_manager.get_project_settings(project)
        return json.dumps(result)
def test_run_test__AssertionError_in_setup(self, project_function_clean, caplog, test_utils):
    """The test ends with 'failure' when the setup function throws AssertionError.
    Test is not run
    Teardown is run
    """
    testdir = project_function_clean.testdir
    project = project_function_clean.name
    test_name = test_utils.random_numeric_string(10)
    content = """
description = 'desc'
def setup(data):
    fail('setup step fail')
def test(data):
    step('test step')
def teardown(data):
    step('teardown step')
"""
    self._create_test(testdir, project, test_name, content)
    report_directory = self._mock_report_directory(testdir, project, test_name)
    settings = settings_manager.get_project_settings(testdir, project)
    browser = _define_browsers_mock(['chrome'])[0]
    test_runner.run_test(workspace=testdir, project=project,
                         test_name=test_name, test_data={}, secrets={},
                         browser=browser, settings=settings,
                         report_directory=report_directory)
    # verify console logs
    records = caplog.records
    assert records[0].message == 'Test execution started: {}'.format(test_name)
    assert records[1].message == 'Browser: chrome'
    assert records[2].levelname == 'ERROR'
    assert 'setup step fail' in records[2].message
    assert 'AssertionError: setup step fail' in records[2].message
    assert records[3].message == 'teardown step'
    assert records[4].message == 'Test Result: FAILURE'
    # verify report.json
    report = self._read_report_json(report_directory)
    assert report['description'] == 'desc'
    assert len(report['errors']) == 1
    assert 'setup step fail' in report['errors'][0]['message']
    assert report['result'] == 'failure'
    assert report['steps'][0]['message'] == 'Failure'
    assert 'AssertionError: setup step fail' in report['steps'][0]['error']['description']
    assert report['steps'][1]['message'] == 'teardown step'
def save_test_case_code(project, full_test_case_name, content, table_test_data):
    """Save test case contents string to file.
    full_test_case_name is a relative dot path to the test.
    """
    test_case_path = test_file_path(project, full_test_case_name)
    with open(test_case_path, 'w') as test_file:
        test_file.write(content)
    # save test data
    settings = settings_manager.get_project_settings(project)
    if settings['test_data'] == 'csv':
        # save csv data
        test_data_module.save_external_test_data_file(project, full_test_case_name,
                                                      table_test_data)
    elif settings['test_data'] == 'infile':
        # remove csv files
        test_data_module.remove_csv_if_exists(project, full_test_case_name)
def test_run_test__import_error_on_test(self, project_function_clean, caplog, test_utils):
    """The test fails with 'code error' when it has a syntax error
    Test result is code error
    """
    testdir = project_function_clean.testdir
    project = project_function_clean.name
    test_name = test_utils.random_numeric_string(10)
    content = """
description = 'some description'
# missing colon
def test(data)
    step('this step wont be run')
"""
    self._create_test(testdir, project, test_name, content)
    report_directory = self._mock_report_directory(testdir, project, test_name)
    settings = settings_manager.get_project_settings(testdir, project)
    browser = _define_browsers_mock(['chrome'])[0]
    test_runner.run_test(workspace=testdir, project=project,
                         test_name=test_name, test_data={}, secrets={},
                         browser=browser, settings=settings,
                         report_directory=report_directory)
    # verify console logs
    records = caplog.records
    assert records[0].message == 'Test execution started: {}'.format(test_name)
    assert records[1].message == 'Browser: chrome'
    assert records[2].levelname == 'ERROR'
    error_contains = 'def test(data)\n ^\nSyntaxError: invalid syntax'
    assert error_contains in records[2].message
    assert records[3].message == 'Test Result: CODE ERROR'
    # verify report.json
    report = self._read_report_json(report_directory)
    assert report['browser'] == 'chrome'
    assert report['description'] is None  # description could not be read
    assert report['environment'] == ''
    assert len(report['errors']) == 1
    assert report['errors'][0]['message'] == 'SyntaxError: invalid syntax'
    assert error_contains in report['errors'][0]['description']
    assert report['result'] == 'code error'
    assert report['set_name'] == ''
    assert report['steps'] == []
    assert report['test_case'] == test_name
    assert report['test_data'] == {}
def test_run_single_test_filter_by_tags(self, project_class, test_utils):
    """Run a single test with filtering by tags"""
    testdir, project = project_class.activate()
    test_name = 'foo003'
    timestamp = utils.get_timestamp()
    session.settings = settings_manager.get_project_settings(project)
    content = ('tags = ["alfa", "bravo"]\n'
               'def test(data):\n'
               ' pass\n')
    test_utils.create_test(project, [], test_name, content=content)
    execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                  timestamp=timestamp,
                                                  tags=['alfa'])
    execution_runner.project = project
    execution_runner.run_test(test_name)
    test_report_dir = os.path.join(testdir, 'projects', project, 'reports',
                                   'single_tests', test_name, timestamp)
    assert os.path.isdir(test_report_dir)
    items = os.listdir(test_report_dir)
    # test set dir + report.json
    assert len(items) == 2
def runfix(project_class, test_utils):
    """A fixture that
    Uses a project fix with class scope,
    Creates a random test
    Creates a report directory for a future execution
    Gets the settings and browser values required to run test
    Can run the test provided the test code
    Can read the json report
    """
    testdir, project = project_class.activate()
    test_name = test_utils.create_random_test(project)
    report_directory = _mock_report_directory(testdir, project, test_name)
    settings = settings_manager.get_project_settings(project)
    browser = _define_browsers_mock(['chrome'])[0]
    env_name = None

    def run_test(code, test_data={}, secrets={}, from_suite=False):
        test_module.edit_test_code(project, test_name, code, [])
        test_runner.run_test(testdir, project, test_name, test_data, secrets,
                             browser, env_name, settings, report_directory,
                             from_suite=from_suite)

    def read_report():
        return _read_report_json(report_directory)

    fix = SimpleNamespace(testdir=testdir, project=project, test_name=test_name,
                          report_directory=report_directory, settings=settings,
                          browser=browser, run_test=run_test,
                          read_report=read_report)
    return fix
def test_get_project_settings_default(self, random_project_fixture):
    testdir = random_project_fixture['testdir']
    project = random_project_fixture['name']
    project_settings = settings_manager.get_project_settings(testdir, project)
    expected = {
        'chromedriver_path': './drivers/chromedriver',
        'console_log_level': 'INFO',
        'default_browser': 'chrome',
        'geckodriver_path': './drivers/geckodriver',
        'iedriver_path': './drivers/iedriver.exe',
        'implicit_wait': 20,
        'log_all_events': True,
        'remote_browsers': {},
        'remote_url': 'http://localhost:4444/wd/hub',
        'screenshot_on_end': False,
        'screenshot_on_error': True,
        'screenshot_on_step': False,
        'test_data': 'csv',
        'wait_hook': None
    }
    assert project_settings == expected
def test_set_execution_module_runner_values(self, project_class, test_utils):
    testdir, project = project_class.activate()
    test_name = test_utils.create_random_test(project)
    test = test_module.Test(project, test_name)
    report_directory = _mock_report_directory(testdir, project, test_name)
    settings = settings_manager.get_project_settings(project)
    browser = _define_browsers_mock(['chrome'])[0]
    test_data = {}
    secrets = {}
    env_name = 'foo'
    runner = test_runner.TestRunner(testdir, project, test_name, test_data,
                                    secrets, browser, env_name, settings,
                                    report_directory)
    runner._set_execution_module_values()
    from golem import execution
    attrs = [x for x in dir(execution) if not x.startswith('_')]
    assert len(attrs) == 20
    assert execution.browser is None
    assert execution.browser_definition == browser
    assert execution.browsers == {}
    assert execution.steps == []
    assert execution.data == {}
    assert execution.secrets == {}
    assert execution.description is None
    assert execution.errors == []
    assert execution.settings == settings
    assert execution.test_name == test_name
    assert execution.test_dirname == test.dirname
    assert execution.test_path == test.path
    assert execution.project_name == project
    assert execution.project_path == test.project.path
    assert execution.testdir == testdir
    assert execution.report_directory == report_directory
    assert execution.logger is None
    assert execution.timers == {}
    assert execution.tags == []
    assert execution.environment == env_name
def test_run_test__error_in_setup_test_and_teardown(self, project_function_clean, caplog, test_utils):
    """setup(), test() and teardown() have errors"""
    testdir, project = project_function_clean.activate()
    test_name = test_utils.random_numeric_string(10)
    content = """
description = 'desc'
def setup(data):
    error('setup error')
def test(data):
    error('test error')
def teardown(data):
    error('teardown error')
"""
    self._create_test(testdir, project, test_name, content)
    report_directory = self._mock_report_directory(testdir, project, test_name)
    settings = settings_manager.get_project_settings(project)
    browser = _define_browsers_mock(['chrome'])[0]
    test_runner.run_test(testdir=testdir, project=project, test_name=test_name,
                         test_data={}, secrets={}, browser=browser,
                         settings=settings, report_directory=report_directory)
    # verify console logs
    records = caplog.records
    assert records[5].message == 'Test Result: ERROR'
    # verify report.json
    report = self._read_report_json(report_directory)
    assert report['result'] == 'error'
    assert len(report['steps']) == 3
    assert len(report['errors']) == 3
    assert report['errors'][0]['message'] == 'setup error'
    assert report['errors'][1]['message'] == 'test error'
    assert report['errors'][2]['message'] == 'teardown error'
def run(self, test_execution, args):
    test_execution.thread_amount = args.threads
    test_execution.cli_drivers = args.browsers
    test_execution.cli_environments = args.environments
    test_execution.timestamp = args.timestamp
    test_execution.interactive = args.interactive
    root_path = test_execution.root_path
    if args.project and args.test_or_suite:
        if not args.project in utils.get_projects(root_path):
            msg = ['Error: the project {0} does not exist'.format(args.project),
                   '', 'Usage:', self._parser.usage, '', 'Projects:']
            for proj in utils.get_projects(root_path):
                msg.append(' {}'.format(proj))
            raise CommandException('\n'.join(msg))
        else:
            test_execution.project = args.project
            test_execution.settings = settings_manager.get_project_settings(root_path, args.project)
            if utils.test_suite_exists(root_path, test_execution.project,
                                       args.test_or_suite):
                test_execution.suite = args.test_or_suite
                # execute test suite
                start_execution.run_test_or_suite(root_path, test_execution.project,
                                                  suite=test_execution.suite)
            elif utils.test_case_exists(root_path, test_execution.project,
                                        args.test_or_suite):
                test_execution.test = args.test_or_suite
                # execute test case
                start_execution.run_test_or_suite(root_path, test_execution.project,
                                                  test=test_execution.test)
            else:
                # test_or_suite does not match any existing suite or test
                msg = [('Error: the value {0} does not match an existing '
                        'suite or test'.format(args.test_or_suite)),
                       '', 'Usage:', self._parser.usage]
                raise CommandException('\n'.join(msg))
    elif not args.project and not args.test_or_suite and test_execution.interactive:
        from golem.test_runner import interactive
        interactive.interactive(test_execution.settings, test_execution.cli_drivers)
    elif not args.project:
        msg = ['Usage:', self._parser.usage, '', 'Projects:']
        for proj in utils.get_projects(root_path):
            msg.append(' {}'.format(proj))
        raise CommandException('\n'.join(msg))
    elif args.project and not args.test_or_suite:
        msg = ['Usage: {}'.format(self._parser.usage), '', 'Test Cases:']
        print('\n'.join(msg))
        test_cases = utils.get_test_cases(root_path, args.project)
        utils.display_tree_structure_command_line(test_cases['sub_elements'])
        print('\nTest Suites:')
        test_suites = utils.get_suites(root_path, args.project)
        for suite in test_suites['sub_elements']:
            print(' ' + suite['name'])
        raise CommandException()
    else:
        # test_or_suite does not match any existing suite or test
        raise CommandException('Error: the value {0} does not match an existing '
                               'suite or test'.format(args.test_or_suite))
def get_supported_browsers():
    project = request.form['project']
    settings = settings_manager.get_project_settings(root_path, project)
    remote_browsers = settings_manager.get_remote_browsers(settings)
    default_browsers = gui_utils.get_supported_browsers_suggestions()
    return json.dumps(remote_browsers + default_browsers)
def test_get_project_settings_default(self, project_function_clean):
    _, project = project_function_clean.activate()
    project_settings = settings_manager.get_project_settings(project)
    assert project_settings == DEFAULT_PREDEFINED