def test_case_view(project, test_case_name):
    """Render the test-builder page for a test case.

    Returns the no-permission page when the user lacks 'gui' access to
    the project, and a syntax-error page when the test file fails
    Python syntax validation.
    """
    # Permission gate: only users with 'gui' access may open the builder.
    if not user.has_permissions_to_project(g.user.id, project, root_path, 'gui'):
        return render_template('not_permission.html')
    base_name, _ = utils.separate_file_from_parents(test_case_name)
    file_path = test_case.generate_test_case_path(root_path, project, test_case_name)
    syntax_error = utils.validate_python_file_syntax(file_path)
    if syntax_error:
        # The builder cannot display a file that does not parse.
        return render_template('test_builder/test_case_syntax_error.html',
                               project=project,
                               full_test_case_name=test_case_name)
    contents = test_case.get_test_case_content(root_path, project, test_case_name)
    data = test_data_module.get_test_data(root_path, project, test_case_name)
    return render_template('test_builder/test_case.html',
                           project=project,
                           test_case_contents=contents,
                           test_case_name=base_name,
                           full_test_case_name=test_case_name,
                           test_data=data)
def _define_execution_list(self):
    """Build the list of test definitions to execute.

    Produces one SimpleNamespace entry for every combination of:
        - test
        - data set
        - environment
        - browser
    """
    result = []
    environments = self.execution.envs or ['']
    environment_data = environment_manager.get_environment_data(self.project)
    project_secrets = secrets_manager.get_secrets(self.project)
    for test_name in self.tests:
        for dataset in test_data.get_test_data(self.project, test_name):
            for env_name in environments:
                combined = dict(dataset)
                if env_name in environment_data:
                    # Expose the environment values (and its name) to the test.
                    combined['env'] = environment_data[env_name]
                    combined['env']['name'] = env_name
                for browser in self.execution.browsers:
                    result.append(SimpleNamespace(name=test_name,
                                                  data_set=combined,
                                                  secrets=project_secrets,
                                                  browser=browser,
                                                  reportdir=None))
    return result
def test_get_test_data_from_csv(self, project_class, test_utils):
    """when there is csv and infile data, csv has priority"""
    testdir = project_class['testdir']
    project = project_class['name']
    test_name = test_utils.random_string(5, 'test')
    tests_dir = os.path.join(testdir, 'projects', project, 'tests')
    # Infile data that should be shadowed by the CSV file.
    infile_source = ("data = {\n"
                     "    'key1': 'value1',\n"
                     "    'key2': 'value2',\n"
                     "}\n")
    with open(os.path.join(tests_dir, test_name + '.py'), 'w+') as fh:
        fh.write(infile_source)
    with open(os.path.join(tests_dir, test_name + '.csv'), 'w+') as fh:
        fh.write('key1,key2\nvalue3,value4\n')
    returned = test_data.get_test_data(testdir, project, test_name)
    # CSV values win over the infile dict.
    assert returned == [{'key1': 'value3', 'key2': 'value4'}]
def test_case_view(project, test_case_name):
    """Render the test-builder page for an existing test case.

    Aborts with 404 when the test does not exist; shows an error page
    with a link to the code editor when the module fails to import.
    """
    if not test_case.test_case_exists(test_execution.root_path, project,
                                      test_case_name):
        abort(404, 'The test {} does not exist'.format(test_case_name))
    base_name, _ = utils.separate_file_from_parents(test_case_name)
    file_path = test_case.generate_test_case_path(root_path, project, test_case_name)
    _, import_error = utils.import_module(file_path)
    if import_error:
        # Point the user at the raw code editor, where the error can be fixed.
        editor_url = url_for('test_case_code_view', project=project,
                             test_case_name=test_case_name)
        content = ('<h4>There are errors in the test</h4>'
                   '<p>There are errors and the test cannot be displayed, '
                   'open the test code editor to solve them.</p>'
                   '<a class="btn btn-default" href="{}">Open Test Code</a>'
                   .format(editor_url))
        return render_template('common_element_error.html',
                               project=project,
                               item_name=test_case_name,
                               content=content)
    contents = test_case.get_test_case_content(root_path, project, test_case_name)
    data = test_data_module.get_test_data(root_path, project, test_case_name,
                                          repr_strings=True)
    return render_template('test_builder/test_case.html',
                           project=project,
                           test_case_contents=contents,
                           test_case_name=base_name,
                           full_test_case_name=test_case_name,
                           test_data=data)
def test_case_view(project, test_name):
    """Render the test-builder page for *test_name*.

    Aborts with 404 for a missing test; shows an error page linking to
    the code editor when the test module fails to import.
    """
    test = Test(project, test_name)
    if not test.exists:
        abort(404, 'The test {} does not exist'.format(test_name))
    base_name, _ = utils.separate_file_from_parents(test_name)
    _, import_error = utils.import_module(test.path)
    if import_error:
        # Broken module: send the user to the code editor instead.
        editor_url = url_for('webapp.test_case_code_view', project=project,
                             test_name=test_name)
        content = ('<h4>There are errors in the test</h4>'
                   '<p>There are errors and the test cannot be displayed, '
                   'open the test code editor to solve them.</p>'
                   '<a class="btn btn-default" href="{}">Open Test Code</a>'
                   .format(editor_url))
        return render_template('common_element_error.html',
                               project=project,
                               item_name=test_name,
                               content=content)
    data = test_data_module.get_test_data(project, test_name, repr_strings=True)
    return render_template('test_builder/test_case.html',
                           project=project,
                           test_components=test.components,
                           test_case_name=base_name,
                           full_test_case_name=test_name,
                           test_data=data)
def test_get_test_data(self, testdir_fixture, project_fixture):
    """Rows saved to an external CSV data file are returned by get_test_data."""
    testdir = testdir_fixture['path']
    project = project_fixture['name']
    rows = [
        {'col1': 'a', 'col2': 'b'},
        {'col1': 'c', 'col2': 'd'},
    ]
    test_execution.settings = settings_manager.get_project_settings(testdir, project)
    # Force the CSV data source for this test.
    test_execution.settings['test_data'] = 'csv'
    test_case.new_test_case(testdir, project, [], 'test_get_data')
    test_data.save_external_test_data_file(testdir, project, 'test_get_data', rows)
    assert test_data.get_test_data(testdir, project, 'test_get_data') == rows
def _define_execution_list(workspace, project, execution):
    """Build the execution list for a run.

    *execution* is a dict with keys 'environments', 'tests',
    'test_sets' and 'drivers'. One entry is generated per combination
    of test, data set, environment and driver.
    """
    execution_list = []
    envs_data = environment_manager.get_environment_data(workspace, project)
    for test_name in execution['tests']:
        data_sets = test_data.get_test_data(workspace, project, test_name)
        for data_set in data_sets:
            for env_name in execution['environments']:
                data_set_with_env = dict(data_set)
                if env_name in envs_data:
                    # Make the environment values and its name visible to the test.
                    data_set_with_env['env'] = envs_data[env_name]
                    data_set_with_env['env']['name'] = env_name
                for driver in execution['drivers']:
                    execution_list.append({
                        'test_name': test_name,
                        'data_set': data_set_with_env,
                        'driver': driver,
                        'report_directory': None,
                    })
    return execution_list
def test_case_view(project, test_case_name):
    """Render the test-builder page, guarding on permissions and syntax.

    Users without 'gui' permission get the no-permission page; files
    with Python syntax errors get a dedicated error page.
    """
    has_access = user.has_permissions_to_project(g.user.id, project,
                                                 root_path, 'gui')
    if not has_access:
        return render_template('not_permission.html')
    name_only, _parents = utils.separate_file_from_parents(test_case_name)
    tc_path = test_case.generate_test_case_path(root_path, project, test_case_name)
    syntax_error = utils.validate_python_file_syntax(tc_path)
    if syntax_error:
        return render_template('test_builder/test_case_syntax_error.html',
                               project=project,
                               full_test_case_name=test_case_name)
    else:
        tc_contents = test_case.get_test_case_content(root_path, project,
                                                      test_case_name)
        tc_data = test_data_module.get_test_data(root_path, project,
                                                 test_case_name)
        return render_template('test_builder/test_case.html',
                               project=project,
                               test_case_contents=tc_contents,
                               test_case_name=name_only,
                               full_test_case_name=test_case_name,
                               test_data=tc_data)
def test_get_test_data_no_data(self, project_class, test_utils):
    """when the test defines no data at all, a single empty data set is returned"""
    _, project = project_class.activate()
    test_name = test_utils.random_string(5, 'test')
    # Test file with no `data` variable and no external data file.
    test_content = "there_is = 'no data'\n"
    test_path = os.path.join(project_class.path, 'tests', test_name + '.py')
    with open(test_path, 'w+') as f:
        f.write(test_content)
    returned_data = test_data.get_test_data(project, test_name)
    # One empty data set so the test still executes exactly once.
    assert returned_data == [{}]
def test_get_test_data_from_infile(self, project_class, test_utils):
    """The infile `data` dict is returned when it is the only data source."""
    _, project = project_class.activate()
    test_name = test_utils.random_string(5, 'test')
    source = ("data = {\n"
              "    'key1': 'value1',\n"
              "    'key2': 'value2',\n"
              "}\n")
    file_path = os.path.join(project_class.path, 'tests', test_name + '.py')
    with open(file_path, 'w+') as fh:
        fh.write(source)
    returned = test_data.get_test_data(project, test_name)
    assert returned == [{'key1': 'value1', 'key2': 'value2'}]
def test_get_test_data(self, project_class, test_utils):
    """get_test_data returns csv, json and internal data sources together."""
    _, project = project_class.activate()
    test_name = test_utils.create_random_test(project)
    csv_rows = [{'a': ' b'}]
    json_string = '[{"c": "d"}]'
    test_data.save_csv_test_data(project, test_name, csv_rows)
    test_data.save_json_test_data(project, test_name, json_string)
    # Overwrite the test file with an inline `data` dict.
    with open(test.Test(project, test_name).path, 'w+') as fh:
        fh.write("data = {'e': 'f'}")
    result = test_data.get_test_data(project, test_name)
    assert result == {
        'csv': csv_rows,
        'json': json_string,
        # The internal source is returned normalized/pretty-printed.
        'internal': "data = {\n    'e': 'f',\n}",
    }
def _define_execution_list(workspace, project, execution):
    """Return one execution entry per (test, data set, environment, driver)."""
    entries = []
    env_data_by_name = environment_manager.get_environment_data(workspace, project)
    for test_name in execution['tests']:
        for data_set in test_data.get_test_data(workspace, project, test_name):
            for env_name in execution['environments']:
                merged = dict(data_set)
                if env_name in env_data_by_name:
                    # Inject the environment values and its name into the data set.
                    merged['env'] = env_data_by_name[env_name]
                    merged['env']['name'] = env_name
                for driver in execution['drivers']:
                    entries.append({'test_name': test_name,
                                    'data_set': merged,
                                    'driver': driver,
                                    'report_directory': None})
    return entries
def run_test_or_suite(workspace, project, test=None, suite=None,
                      directory_suite=None):
    '''run a test, a suite or a "directory suite"

    Exactly one of *test*, *suite* or *directory_suite* must be given.
    Builds the execution list (test x data set x environment x driver),
    creates the report directories and runs the tests serially or with
    multiprocessing depending on the resolved thread count.
    Reads and mutates the global `test_execution` singleton (settings,
    timestamp) as a side effect.
    '''
    tests = []
    threads = 1
    suite_amount_workers = None
    suite_drivers = None
    suite_envs = []
    drivers = []
    suite_module = None
    report_suite_name = None
    is_suite = False
    # get test list
    if test:
        tests = [test]
        report_suite_name = 'single_tests'
    elif suite:
        tests = utils.get_suite_test_cases(workspace, project, suite)
        suite_amount_workers = utils.get_suite_amount_of_workers(
            workspace, project, suite)
        suite_drivers = utils.get_suite_browsers(workspace, project, suite)
        suite_envs = utils.get_suite_environments(workspace, project, suite)
        suite_module = utils.get_suite_module(test_execution.root_path,
                                              test_execution.project,
                                              suite)
        report_suite_name = suite
        is_suite = True
        if len(tests) == 0:
            print('Warning: suite {} does not have tests'.format(suite))
    elif directory_suite:
        tests = utils.get_directory_suite_test_cases(workspace, project,
                                                     directory_suite)
        report_suite_name = directory_suite
        is_suite = True
    else:
        sys.exit("ERROR: invalid arguments for run_test_or_suite()")
    # get threads
    if test_execution.thread_amount:
        # the thread count passed through cli has higher priority
        threads = test_execution.thread_amount
    elif suite_amount_workers:
        threads = suite_amount_workers
    settings_default_driver = test_execution.settings['default_browser']
    # precedence: cli drivers > suite drivers > settings default
    drivers = utils.choose_driver_by_precedence(
        cli_drivers=test_execution.cli_drivers,
        suite_drivers=suite_drivers,
        settings_default_driver=settings_default_driver)
    # check if drivers are remote
    remote_browsers = settings_manager.get_remote_browsers(
        test_execution.settings)
    default_browsers = gui_utils.get_supported_browsers_suggestions()
    # normalize each driver name into a dict describing the browser
    drivers_temp = []
    for driver in drivers:
        if driver in remote_browsers:
            remote_browser = test_execution.settings['remote_browsers'][driver]
            _ = {
                'name': remote_browser['browserName'],
                'full_name': driver,
                'remote': True,
                'capabilities': remote_browser
            }
            drivers_temp.append(_)
        elif driver in default_browsers:
            _ = {
                'name': driver,
                'full_name': '',
                'remote': False,
                'capabilities': None
            }
            drivers_temp.append(_)
        else:
            # unknown browser name: abort with the available options
            msg = [
                'Error: the browser {} is not defined\n'.format(driver),
                'available options are:\n',
                '\n'.join(default_browsers),
                '\n'.join(remote_browsers)
            ]
            sys.exit(''.join(msg))
    drivers = drivers_temp
    # timestamp is passed when the test is executed from the GUI,
    # otherwise, a timestamp should be generated at this point
    # the timestamp is used to identify this unique execution of the test or suite
    if not test_execution.timestamp:
        test_execution.timestamp = utils.get_timestamp()
    # resolve the environments to run against
    project_envs = environment_manager.get_envs(project)
    envs = []
    if test_execution.cli_environments:
        # use the environments passed through command line if available
        envs = test_execution.cli_environments
    elif suite_envs:
        # use the environments defined in the suite
        envs = suite_envs
    elif project_envs:
        # if there are available envs, try to use the first by default
        envs = [project_envs[0]]
    else:
        # execute using a blank environment
        envs = ['']
    envs_data = environment_manager.get_environment_data(project)
    # get test data for each test present in the list of tests
    # for each test in the list, for each data set and driver combination
    # append an entry to the execution_list
    execution_list = []
    for test_case in tests:
        data_sets = test_data.get_test_data(workspace, project, test_case)
        for data_set in data_sets:
            for env in envs:
                data_set_env = dict(data_set)
                if env in envs_data:
                    env_data = envs_data[env]
                    # adding env_data to data_set
                    data_set_env['env'] = env_data
                    data_set_env['env']['name'] = env
                for driver in drivers:
                    execution_list.append({
                        'test_name': test_case,
                        'data_set': data_set_env,
                        'driver': driver,
                        'report_directory': None
                    })
    # create the top-level directory that holds this execution's reports
    if is_suite:
        execution_directory = report.create_suite_execution_directory(
            test_execution.root_path, test_execution.project,
            report_suite_name, test_execution.timestamp)
    else:
        execution_directory = report.create_test_execution_directory(
            test_execution.root_path, test_execution.project,
            test, test_execution.timestamp)
    #
    for test in execution_list:
        # generate a report directory for this test
        report_directory = report.create_report_directory(
            execution_directory, test['test_name'], is_suite)
        test['report_directory'] = report_directory
    # run the suite's before() hook, if defined
    if suite:
        if hasattr(suite_module, 'before'):
            suite_module.before()
    if test_execution.interactive and threads != 1:
        print('Error: to run in debug mode, threads must equal one')
    if threads == 1:
        # run tests serially
        for test in execution_list:
            run_test(test_execution.root_path, test_execution.project,
                     test['test_name'], test['data_set'], test['driver'],
                     test_execution.settings, test['report_directory'])
    else:
        # run list of tests using multiprocessing
        multiprocess_executor(execution_list, is_suite,
                              execution_directory, threads)
    # run the suite's after() hook, if defined
    if suite:
        if hasattr(suite_module, 'after'):
            suite_module.after()