def test_define_execution_list_multiple_tests(self, testdir_fixture):
    """Verify that the execution list is generated properly when there
    are multiple tests in the list
    """
    root_path = testdir_fixture['path']
    project = create_random_project(root_path)
    # first test: has an external data file with two rows
    test_name_one = 'new_test_case_one'
    test_case.new_test_case(root_path, project, [], test_name_one)
    data_rows = [
        {'col1': 'a', 'col2': 'b'},
        {'col1': 'c', 'col2': 'd'},
    ]
    test_data.save_external_test_data_file(root_path, project,
                                           test_name_one, data_rows)
    # second test: no data file
    test_name_two = 'new_test_case_two'
    test_case.new_test_case(root_path, project, [], test_name_two)
    execution = {
        'tests': [test_name_one, test_name_two],
        'workers': 1,
        'drivers': ['chrome'],
        'environments': [''],
        'suite_before': None,
        'suite_after': None,
    }
    execution_list = start_execution._define_execution_list(root_path,
                                                            project,
                                                            execution)
    # one entry per data set of test one, plus one entry for test two
    expected_list = [
        {'test_name': 'new_test_case_one',
         'data_set': {'col1': 'a', 'col2': 'b'},
         'driver': 'chrome',
         'report_directory': None},
        {'test_name': 'new_test_case_one',
         'data_set': {'col1': 'c', 'col2': 'd'},
         'driver': 'chrome',
         'report_directory': None},
        {'test_name': 'new_test_case_two',
         'data_set': {},
         'driver': 'chrome',
         'report_directory': None},
    ]
    assert execution_list == expected_list
def test_define_execution_list(self, testdir_fixture):
    """Verify that the execution list is generated properly when there's
    only one test without datasets, one driver and zero environments
    """
    root_path = testdir_fixture['path']
    project = create_random_project(root_path)
    test_name = 'new_test_case_001'
    test_case.new_test_case(root_path, project, [], test_name)
    execution = {
        'tests': [test_name],
        'workers': 1,
        'drivers': ['chrome'],
        'environments': [''],
        'suite_before': None,
        'suite_after': None,
    }
    execution_list = start_execution._define_execution_list(root_path,
                                                            project,
                                                            execution)
    # a single test with no data sets yields exactly one entry
    expected_list = [
        {'test_name': 'new_test_case_001',
         'data_set': {},
         'driver': 'chrome',
         'report_directory': None},
    ]
    assert execution_list == expected_list
def test_get_test_data(self, testdir_fixture, project_fixture):
    """get_test_data returns the rows previously written to the
    external (csv) data file of a test."""
    testdir = testdir_fixture['path']
    project = project_fixture['name']
    input_data = [
        {'col1': 'a', 'col2': 'b'},
        {'col1': 'c', 'col2': 'd'},
    ]
    # force csv as the test data storage format
    test_execution.settings = settings_manager.get_project_settings(testdir,
                                                                    project)
    test_execution.settings['test_data'] = 'csv'
    test_case.new_test_case(testdir, project, [], 'test_get_data')
    test_data.save_external_test_data_file(testdir, project,
                                           'test_get_data', input_data)
    returned_data = test_data.get_test_data(testdir, project,
                                            'test_get_data')
    assert returned_data == input_data
def test_define_execution_list(self, testdir_fixture):
    """Verify that the execution list is generated properly when there's
    only one test without datasets, one driver and zero environments
    """
    root_path = testdir_fixture['path']
    project = create_random_project(root_path)
    test_name = 'new_test_case_001'
    test_case.new_test_case(root_path, project, [], test_name)
    execution = {
        'tests': [test_name],
        'workers': 1,
        'drivers': ['chrome'],
        'environments': [''],
        'suite_before': None,
        'suite_after': None,
    }
    execution_list = start_execution._define_execution_list(
        root_path, project, execution)
    expected_list = [
        {'test_name': 'new_test_case_001',
         'data_set': {},
         'driver': 'chrome',
         'report_directory': None},
    ]
    assert execution_list == expected_list
def test_define_execution_list_multiple_envs(self, project_function_clean):
    """Verify that the execution list is generated properly when the
    execution has multiple envs
    """
    _, project = project_function_clean.activate()
    test_name_one = 'test_one_003'
    test_case.new_test_case(project, [], test_name_one)
    # define two environments in environments.json
    env_data = {
        "stage": {"url": "xxx"},
        "preview": {"url": "yyy"},
    }
    environment_manager.save_environments(project, json.dumps(env_data))
    runner = exc_runner.ExecutionRunner()
    runner.tests = [test_name_one]
    runner.execution.processes = 1
    runner.execution.browsers = ['chrome']
    runner.execution.envs = ['stage', 'preview']
    runner.project = project
    execution_list = runner._define_execution_list()
    # one entry per environment
    expected_list = [
        SimpleNamespace(name='test_one_003',
                        data_set={'env': {'url': 'xxx', 'name': 'stage'}},
                        secrets={}, browser='chrome', reportdir=None),
        SimpleNamespace(name='test_one_003',
                        data_set={'env': {'url': 'yyy', 'name': 'preview'}},
                        secrets={}, browser='chrome', reportdir=None),
    ]
    assert execution_list == expected_list
def test_duplicate_test(self, project_function):
    """Duplicating a test copies the test file and both of its data
    files (in data/ and in tests/) while leaving the originals intact."""
    _, project = project_function.activate()
    test_case.new_test_case(project, [], 'test1')
    # create csv data files next to the test and in the data dir
    data_path_data = os.path.join(project_function.path, 'data', 'test1.csv')
    os.makedirs(os.path.dirname(data_path_data))
    open(data_path_data, 'x').close()
    data_path_tests = os.path.join(project_function.path, 'tests', 'test1.csv')
    open(data_path_tests, 'x').close()
    errors = utils.duplicate_element(project, 'test', 'test1', 'subdir.test2')
    assert errors == []
    # original files still exist, duplicates were created under subdir
    expected_paths = [
        os.path.join(project_function.path, 'tests', 'test1.py'),
        os.path.join(project_function.path, 'tests', 'test1.csv'),
        os.path.join(project_function.path, 'data', 'test1.csv'),
        os.path.join(project_function.path, 'tests', 'subdir', 'test2.py'),
        os.path.join(project_function.path, 'tests', 'subdir', 'test2.csv'),
        os.path.join(project_function.path, 'data', 'subdir', 'test2.csv'),
    ]
    for path in expected_paths:
        assert os.path.isfile(path)
def test_define_execution_list_multiple_drivers(self, project_function_clean):
    """Verify that the execution list is generated properly when there
    are multiple drivers in the list
    """
    _, project = project_function_clean.activate()
    test_name_one = 'test_one_004'
    test_case.new_test_case(project, [], test_name_one)
    test_name_two = 'test_two_004'
    test_case.new_test_case(project, [], test_name_two)
    runner = exc_runner.ExecutionRunner()
    runner.tests = [test_name_one, test_name_two]
    runner.execution.processes = 1
    runner.execution.browsers = ['chrome', 'firefox']
    runner.execution.envs = ['']
    runner.project = project
    execution_list = runner._define_execution_list()
    # cartesian product: each test runs once per browser
    expected_list = [
        SimpleNamespace(name='test_one_004', data_set={}, secrets={},
                        browser='chrome', reportdir=None),
        SimpleNamespace(name='test_one_004', data_set={}, secrets={},
                        browser='firefox', reportdir=None),
        SimpleNamespace(name='test_two_004', data_set={}, secrets={},
                        browser='chrome', reportdir=None),
        SimpleNamespace(name='test_two_004', data_set={}, secrets={},
                        browser='firefox', reportdir=None),
    ]
    assert execution_list == expected_list
def test_define_execution_list_multiple_data_sets(self, project_function_clean):
    """Verify that the execution list is generated properly when a test
    has multiple data sets
    """
    _, project = project_function_clean.activate()
    test_name = 'test_002'
    test_case.new_test_case(project, [], test_name)
    data_rows = [
        {'col1': 'a', 'col2': 'b'},
        {'col1': 'c', 'col2': 'd'},
    ]
    test_data.save_external_test_data_file(project, test_name, data_rows)
    runner = exc_runner.ExecutionRunner()
    runner.tests = [test_name]
    runner.execution.processes = 1
    runner.execution.browsers = ['chrome']
    runner.execution.envs = ['']
    runner.project = project_function_clean.name
    execution_list = runner._define_execution_list()
    # one entry per data set
    expected_list = [
        SimpleNamespace(name=test_name,
                        data_set={'col1': 'a', 'col2': 'b'},
                        secrets={}, browser='chrome', reportdir=None),
        SimpleNamespace(name=test_name,
                        data_set={'col1': 'c', 'col2': 'd'},
                        secrets={}, browser='chrome', reportdir=None),
    ]
    assert execution_list == expected_list
def test_get_internal_test_data(self, testdir_fixture, project_fixture):
    """Data saved in-file (infile mode) is read back unchanged by
    get_internal_test_data."""
    testdir = testdir_fixture['path']
    project = project_fixture['name']
    test_name = 'test_get_internal_test_data'
    input_data = [
        {'col1': "'a'", 'col2': "'b'"},
        {'col1': "'c'", 'col2': "'d'"},
    ]
    test_case.new_test_case(testdir, project, [], test_name)
    empty_steps = {'setup': [], 'test': [], 'teardown': []}
    # store the data inside the test file itself
    test_execution.settings = settings_manager.get_project_settings(testdir,
                                                                    project)
    test_execution.settings['test_data'] = 'infile'
    test_case.save_test_case(testdir, project, test_name, '', [],
                             empty_steps, input_data)
    internal_data = test_data.get_internal_test_data(testdir, project,
                                                     test_name)
    assert internal_data == input_data
def test_new_test_case_file_exists(self, project_class):
    """Creating a test with a name that already exists returns an error."""
    _, project = project_class.activate()
    test_name = 'new_test_case_002'
    parents = ['aaaa', 'bbbb']
    test_case.new_test_case(project, parents, test_name)
    # second attempt with the identical path must fail
    errors = test_case.new_test_case(project, parents, test_name)
    assert errors == ['A test with that name already exists']
def test_new_test_case_file_exists(self, project_class):
    """Creating a test with a name that already exists returns an error."""
    testdir = project_class.testdir
    project = project_class.name
    test_name = 'new_test_case_002'
    parents = ['aaaa', 'bbbb']
    test_case.new_test_case(testdir, project, parents, test_name)
    # second attempt with the identical path must fail
    errors = test_case.new_test_case(testdir, project, parents, test_name)
    assert errors == ['a test with that name already exists']
def create_test(testdir, project, parents, name, content=None):
    """Create a test file in the given project and overwrite its body
    with *content* (a minimal passing test when content is None)."""
    if content is None:
        content = ('def test(data):\n'
                   '    print("hello")\n')
    test_case.new_test_case(testdir, project, parents, name)
    test_path = os.path.join(testdir, 'projects', project, 'tests',
                             os.sep.join(parents), name + '.py')
    with open(test_path, 'w+') as test_file:
        test_file.write(content)
def test_define_execution_list_multiple_tests_datasets_drivers_envs(
        self, project_function_clean):
    """Verify that the execution list is generated properly when there
    are multiple tests, data sets, drivers and environments
    """
    _, project = project_function_clean.activate()
    # test one has two data sets
    test_name_one = 'test_one_005'
    test_case.new_test_case(project, [], test_name_one)
    data_rows = [
        {'col1': 'a'},
        {'col1': 'b'},
    ]
    test_data.save_external_test_data_file(project, test_name_one, data_rows)
    # test two has no data sets
    test_name_two = 'test_two_005'
    test_case.new_test_case(project, [], test_name_two)
    # two environments
    env_data = {
        "stage": {"url": "xxx"},
        "preview": {"url": "yyy"},
    }
    environment_manager.save_environments(project, json.dumps(env_data))
    runner = exc_runner.ExecutionRunner()
    runner.tests = [test_name_one, test_name_two]
    runner.execution.processes = 1
    runner.execution.browsers = ['chrome', 'firefox']
    runner.execution.envs = ['stage', 'preview']
    runner.project = project
    execution_list = runner._define_execution_list()
    # full cartesian product: (data sets x browsers x envs) per test
    expected_list = [
        SimpleNamespace(browser='chrome',
                        data_set={'col1': 'a', 'env': {'url': 'xxx', 'name': 'stage'}},
                        secrets={}, name='test_one_005', reportdir=None),
        SimpleNamespace(browser='firefox',
                        data_set={'col1': 'a', 'env': {'url': 'xxx', 'name': 'stage'}},
                        secrets={}, name='test_one_005', reportdir=None),
        SimpleNamespace(browser='chrome',
                        data_set={'col1': 'a', 'env': {'url': 'yyy', 'name': 'preview'}},
                        secrets={}, name='test_one_005', reportdir=None),
        SimpleNamespace(browser='firefox',
                        data_set={'col1': 'a', 'env': {'url': 'yyy', 'name': 'preview'}},
                        secrets={}, name='test_one_005', reportdir=None),
        SimpleNamespace(browser='chrome',
                        data_set={'col1': 'b', 'env': {'url': 'xxx', 'name': 'stage'}},
                        secrets={}, name='test_one_005', reportdir=None),
        SimpleNamespace(browser='firefox',
                        data_set={'col1': 'b', 'env': {'url': 'xxx', 'name': 'stage'}},
                        secrets={}, name='test_one_005', reportdir=None),
        SimpleNamespace(browser='chrome',
                        data_set={'col1': 'b', 'env': {'url': 'yyy', 'name': 'preview'}},
                        secrets={}, name='test_one_005', reportdir=None),
        SimpleNamespace(browser='firefox',
                        data_set={'col1': 'b', 'env': {'url': 'yyy', 'name': 'preview'}},
                        secrets={}, name='test_one_005', reportdir=None),
        SimpleNamespace(browser='chrome',
                        data_set={'env': {'url': 'xxx', 'name': 'stage'}},
                        secrets={}, name='test_two_005', reportdir=None),
        SimpleNamespace(browser='firefox',
                        data_set={'env': {'url': 'xxx', 'name': 'stage'}},
                        secrets={}, name='test_two_005', reportdir=None),
        SimpleNamespace(browser='chrome',
                        data_set={'env': {'url': 'yyy', 'name': 'preview'}},
                        secrets={}, name='test_two_005', reportdir=None),
        SimpleNamespace(browser='firefox',
                        data_set={'env': {'url': 'yyy', 'name': 'preview'}},
                        secrets={}, name='test_two_005', reportdir=None),
    ]
    assert execution_list == expected_list
def test_define_execution_list_multiple_tests(self, project_function_clean):
    """Verify that the execution list is generated properly when there
    are multiple tests in the list
    """
    testdir = project_function_clean.testdir
    project = project_function_clean.name
    os.chdir(testdir)
    # first test: two data sets in an external file
    test_name_one = 'test_one_001'
    test_case.new_test_case(testdir, project, [], test_name_one)
    data_rows = [
        {'col1': 'a', 'col2': 'b'},
        {'col1': 'c', 'col2': 'd'},
    ]
    test_data.save_external_test_data_file(testdir, project, test_name_one,
                                           data_rows)
    # second test: no data sets
    test_name_two = 'test_two_001'
    test_case.new_test_case(testdir, project, [], test_name_two)
    runner = exc_runner.ExecutionRunner()
    runner.tests = [test_name_one, test_name_two]
    runner.execution.processes = 1
    runner.execution.browsers = ['chrome']
    runner.execution.envs = ['']
    runner.project = project
    execution_list = runner._define_execution_list()
    expected_list = [
        SimpleNamespace(name='test_one_001',
                        data_set={'col1': 'a', 'col2': 'b'},
                        secrets={}, browser='chrome', reportdir=None),
        SimpleNamespace(name='test_one_001',
                        data_set={'col1': 'c', 'col2': 'd'},
                        secrets={}, browser='chrome', reportdir=None),
        SimpleNamespace(name='test_two_001', data_set={}, secrets={},
                        browser='chrome', reportdir=None),
    ]
    assert execution_list == expected_list
def test_new_test_case_file_exists(self, project_fixture):
    """Creating a test with a name that already exists returns an error."""
    root_path = project_fixture['testdir']
    project = project_fixture['name']
    test_name = 'new_test_case_002'
    parents = ['aaaa', 'bbbb']
    test_case.new_test_case(root_path, project, parents, test_name)
    # second attempt with the identical path must fail
    errors = test_case.new_test_case(root_path, project, parents, test_name)
    assert errors == ['A test with that name already exists']
def test_get_test_case_content_empty_test(self, project_function):
    """A freshly created test has empty description, pages and steps."""
    _, project = project_function.activate()
    test_name = 'some_test_case'
    test_case.new_test_case(project, [], test_name)
    content = test_case.get_test_case_content(project, test_name)
    assert content['description'] == ''
    assert content['pages'] == []
    steps = content['steps']
    assert steps['setup'] == []
    assert steps['test'] == []
    assert steps['teardown'] == []
def test_define_execution_list_multiple_envs(self, project_function_clean):
    """Verify that the execution list is generated properly when the
    execution has multiple envs
    """
    root_path = project_function_clean['testdir']
    project = project_function_clean['name']
    os.chdir(root_path)
    test_name_one = 'test_one_003'
    test_case.new_test_case(root_path, project, [], test_name_one)
    # define two environments in environments.json
    env_data = {"stage": {"url": "xxx"}, "preview": {"url": "yyy"}}
    environment_manager.save_environments(root_path, project,
                                          json.dumps(env_data))
    execution = {
        'tests': [test_name_one],
        'workers': 1,
        'drivers': ['chrome'],
        'environments': ['stage', 'preview'],
        'suite_before': None,
        'suite_after': None,
    }
    execution_list = start_execution._define_execution_list(
        root_path, project, execution)
    # one entry per environment
    expected_list = [
        {'test_name': 'test_one_003',
         'data_set': {'env': {'url': 'xxx', 'name': 'stage'}},
         'driver': 'chrome',
         'report_directory': None},
        {'test_name': 'test_one_003',
         'data_set': {'env': {'url': 'yyy', 'name': 'preview'}},
         'driver': 'chrome',
         'report_directory': None},
    ]
    assert execution_list == expected_list
def test_new_test_case_with_parents_already_exist(self, project_session):
    """Creating a second test inside an already-existing parent
    directory succeeds."""
    testdir = project_session.testdir
    project = project_session.name
    first_test = 'test_new_0004'
    second_test = 'test_new_0005'
    parents = ['asf01']
    # first call creates the parent directory as a side effect
    test_case.new_test_case(testdir, project, parents, first_test)
    errors = test_case.new_test_case(testdir, project, parents, second_test)
    assert errors == []
    expected_path = os.path.join(project_session.path, 'tests',
                                 os.sep.join(parents), second_test + '.py')
    assert os.path.isfile(expected_path)
def test_get_suite_test_cases_get_all(self, project_function):
    """A suite whose test list is ['*'] expands to every test in the
    project, including tests in subdirectories."""
    _, project = project_function.activate()
    test_case.new_test_case(project, [], 'test_name_01')
    test_case.new_test_case(project, ['a', 'b'], 'test_name_02')
    suite_name = 'test_suite_004'
    suite.new_suite(project, [], suite_name)
    # '*' selects every test in the project
    suite.save_suite(project, suite_name, ['*'], 1, [], [], [])
    result = suite.get_suite_test_cases(project, suite_name)
    assert result == ['test_name_01', 'a.b.test_name_02']
def test_define_execution_list_multiple_drivers(self, testdir_fixture):
    """Verify that the execution list is generated properly when there
    are multiple drivers in the list
    """
    root_path = testdir_fixture['path']
    project = create_random_project(root_path)
    test_name_one = 'new_test_case_one'
    test_case.new_test_case(root_path, project, [], test_name_one)
    test_name_two = 'new_test_case_two'
    test_case.new_test_case(root_path, project, [], test_name_two)
    execution = {
        'tests': [test_name_one, test_name_two],
        'workers': 1,
        'drivers': ['chrome', 'firefox'],
        'environments': [''],
        'suite_before': None,
        'suite_after': None,
    }
    execution_list = start_execution._define_execution_list(root_path,
                                                            project,
                                                            execution)
    # cartesian product: each test runs once per driver
    expected_list = [
        {'test_name': 'new_test_case_one', 'data_set': {},
         'driver': 'chrome', 'report_directory': None},
        {'test_name': 'new_test_case_one', 'data_set': {},
         'driver': 'firefox', 'report_directory': None},
        {'test_name': 'new_test_case_two', 'data_set': {},
         'driver': 'chrome', 'report_directory': None},
        {'test_name': 'new_test_case_two', 'data_set': {},
         'driver': 'firefox', 'report_directory': None},
    ]
    assert execution_list == expected_list
def test_define_execution_list_multiple_data_sets(self, project_function_clean):
    """Verify that the execution list is generated properly when a test
    has multiple data sets
    """
    root_path = project_function_clean['testdir']
    project = project_function_clean['name']
    os.chdir(root_path)
    test_name = 'new_test_case_002'
    test_case.new_test_case(root_path, project, [], test_name)
    data_rows = [
        {'col1': 'a', 'col2': 'b'},
        {'col1': 'c', 'col2': 'd'},
    ]
    test_data.save_external_test_data_file(root_path, project, test_name,
                                           data_rows)
    execution = {
        'tests': [test_name],
        'workers': 1,
        'drivers': ['chrome'],
        'environments': [''],
        'suite_before': None,
        'suite_after': None,
    }
    execution_list = start_execution._define_execution_list(
        root_path, project, execution)
    # one entry per data set
    expected_list = [
        {'test_name': 'new_test_case_002',
         'data_set': {'col1': 'a', 'col2': 'b'},
         'driver': 'chrome',
         'report_directory': None},
        {'test_name': 'new_test_case_002',
         'data_set': {'col1': 'c', 'col2': 'd'},
         'driver': 'chrome',
         'report_directory': None},
    ]
    assert execution_list == expected_list
def test_new_test_case_with_parents_already_exist(
        self, permanent_project_fixture):
    """Creating a second test inside an already-existing parent
    directory succeeds."""
    testdir = permanent_project_fixture['testdir']
    project = permanent_project_fixture['name']
    first_test = 'test_new_0004'
    second_test = 'test_new_0005'
    parents = ['asf01']
    # first call creates the parent directory as a side effect
    test_case.new_test_case(testdir, project, parents, first_test)
    errors = test_case.new_test_case(testdir, project, parents, second_test)
    assert errors == []
    expected_path = os.path.join(testdir, 'projects', project, 'tests',
                                 os.sep.join(parents), second_test + '.py')
    assert os.path.isfile(expected_path)
def test_define_execution_list_multiple_drivers(self, project_function_clean):
    """Verify that the execution list is generated properly when there
    are multiple drivers in the list
    """
    root_path = project_function_clean['testdir']
    project = project_function_clean['name']
    os.chdir(root_path)
    test_name_one = 'test_one_004'
    test_case.new_test_case(root_path, project, [], test_name_one)
    test_name_two = 'test_two_004'
    test_case.new_test_case(root_path, project, [], test_name_two)
    execution = {
        'tests': [test_name_one, test_name_two],
        'workers': 1,
        'drivers': ['chrome', 'firefox'],
        'environments': [''],
        'suite_before': None,
        'suite_after': None,
    }
    execution_list = start_execution._define_execution_list(
        root_path, project, execution)
    # cartesian product: each test runs once per driver
    expected_list = [
        {'test_name': 'test_one_004', 'data_set': {},
         'driver': 'chrome', 'report_directory': None},
        {'test_name': 'test_one_004', 'data_set': {},
         'driver': 'firefox', 'report_directory': None},
        {'test_name': 'test_two_004', 'data_set': {},
         'driver': 'chrome', 'report_directory': None},
        {'test_name': 'test_two_004', 'data_set': {},
         'driver': 'firefox', 'report_directory': None},
    ]
    assert execution_list == expected_list
def test_define_execution_list_multiple_data_sets(self, testdir_fixture):
    """Verify that the execution list is generated properly when a test
    has multiple data sets
    """
    root_path = testdir_fixture['path']
    project = create_random_project(root_path)
    test_name = 'new_test_case_002'
    test_case.new_test_case(root_path, project, [], test_name)
    data_rows = [
        {'col1': 'a', 'col2': 'b'},
        {'col1': 'c', 'col2': 'd'},
    ]
    test_data.save_external_test_data_file(root_path, project, test_name,
                                           data_rows)
    execution = {
        'tests': [test_name],
        'workers': 1,
        'drivers': ['chrome'],
        'environments': [''],
        'suite_before': None,
        'suite_after': None,
    }
    execution_list = start_execution._define_execution_list(root_path,
                                                            project,
                                                            execution)
    # one entry per data set
    expected_list = [
        {'test_name': 'new_test_case_002',
         'data_set': {'col1': 'a', 'col2': 'b'},
         'driver': 'chrome',
         'report_directory': None},
        {'test_name': 'new_test_case_002',
         'data_set': {'col1': 'c', 'col2': 'd'},
         'driver': 'chrome',
         'report_directory': None},
    ]
    assert execution_list == expected_list
def test_define_execution_list_multiple_envs(self, testdir_fixture):
    """Verify that the execution list is generated properly when the
    execution has multiple envs
    """
    root_path = testdir_fixture['path']
    project = create_random_project(root_path)
    test_name_one = 'new_test_case_one'
    test_case.new_test_case(root_path, project, [], test_name_one)
    # define two environments in environments.json
    env_data = {
        "stage": {"url": "xxx"},
        "preview": {"url": "yyy"},
    }
    environment_manager.save_environments(root_path, project,
                                          json.dumps(env_data))
    execution = {
        'tests': [test_name_one],
        'workers': 1,
        'drivers': ['chrome'],
        'environments': ['stage', 'preview'],
        'suite_before': None,
        'suite_after': None,
    }
    execution_list = start_execution._define_execution_list(root_path,
                                                            project,
                                                            execution)
    # one entry per environment
    expected_list = [
        {'test_name': 'new_test_case_one',
         'data_set': {'env': {'url': 'xxx', 'name': 'stage'}},
         'driver': 'chrome',
         'report_directory': None},
        {'test_name': 'new_test_case_one',
         'data_set': {'env': {'url': 'yyy', 'name': 'preview'}},
         'driver': 'chrome',
         'report_directory': None},
    ]
    assert execution_list == expected_list
def test_save_test_case_code_csv_data(self, project_function):
    """Saving test code with csv data writes the code verbatim to the
    .py file and the data to a sibling .csv file."""
    _, project = project_function.activate()
    test_name = 'test_one'
    rows = [{'key': "'value'"}]
    session.settings['test_data'] = 'csv'
    test_case.new_test_case(project, [], test_name)
    test_case.save_test_case_code(project, test_name,
                                  SAMPLE_TEST_CONTENT, rows)
    py_path = os.path.join(project_function.path, 'tests', test_name + '.py')
    with open(py_path) as f:
        assert f.read() == SAMPLE_TEST_CONTENT
    csv_path = os.path.join(project_function.path, 'tests', test_name + '.csv')
    expected = ('key\n'
                '\'value\'\n')
    with open(csv_path) as f:
        assert f.read() == expected
def test_save_test_case_data_infile(self, project_function):
    """With test_data=infile the data sets are serialized inside the
    generated .py test file."""
    _, project = project_function.activate()
    test_case.new_test_case(project, ['a', 'b'], 'test_one')
    description = 'description'
    page_objects = ['page1', 'page2']
    test_steps = {
        'setup': [{'action': 'click', 'parameters': ['elem1']}],
        'test': [{'action': 'send_keys', 'parameters': ['elem2', 'keys']}],
        'teardown': [],
    }
    rows = [{'key': '\'value\''}]
    settings_manager.save_project_settings(project, '{"test_data": "infile"}')
    test_case.save_test_case(project, 'a.b.test_one', description,
                             page_objects, test_steps, rows, [])
    path = os.path.join(project_function.path, 'tests', 'a', 'b',
                        'test_one.py')
    expected = ('\n'
                'description = \'description\'\n'
                '\n'
                'tags = []\n'
                '\n'
                'pages = [\'page1\',\n'
                '         \'page2\']\n'
                '\n'
                'data = [\n'
                '    {\n'
                '        \'key\': \'value\',\n'
                '    },\n'
                ']\n'
                '\n'
                'def setup(data):\n'
                '    click(elem1)\n'
                '\n'
                'def test(data):\n'
                '    send_keys(elem2, keys)\n'
                '\n'
                'def teardown(data):\n'
                '    pass\n')
    with open(path) as f:
        assert f.read() == expected
def new_tree_element():
    """Create a new tree element (test, page or suite) for a project.

    Reads POST form fields: ``project``, ``elementType`` ('test', 'page'
    or 'suite'), ``isDir`` (JSON boolean — create a directory instead of
    a file), ``fullPath`` (dot-separated path; spaces are normalized to
    underscores) and ``addParents``.

    Returns a JSON document with ``errors``, ``project_name`` and the
    created ``element`` descriptor. Only handles POST requests.
    """
    if request.method == 'POST':
        project = request.form['project']
        elem_type = request.form['elementType']
        is_dir = json.loads(request.form['isDir'])
        full_path = request.form['fullPath']
        add_parents = request.form['addParents']
        full_path = full_path.replace(' ', '_')
        dot_path = full_path
        errors = []
        path_segments = full_path.split('.')
        element_name = path_segments.pop()
        parents = path_segments
        # the element name may only contain letters, numbers,
        # dashes or underscores
        for c in element_name:
            # fixed idiom: `c not in` instead of `not c in`
            if not c.isalnum() and c not in ['-', '_']:
                errors.append('Only letters, numbers, \'-\' and \'_\' are allowed')
                break
        if not errors:
            if elem_type == 'test':
                if is_dir:
                    errors = file_manager.new_directory_of_type(
                        root_path, project, parents, element_name,
                        dir_type='tests')
                else:
                    errors = test_case.new_test_case(root_path, project,
                                                     parents, element_name)
            elif elem_type == 'page':
                if is_dir:
                    errors = file_manager.new_directory_of_type(
                        root_path, project, parents, element_name,
                        dir_type='pages')
                else:
                    errors = page_object.new_page_object(
                        root_path, project, parents, element_name,
                        add_parents=add_parents)
            elif elem_type == 'suite':
                if is_dir:
                    errors = file_manager.new_directory_of_type(
                        root_path, project, parents, element_name,
                        dir_type='suites')
                else:
                    errors = suite_module.new_suite(root_path, project,
                                                    parents, element_name)
        element = {
            'name': element_name,
            'full_path': dot_path,
            'type': elem_type,
            'is_directory': is_dir,
        }
        return json.dumps({'errors': errors,
                           'project_name': project,
                           'element': element})
def test_get_test_data_dict_list(self, testdir_fixture, project_fixture):
    """get_test_data_dict_list returns previously saved test data as a
    list of dicts, unchanged."""
    testdir = testdir_fixture['path']
    project = project_fixture['name']
    input_data = [
        {'col1': 'a', 'col2': 'b'},
        {'col1': 'c', 'col2': 'd'},
    ]
    test_case.new_test_case(testdir, project, [], 'test_get_data')
    data.save_test_data(testdir, project, 'test_get_data', input_data)
    returned_data = utils.get_test_data_dict_list(testdir, project,
                                                  'test_get_data')
    assert returned_data == input_data
def test_save_test_case_data_infile(self, project_function):
    """With test_data=infile the data sets are serialized inside the
    generated .py test file."""
    testdir = project_function.testdir
    project = project_function.name
    test_case.new_test_case(testdir, project, ['a', 'b'], 'test_one')
    description = 'description'
    page_objects = ['page1', 'page2']
    test_steps = {
        'setup': [{'action': 'click', 'parameters': ['elem1']}],
        'test': [{'action': 'send_keys', 'parameters': ['elem2', 'keys']}],
        'teardown': [],
    }
    rows = [{'key': '\'value\''}]
    test_execution.settings['test_data'] = 'infile'
    test_case.save_test_case(testdir, project, 'a.b.test_one', description,
                             page_objects, test_steps, rows)
    path = os.path.join(project_function.path, 'tests', 'a', 'b',
                        'test_one.py')
    expected = ('\n'
                'description = \'description\'\n'
                '\n'
                'pages = [\'page1\',\n'
                '         \'page2\']\n'
                '\n'
                'data = [\n'
                '    {\n'
                '        \'key\': \'value\',\n'
                '    },\n'
                ']\n'
                '\n'
                'def setup(data):\n'
                '    click(elem1)\n'
                '\n'
                'def test(data):\n'
                '    send_keys(elem2, keys)\n'
                '\n'
                'def teardown(data):\n'
                '    pass\n')
    with open(path) as f:
        assert f.read() == expected
def new_tree_element():
    """Create a new tree element (test, page or suite) for a project.

    Reads POST form fields: ``project``, ``elementType`` ('test', 'page'
    or 'suite'), ``isDir`` (JSON boolean — create a directory instead of
    a file), ``fullPath`` (dot-separated path; spaces are normalized to
    underscores) and ``addParents``.

    Returns a JSON document with ``errors``, ``project_name`` and the
    created ``element`` descriptor. Only handles POST requests.
    """
    if request.method == 'POST':
        project = request.form['project']
        elem_type = request.form['elementType']
        is_dir = json.loads(request.form['isDir'])
        full_path = request.form['fullPath']
        add_parents = request.form['addParents']
        full_path = full_path.replace(' ', '_')
        dot_path = full_path
        errors = []
        path_segments = full_path.split('.')
        element_name = path_segments.pop()
        parents = path_segments
        # the element name may only contain letters, numbers,
        # dashes or underscores
        for c in element_name:
            # fixed idiom: `c not in` instead of `not c in`
            if not c.isalnum() and c not in ['-', '_']:
                errors.append('Only letters, numbers, \'-\' and \'_\' are allowed')
                break
        if not errors:
            if elem_type == 'test':
                if is_dir:
                    errors = io_manager.new_directory(root_path, project,
                                                      parents, element_name,
                                                      dir_type='tests')
                else:
                    errors = test_case.new_test_case(root_path, project,
                                                     parents, element_name)
            elif elem_type == 'page':
                if is_dir:
                    errors = io_manager.new_directory(root_path, project,
                                                      parents, element_name,
                                                      dir_type='pages')
                else:
                    errors = page_object.new_page_object(
                        root_path, project, parents, element_name,
                        add_parents=add_parents)
            elif elem_type == 'suite':
                if is_dir:
                    errors = io_manager.new_directory(root_path, project,
                                                      parents, element_name,
                                                      dir_type='suites')
                else:
                    errors = suite_module.new_suite(root_path, project,
                                                    parents, element_name)
        element = {
            'name': element_name,
            'full_path': dot_path,
            'type': elem_type,
            'is_directory': is_dir,
        }
        return json.dumps({'errors': errors,
                           'project_name': project,
                           'element': element})
def test_delete_test_with_data(self, project_function):
    """Test that when a test is deleted its data files are deleted
    as well
    """
    _, project = project_function.activate()
    test_case.new_test_case(project, [], 'test1')
    # create csv data files in data/ and next to the test file
    data_path_data = os.path.join(project_function.path, 'data', 'test1.csv')
    os.makedirs(os.path.dirname(data_path_data))
    open(data_path_data, 'x').close()
    data_path_tests = os.path.join(project_function.path, 'tests', 'test1.csv')
    open(data_path_tests, 'x').close()
    errors = utils.delete_element(project, 'test', 'test1')
    assert errors == []
    # the test file and both data files are gone
    test_path = os.path.join(project_function.path, 'tests', 'test1.py')
    assert not os.path.exists(test_path)
    assert not os.path.exists(data_path_data)
    assert not os.path.exists(data_path_tests)
def createtest_command(project, test):
    """CLI handler: create a new test (dot path) inside *project*.

    Exits with an error message when the project does not exist or the
    test cannot be created.
    """
    if not utils.project_exists(project):
        msg = ('golem createtest: error: a project with name {} '
               'does not exist'.format(project))
        sys.exit(msg)
    *parent_dirs, test_name = test.split('.')
    errors = test_case.new_test_case(project, parent_dirs, test_name)
    if errors:
        sys.exit('golem createtest: error: {}'.format(' '.join(errors)))
def test_new_test_case(self, project_class):
    """new_test_case creates the test file (including parent dirs) with
    the default new-test content and returns no errors."""
    testdir = project_class.testdir
    project = project_class.name
    test_name = 'new_test_case_001'
    parents = ['aaaa', 'bbbb']
    errors = test_case.new_test_case(testdir, project, parents, test_name)
    assert errors == []
    path = os.path.join(project_class.path, 'tests',
                        os.sep.join(parents), test_name + '.py')
    assert os.path.isfile(path)
    # the generated file contains the default test skeleton
    assert test_case.get_test_case_code(path) == NEW_TEST_CONTENT
def test_save_test_case_data_csv(self, project_function):
    """With test_data=csv the data sets are written to a sibling .csv
    file instead of being embedded in the .py test file."""
    _, project = project_function.activate()
    test_case.new_test_case(project, ['a', 'b'], 'test_one')
    description = 'description'
    page_objects = []
    test_steps = {
        'setup': [],
        'test': [{'action': 'send_keys', 'parameters': ['elem2', 'keys']}],
        'teardown': [],
    }
    rows = [{'key': '\'value\''}]
    session.settings['test_data'] = 'csv'
    test_case.save_test_case(project, 'a.b.test_one', description,
                             page_objects, test_steps, rows, [])
    py_path = os.path.join(project_function.path, 'tests', 'a', 'b',
                           'test_one.py')
    # no `data = [...]` section in the generated python file
    expected = ('\n'
                'description = \'description\'\n'
                '\n'
                'tags = []\n'
                '\n'
                'pages = []\n'
                '\n'
                'def setup(data):\n'
                '    pass\n'
                '\n'
                'def test(data):\n'
                '    send_keys(elem2, keys)\n'
                '\n'
                'def teardown(data):\n'
                '    pass\n')
    with open(py_path) as f:
        assert f.read() == expected
    csv_path = os.path.join(project_function.path, 'tests', 'a', 'b',
                            'test_one.csv')
    expected = ('key\n'
                '\'value\'\n')
    with open(csv_path) as f:
        assert f.read() == expected
def run(self, test_execution, args):
    """Create a new test (dot path in args.test) inside args.project.

    Raises CommandException when the project does not exist or the test
    cannot be created.
    """
    root_path = test_execution.root_path
    if args.project not in utils.get_projects(root_path):
        raise CommandException(
            'Error: a project with that name does not exist'
        )
    *parent_dirs, test_name = args.test.split('.')
    errors = test_case.new_test_case(root_path, args.project,
                                     parent_dirs, test_name)
    if errors:
        raise CommandException('\n'.join(errors))
def test_get_test_cases(self, testdir_fixture, project_fixture):
    """get_test_cases returns the nested directory/file tree of all
    tests in the project."""
    testdir = testdir_fixture['path']
    project = project_fixture['name']
    test_case.new_test_case(testdir, project, ['subdir1', 'subdir2'], 'test3')
    test_case.new_test_case(testdir, project, ['subdir1'], 'test2')
    test_case.new_test_case(testdir, project, [], 'test1')
    tests = utils.get_test_cases(testdir, project)
    expected_result = {
        'type': 'directory',
        'name': 'tests',
        'dot_path': '.',
        'sub_elements': [
            {
                'type': 'directory',
                'name': 'subdir1',
                'dot_path': 'subdir1',
                'sub_elements': [
                    {
                        'type': 'directory',
                        'name': 'subdir2',
                        'dot_path': 'subdir1.subdir2',
                        'sub_elements': [
                            {
                                'type': 'file',
                                'name': 'test3',
                                'dot_path': 'subdir1.subdir2.test3',
                                'sub_elements': [],
                            },
                        ],
                    },
                    {
                        'type': 'file',
                        'name': 'test2',
                        'dot_path': 'subdir1.test2',
                        'sub_elements': [],
                    },
                ],
            },
            {
                'type': 'file',
                'name': 'test1',
                'dot_path': 'test1',
                'sub_elements': [],
            },
        ],
    }
    assert tests == expected_result