Code example #1
    def test_run_with_not_existing_environments(self, project_function,
                                                test_utils, capsys):
        """Run tests with a not existing environment.
        It should throw an error and finish with status code 1
        """
        _, project = project_function.activate()
        test_utils.create_test(project, 'test01')
        timestamp = utils.get_timestamp()
        execution_runner = exc_runner.ExecutionRunner(
            browsers=['chrome'],
            timestamp=timestamp,
            environments=['not_existing'])
        execution_runner.project = project
        with pytest.raises(SystemExit) as wrapped_execution:
            execution_runner.run_directory('')

        assert wrapped_execution.value.code == 1
        out, err = capsys.readouterr()
        msg = (
            'ERROR: the following environments do not exist for project {}: '
            'not_existing'.format(project))
        assert msg in out
        data = exec_report.get_execution_data(project=project,
                                              suite='all',
                                              execution=timestamp)
        assert data['has_finished'] is True
        assert data['total_tests'] == 0
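
All of these snippets are test methods from the Golem framework's execution-runner test suite, so they share the same fixtures (project_function, project_class, test_utils, capsys, ...) and the same module-level imports, which the extracts do not show. A plausible import block, assuming Golem's usual package layout (the exact module paths are an assumption, not taken from the extracts):

import json
import os
from types import SimpleNamespace

import pytest

# NOTE: module paths below are assumed from the identifiers used in the
# examples; verify them against the Golem version you are reading.
from golem.core import environment_manager, session, settings_manager
from golem.core import test, test_data, utils
from golem.execution_runner import execution_runner as exc_runner
from golem.report import execution_report as exec_report
from golem.report import test_report
from golem.test_runner.conf import ResultsEnum
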
Code example #2
 def test_run_suite_filter_by_invalid_tag_expression(
         self, _project_with_tags, test_utils, capsys):
     """When a invalid tag expression is used a message is displayed
     to the console, no tests are run, the report is generated,
     and the execution exists with status code 1
     """
     _, project = _project_with_tags.activate()
     suite_name = test_utils.random_numeric_string(10, 'suite')
     tests = [
         _project_with_tags.t.test_alfa_bravo,
         _project_with_tags.t.test_bravo_charlie
     ]
     test_utils.create_suite(project, suite_name, tests=tests)
     timestamp = utils.get_timestamp()
     execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                   timestamp=timestamp,
                                                   tags=['sierra = tango'])
     execution_runner.project = project
     with pytest.raises(SystemExit):
         execution_runner.run_suite(suite_name)
     out, err = capsys.readouterr()
     expected = (
         "InvalidTagExpression: unknown expression <class '_ast.Assign'>, the "
         "only valid operators for tag expressions are: 'and', 'or' & 'not'"
     )
     assert expected in out
     data = exec_report.get_execution_data(project=project,
                                           suite=suite_name,
                                           execution=timestamp)
     assert data['has_finished'] is True
     assert data['total_tests'] == 0
Code example #3
 def test_run_single_test_with_two_sets(self, project_class, test_utils,
                                        capsys):
     """Run a single test with two data sets.
     It should display the number of tests and test sets found."""
     testdir, project = project_class.activate()
     test_name = 'foo002'
     timestamp = utils.get_timestamp()
     session.settings = settings_manager.get_project_settings(project)
     content = ('data = [{"foo": 1}, {"foo": 2}]\n'
                'def test(data):\n'
                '    pass\n')
     test_utils.create_test(project, test_name, content=content)
     execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                   timestamp=timestamp)
     execution_runner.project = project
     execution_runner.run_test(test_name)
     out, err = capsys.readouterr()
     # number of tests is displayed
     assert 'Tests found: 1 (2 sets)' in out
     test_report_dir = os.path.join(testdir, 'projects', project, 'reports',
                                    'single_tests', test_name, timestamp)
     assert os.path.isdir(test_report_dir)
     items = os.listdir(test_report_dir)
     # two test set dirs + report.json
     assert len(items) == 3
Code example #4
 def test_define_execution_list_multiple_envs(self, project_function_clean):
     """Verify that the execution list is generated properly when the execution
     has multiple envs
     """
     _, project = project_function_clean.activate()
     # create test one
     test_name_one = 'test_one_003'
     test.create_test(project, test_name_one)
     # create two environments in environments.json
     env_data = {"stage": {"url": "xxx"}, "preview": {"url": "yyy"}}
     env_data_json = json.dumps(env_data)
     environment_manager.save_environments(project, env_data_json)
     execution_runner = exc_runner.ExecutionRunner(project)
     execution_runner.tests = [test_name_one]
     execution_runner.execution.processes = 1
     execution_runner.execution.browsers = ['chrome']
     execution_runner.execution.envs = ['stage', 'preview']
     exec_list = execution_runner._define_execution_list()
     assert exec_list[0].data_set == {
         'env': {
             'url': 'xxx',
             'name': 'stage'
         }
     }
     assert exec_list[0].env == 'stage'
     assert exec_list[1].data_set == {
         'env': {
             'url': 'yyy',
             'name': 'preview'
         }
     }
     assert exec_list[1].env == 'preview'
Code example #5
 def test_define_execution_list_multiple_drivers(self,
                                                 project_function_clean):
     """Verify that the execution list is generated properly when there
     are multiple drivers in the list
     """
     _, project = project_function_clean.activate()
     # create test one
     test_name_one = 'test_one_004'
     test.create_test(project, test_name_one)
     # create test two
     test_name_two = 'test_two_004'
     test.create_test(project, test_name_two)
     execution_runner = exc_runner.ExecutionRunner(project)
     execution_runner.tests = [test_name_one, test_name_two]
     execution_runner.execution.processes = 1
     execution_runner.execution.browsers = ['chrome', 'firefox']
     execution_runner.execution.envs = []
     execution_list = execution_runner._define_execution_list()
     # expected_list = [
     #     SimpleNamespace(name='test_one_004', data_set={}, secrets={}, browser='chrome', reportdir=None, env=None, set_name=''),
     #     SimpleNamespace(name='test_one_004', data_set={}, secrets={}, browser='firefox', reportdir=None, env=None, set_name=''),
     #     SimpleNamespace(name='test_two_004', data_set={}, secrets={}, browser='chrome', reportdir=None, env=None, set_name=''),
     #     SimpleNamespace(name='test_two_004', data_set={}, secrets={}, browser='firefox', reportdir=None, env=None, set_name='')
     # ]
     # assert execution_list == expected_list
     assert len(execution_list) == 4
     assert execution_list[0].browser == 'chrome'
     assert execution_list[1].browser == 'firefox'
     assert execution_list[2].browser == 'chrome'
     assert execution_list[3].browser == 'firefox'
Code example #6
 def test_define_execution_list_multiple_tests(self,
                                               project_function_clean):
     """Verify that the execution list is generated properly when there
     are multiple tests in the list
     """
     _, project = project_function_clean.activate()
     # create test one
     test_name_one = 'test_one_001'
     test.create_test(project, test_name_one)
     tdata = [{
         'col1': 'a',
         'col2': 'b'
     }, {
         'col1': 'c',
         'col2': 'd',
     }]
     test_data.save_csv_test_data(project, test_name_one, tdata)
     # create test two
     test_name_two = 'test_two_001'
     test.create_test(project, test_name_two)
     execution_runner = exc_runner.ExecutionRunner(project)
     execution_runner.tests = [test_name_one, test_name_two]
     execution_runner.execution.processes = 1
     execution_runner.execution.browsers = ['chrome']
     execution_runner.execution.envs = []
     exec_list = execution_runner._define_execution_list()
     assert exec_list[0].name == 'test_one_001'
     assert exec_list[0].data_set == {'col1': 'a', 'col2': 'b'}
     assert exec_list[1].name == 'test_one_001'
     assert exec_list[1].data_set == {'col1': 'c', 'col2': 'd'}
     assert exec_list[2].name == 'test_two_001'
     assert exec_list[2].data_set == {}
Code example #7
 def test_define_execution_list_multiple_data_sets(self,
                                                   project_function_clean):
     """Verify that the execution list is generated properly when a test
     has multiple data sets
     """
     _, project = project_function_clean.activate()
     test_name = 'test_002'
     test.create_test(project, test_name)
     tdata = [{
         'col1': 'a',
         'col2': 'b'
     }, {
         'col1': 'c',
         'col2': 'd',
     }]
     test_data.save_csv_test_data(project, test_name, tdata)
     execution_runner = exc_runner.ExecutionRunner(project)
     execution_runner.tests = [test_name]
     execution_runner.execution.processes = 1
     execution_runner.execution.browsers = ['chrome']
     execution_runner.execution.envs = []
     execution_list = execution_runner._define_execution_list()
     assert execution_list[0].data_set == {'col1': 'a', 'col2': 'b'}
     assert isinstance(execution_list[0].set_name,
                       str) and execution_list[0].set_name != ''
     assert execution_list[1].data_set == {'col1': 'c', 'col2': 'd'}
     assert isinstance(execution_list[1].set_name,
                       str) and execution_list[1].set_name != ''
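
For reference, the CSV file that save_csv_test_data writes for the data above would presumably hold one header row taken from the dict keys and one row per data set. A standalone sketch of that shape (an illustration only, not Golem's implementation):

import csv
import io

tdata = [{'col1': 'a', 'col2': 'b'}, {'col1': 'c', 'col2': 'd'}]
buf = io.StringIO()
writer = csv.DictWriter(buf, fieldnames=['col1', 'col2'], lineterminator='\n')
writer.writeheader()
writer.writerows(tdata)
print(buf.getvalue())
# col1,col2
# a,b
# c,d
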
Code example #8
 def test_define_execution_list_with_secrets(self, project_function_clean):
     """Verify that the execution list is generated properly when there's only
     one test without datasets, one driver and zero environments
     """
     _, project = project_function_clean.activate()
     secrets = {"a": "secret", "b": "secret02"}
     secrets_path = os.path.join(project_function_clean.path,
                                 'secrets.json')
     with open(secrets_path, 'w') as secrets_file:
         secrets_file.write(json.dumps(secrets, indent=True))
     execution_runner = exc_runner.ExecutionRunner()
     execution_runner.tests = ['test_001']
     execution_runner.execution.processes = 1
     execution_runner.execution.browsers = ['chrome']
     execution_runner.execution.envs = []
     execution_runner.project = project
     execution_list = execution_runner._define_execution_list()
     expected_list = [
         SimpleNamespace(name='test_001',
                         data_set={},
                         secrets={
                             "a": "secret",
                             "b": "secret02"
                         },
                         browser='chrome',
                         reportdir=None,
                         env=None)
     ]
     assert execution_list == expected_list
Code example #9
 def test__select_environments_all_envs_empty(self, project_function):
     """Verify that _select_environments uses the correct order
     of precedence when cli environments, suite environments and 
     project environments are empty"""
     _, project = project_function.activate()
     execution_runner = exc_runner.ExecutionRunner(project)
     execution_runner.cli_args.envs = []
     execution_runner.suite.envs = []
     project_envs = environment_manager.get_envs(project)
     result_envs = execution_runner._select_environments(project_envs)
     assert result_envs == []
Code example #10
 def test_define_execution_list_multiple_tests(self,
                                               project_function_clean):
     """Verify that the execution list is generated properly when there
     are multiple tests in the list
     """
     _, project = project_function_clean.activate()
     # create test one
     test_name_one = 'test_one_001'
     test.create_test(project, test_name_one)
     tdata = [{
         'col1': 'a',
         'col2': 'b'
     }, {
         'col1': 'c',
         'col2': 'd',
     }]
     test_data.save_external_test_data_file(project, test_name_one, tdata)
     # create test two
     test_name_two = 'test_two_001'
     test.create_test(project, test_name_two)
     execution_runner = exc_runner.ExecutionRunner()
     execution_runner.tests = [test_name_one, test_name_two]
     execution_runner.execution.processes = 1
     execution_runner.execution.browsers = ['chrome']
     execution_runner.execution.envs = []
     execution_runner.project = project
     execution_list = execution_runner._define_execution_list()
     expected_list = [
         SimpleNamespace(name='test_one_001',
                         data_set={
                             'col1': 'a',
                             'col2': 'b'
                         },
                         secrets={},
                         browser='chrome',
                         reportdir=None,
                         env=None),
         SimpleNamespace(name='test_one_001',
                         data_set={
                             'col1': 'c',
                             'col2': 'd'
                         },
                         secrets={},
                         browser='chrome',
                         reportdir=None,
                         env=None),
         SimpleNamespace(name='test_two_001',
                         data_set={},
                         secrets={},
                         browser='chrome',
                         reportdir=None,
                         env=None)
     ]
     assert execution_list == expected_list
Code example #11
 def test__select_environments(self, project_session):
     """Verify that _select_environments uses the correct order
     of precedence"""
     _, project = project_session.activate()
     cli_envs = ['cli_env_1', 'cli_env_2']
     execution_runner = exc_runner.ExecutionRunner(project)
     execution_runner.cli_args.envs = cli_envs
     execution_runner.suite.envs = ['suite_env_1', 'suite_env_2']
     project_envs = environment_manager.get_envs(project)
     result_envs = execution_runner._select_environments(project_envs)
     assert result_envs == cli_envs
Code example #12
 def test__select_environments_cli_envs_empty_suite_envs_empty(
         self, project_function):
     """Verify that _select_environments uses the correct order
     of precedence when cli environments and suite environments are empty"""
     testdir, project = project_function.activate()
     execution_runner = exc_runner.ExecutionRunner(project)
     execution_runner.cli_args.envs = []
     execution_runner.suite.envs = []
     path = os.path.join(testdir, 'projects', project, 'environments.json')
     with open(path, 'w+') as f:
         f.write('{"env3": {}, "env4": {}}')
     project_envs = environment_manager.get_envs(project)
     result_envs = execution_runner._select_environments(project_envs)
     assert result_envs == ['env3']
Code example #13
 def test_run_directory(self, _project_with_tags, capsys):
     _, project = _project_with_tags.activate()
     timestamp = utils.get_timestamp()
     execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                   timestamp=timestamp)
     execution_runner.project = project
     execution_runner.run_directory('foo')
     out, err = capsys.readouterr()
     assert 'Tests found: 2' in out
     data = exec_report.get_execution_data(project=project,
                                           suite='foo',
                                           execution=timestamp)
     assert data['has_finished'] is True
     assert data['total_tests'] == 2
Code example #14
 def test_define_execution_list_multiple_tests_datasets_drivers_envs(
         self, project_function_clean):
     """Verify that the execution list is generated properly when there
     are multiple tests, data sets, drivers and environments
     """
     _, project = project_function_clean.activate()
     # create test one
     test_name_one = 'test_one_005'
     test.create_test(project, test_name_one)
     # test data for test one
     tdata = [{'col1': 'a'}, {'col1': 'b'}]
     test_data.save_csv_test_data(project, test_name_one, tdata)
     # create test two
     test_name_two = 'test_two_005'
     test.create_test(project, test_name_two)
     # create two environments
     env_data = {"stage": {"url": "xxx"}, "preview": {"url": "yyy"}}
     env_data_json = json.dumps(env_data)
     environment_manager.save_environments(project, env_data_json)
     execution_runner = exc_runner.ExecutionRunner(project)
     execution_runner.tests = [test_name_one, test_name_two]
     execution_runner.execution.processes = 1
     execution_runner.execution.browsers = ['chrome', 'firefox']
     execution_runner.execution.envs = ['stage', 'preview']
     ex = execution_runner._define_execution_list()
     assert ex[0].browser == 'chrome' and ex[0].env == 'stage' and \
            ex[0].data_set == {'col1': 'a', 'env': {'url': 'xxx', 'name': 'stage'}}
     assert ex[1].browser == 'firefox' and ex[1].env == 'stage' and \
            ex[1].data_set == {'col1': 'a', 'env': {'url': 'xxx', 'name': 'stage'}}
     assert ex[2].browser == 'chrome' and ex[2].env == 'preview' and \
            ex[2].data_set == {'col1': 'a', 'env': {'url': 'yyy', 'name': 'preview'}}
     assert ex[3].browser == 'firefox' and ex[3].env == 'preview' and \
            ex[3].data_set == {'col1': 'a', 'env': {'url': 'yyy', 'name': 'preview'}}
     assert ex[4].browser == 'chrome' and ex[4].env == 'stage' and \
            ex[4].data_set == {'col1': 'b', 'env': {'url': 'xxx', 'name': 'stage'}}
     assert ex[5].browser == 'firefox' and ex[5].env == 'stage' and \
            ex[5].data_set == {'col1': 'b', 'env': {'url': 'xxx', 'name': 'stage'}}
     assert ex[6].browser == 'chrome' and ex[6].env == 'preview' and \
            ex[6].data_set == {'col1': 'b', 'env': {'url': 'yyy', 'name': 'preview'}}
     assert ex[7].browser == 'firefox' and ex[7].env == 'preview' and \
            ex[7].data_set == {'col1': 'b', 'env': {'url': 'yyy', 'name': 'preview'}}
     assert ex[8].browser == 'chrome' and ex[8].env == 'stage' and \
            ex[8].data_set == {'env': {'url': 'xxx', 'name': 'stage'}}
     assert ex[9].browser == 'firefox' and ex[9].env == 'stage' and \
            ex[9].data_set == {'env': {'url': 'xxx', 'name': 'stage'}}
     assert ex[10].browser == 'chrome' and ex[10].env == 'preview' and \
            ex[10].data_set == {'env': {'url': 'yyy', 'name': 'preview'}}
     assert ex[11].browser == 'firefox' and ex[11].env == 'preview' and \
            ex[11].data_set == {'env': {'url': 'yyy', 'name': 'preview'}}
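
The twelve assertions above simply walk the cross product of data sets, environments, and browsers for each test: 2 x 2 x 2 combinations for test_one_005 (two data sets) plus 1 x 2 x 2 for test_two_005 (no data sets). A minimal sketch of that expansion, under the assumption that this is all the execution list encodes here (it is not Golem's actual implementation):

from itertools import product

tests = {
    'test_one_005': [{'col1': 'a'}, {'col1': 'b'}],
    'test_two_005': [{}],  # a test without data still gets one empty data set
}
envs = {'stage': {'url': 'xxx'}, 'preview': {'url': 'yyy'}}
browsers = ['chrome', 'firefox']

execution_list = []
for name, data_sets in tests.items():
    for data_set, (env_name, env_conf), browser in product(data_sets, envs.items(), browsers):
        combined = dict(data_set)
        combined['env'] = dict(env_conf, name=env_name)
        execution_list.append(
            {'name': name, 'browser': browser, 'env': env_name, 'data_set': combined})

assert len(execution_list) == 12
assert execution_list[0] == {'name': 'test_one_005', 'browser': 'chrome', 'env': 'stage',
                             'data_set': {'col1': 'a', 'env': {'url': 'xxx', 'name': 'stage'}}}
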
Code example #15
 def test_run_single_test(self, project_class, test_utils):
     testdir, project = project_class.activate()
     test_name = 'foo001'
     timestamp = utils.get_timestamp()
     session.settings = settings_manager.get_project_settings(project)
     test_utils.create_test(project, test_name)
     execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                   timestamp=timestamp)
     execution_runner.project = project
     execution_runner.run_test(test_name)
     test_report_dir = os.path.join(testdir, 'projects', project, 'reports',
                                    'single_tests', test_name, timestamp)
     assert os.path.isdir(test_report_dir)
     items = os.listdir(test_report_dir)
     # test set dir + report.json
     assert len(items) == 2
Code example #16
 def test_run_directory_without_tests(self, _project_with_tags, capsys):
     _, project = _project_with_tags.activate()
     timestamp = utils.get_timestamp()
     dirname = 'empty'
     execution_runner = exc_runner.ExecutionRunner(project,
                                                   browsers=['chrome'],
                                                   timestamp=timestamp)
     execution_runner.run_directory(dirname)
     out, err = capsys.readouterr()
     expected = f"No tests were found in {os.path.join('tests', dirname)}"
     assert expected in out
     data = exec_report.get_execution_data(project=project,
                                           execution=dirname,
                                           timestamp=timestamp)
     assert data['has_finished'] is True
     assert data['total_tests'] == 0
Code example #17
 def test__select_environments_cli_envs_empty(self, project_function):
     """Verify that _select_environments uses the correct order
     of precedence when cli environments is empty"""
     testdir, project = project_function.activate()
     cli_envs = []
     suite_envs = ['suite_env_1', 'suite_env_2']
     execution_runner = exc_runner.ExecutionRunner()
     execution_runner.project = project
     execution_runner.cli_args.envs = cli_envs
     execution_runner.suite.envs = suite_envs
     path = os.path.join(testdir, 'environments.json')
     with open(path, 'w+') as f:
         f.write('{"env1": {}, "env2": {}}')
     project_envs = environment_manager.get_envs(project)
     result_envs = execution_runner._select_environments(project_envs)
     assert result_envs == suite_envs
Code example #18
 def test__create_execution_directory_is_not_suite(self, project_class):
     """Verify that create_execution_directory works as expected when 
     a not suite is passed on
     """
     _, project = project_class.activate()
     test_name = 'foo'
     timestamp = utils.get_timestamp()
     execution_runner = exc_runner.ExecutionRunner()
     execution_runner.test_name = test_name
     execution_runner.project = project
     execution_runner.is_suite = False
     execution_runner.timestamp = timestamp
     execution_runner._create_execution_directory()
     expected_path = os.path.join(project_class.path, 'reports',
                                  'single_tests', test_name, timestamp)
     assert os.path.isdir(expected_path)
Code example #19
 def test_run_directory_filter_by_tags(self, _project_with_tags, test_utils,
                                       capsys):
     _, project = _project_with_tags.activate()
     timestamp = utils.get_timestamp()
     dirname = 'foo'
     execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                   timestamp=timestamp,
                                                   tags=['alfa', 'bravo'])
     execution_runner.project = project
     execution_runner.run_directory(dirname)
     out, err = capsys.readouterr()
     assert 'Tests found: 1' in out
     data = exec_report.get_execution_data(project=project,
                                           suite=dirname,
                                           execution=timestamp)
     assert data['has_finished'] is True
     assert data['total_tests'] == 1
Code example #20
 def test_run_suite_without_tests(self, _project_with_tags, test_utils,
                                  capsys):
     _, project = _project_with_tags.activate()
     suite_name = test_utils.random_numeric_string(10, 'suite')
     test_utils.create_suite(project, suite_name, tests=[])
     timestamp = utils.get_timestamp()
     execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                   timestamp=timestamp)
     execution_runner.project = project
     execution_runner.run_suite(suite_name)
     out, err = capsys.readouterr()
     assert 'No tests found for suite {}'.format(suite_name) in out
     data = exec_report.get_execution_data(project=project,
                                           suite=suite_name,
                                           execution=timestamp)
     assert data['has_finished'] is True
     assert data['total_tests'] == 0
Code example #21
 def test__create_execution_directory_is_suite(self, project_class):
     """Verify that create_execution_directory works as expected when 
     a suite is passed on
     """
     _, project = project_class.activate()
     timestamp = utils.get_timestamp()
     suite_name = 'bar'
     execution_runner = exc_runner.ExecutionRunner()
     execution_runner.tests = ['test_foo']
     execution_runner.project = project
     execution_runner.is_suite = True
     execution_runner.suite_name = suite_name
     execution_runner.timestamp = timestamp
     execution_runner._create_execution_directory()
     expected_path = os.path.join(project_class.path, 'reports', suite_name,
                                  timestamp)
     assert os.path.isdir(expected_path)
Code example #22
 def test_run_with_environments(self, project_function, test_utils, capsys):
     _, project = project_function.activate()
     environments = json.dumps({'test': {}, 'stage': {}})
     environment_manager.save_environments(project, environments)
     test_utils.create_test(project, 'test01')
     timestamp = utils.get_timestamp()
     execution_runner = exc_runner.ExecutionRunner(
         browsers=['chrome'],
         timestamp=timestamp,
         environments=['test', 'stage'])
     execution_runner.project = project
     execution_runner.run_directory('')
     out, err = capsys.readouterr()
     assert 'Tests found: 1 (2 sets)' in out
     data = exec_report.get_execution_data(project=project,
                                           suite='all',
                                           execution=timestamp)
     assert data['has_finished'] is True
     assert data['total_tests'] == 2
Code example #23
 def test_run_suite(self, _project_with_tags, test_utils, capsys):
     _, project = _project_with_tags.activate()
     suite_name = test_utils.random_numeric_string(10, 'suite')
     tests = [
         _project_with_tags.t.test_alfa_bravo,
         _project_with_tags.t.test_bravo_charlie
     ]
     test_utils.create_suite(project, suite_name, tests=tests)
     timestamp = utils.get_timestamp()
     execution_runner = exc_runner.ExecutionRunner(project,
                                                   browsers=['chrome'],
                                                   timestamp=timestamp)
     execution_runner.run_suite(suite_name)
     out, err = capsys.readouterr()
     assert 'Tests found: 2' in out
     data = exec_report.get_execution_data(project=project,
                                           execution=suite_name,
                                           timestamp=timestamp)
     assert data['has_finished'] is True
     assert data['total_tests'] == 2
Code example #24
 def test_define_execution_list(self, project_function_clean):
     """Verify that the execution list is generated properly when there's only
     one test without datasets, one driver and zero environments
     """
     project_function_clean.activate()
     execution_runner = exc_runner.ExecutionRunner()
     execution_runner.tests = ['test_001']
     execution_runner.execution.processes = 1
     execution_runner.execution.browsers = ['chrome']
     execution_runner.execution.envs = []
     execution_runner.project = project_function_clean.name
     execution_list = execution_runner._define_execution_list()
     expected_list = [
         SimpleNamespace(name='test_001',
                         data_set={},
                         secrets={},
                         browser='chrome',
                         reportdir=None,
                         env=None)
     ]
     assert execution_list == expected_list
Code example #25
 def test_run_single_test_filter_by_tags(self, project_class, test_utils):
     """Run a single test with filtering by tags"""
     testdir, project = project_class.activate()
     test_name = 'foo003'
     timestamp = utils.get_timestamp()
     session.settings = settings_manager.get_project_settings(project)
     content = ('tags = ["alfa", "bravo"]\n'
                'def test(data):\n'
                '    pass\n')
     test_utils.create_test(project, test_name, content=content)
     execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                   timestamp=timestamp,
                                                   tags=['alfa'])
     execution_runner.project = project
     execution_runner.run_test(test_name)
     test_report_dir = os.path.join(testdir, 'projects', project, 'reports',
                                    'single_tests', test_name, timestamp)
     assert os.path.isdir(test_report_dir)
     items = os.listdir(test_report_dir)
     # test set dir + report.json
     assert len(items) == 2
Code example #26
 def test_run_single_test_with_invalid_tags(self, project_class, test_utils,
                                            capsys):
     testdir, project = project_class.activate()
     test_name = 'foo004'
     timestamp = utils.get_timestamp()
     content = ('tags = ["alfa", "bravo"]\n'
                'def test(data):\n'
                '    pass\n')
     test_utils.create_test(project, test_name, content=content)
     execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                   timestamp=timestamp,
                                                   tags=['charlie'])
     execution_runner.project = project
     execution_runner.run_test(test_name)
     out, err = capsys.readouterr()
     assert 'No tests found with tag(s): charlie' in out
     test_report_dir = os.path.join(testdir, 'projects', project, 'reports',
                                    'single_tests', test_name, timestamp)
     assert os.path.isdir(test_report_dir)
     items = os.listdir(test_report_dir)
     # only report.json is present
     assert items == ['report.json']
Code example #27
    def test_initialize_reports_for_test_files(self, project_class,
                                               test_utils):
        """test file json reports are initialized with status pending
        for each test function"""
        _, project = project_class.activate()
        test_name = test_utils.random_string()
        content = 'def test_one(data):\n' \
                  '    pass\n' \
                  'def test_two(data):\n' \
                  '    pass'
        test_utils.create_test(project, test_name, content)

        timestamp = utils.get_timestamp()
        execution_name = test_name

        execution_runner = exc_runner.ExecutionRunner(project,
                                                      timestamp=timestamp)
        execution_runner.tests = [test_name]
        execution_runner.execution.processes = 1
        execution_runner.execution.browsers = exc_runner.define_browsers(
            ['chrome'], [], ['chrome'], [])
        execution_runner.execution.envs = []
        execution_runner.execution_name = execution_name
        execution_runner.execution.reportdir = \
            execution_runner._create_execution_directory()
        execution_list = execution_runner._define_execution_list()

        exc_runner.initialize_reports_for_test_files(project, execution_list)

        test_file_report = test_report.get_test_file_report_json(
            project, execution_name, timestamp, test_name)

        assert len(test_file_report) == 2
        assert any(
            t['test'] == 'test_one' and t['result'] == ResultsEnum.PENDING
            for t in test_file_report)
        assert any(
            t['test'] == 'test_two' and t['result'] == ResultsEnum.PENDING
            for t in test_file_report)
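
Inferred from the assertions alone, the initialized test-file report is a list with one pending entry per test function in the file. A rough sketch of the shape being checked (the 'pending' literal is a hypothetical stand-in for ResultsEnum.PENDING, and any other fields the real report carries are omitted):

PENDING = 'pending'  # hypothetical stand-in for ResultsEnum.PENDING
test_file_report = [
    {'test': 'test_one', 'result': PENDING},
    {'test': 'test_two', 'result': PENDING},
]
assert all(entry['result'] == PENDING for entry in test_file_report)
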
Code example #28
 def test_define_execution_list_multiple_tests_datasets_drivers_envs(
         self, project_function_clean):
     """Verify that the execution list is generated properly when there
     are multiple tests, data sets, drivers and environments
     """
     _, project = project_function_clean.activate()
     # create test one
     test_name_one = 'test_one_005'
     test.create_test(project, test_name_one)
     # test data for test one
     tdata = [{'col1': 'a'}, {'col1': 'b'}]
     test_data.save_external_test_data_file(project, test_name_one, tdata)
     # create test two
     test_name_two = 'test_two_005'
     test.create_test(project, test_name_two)
     # create two environments
     env_data = {"stage": {"url": "xxx"}, "preview": {"url": "yyy"}}
     env_data_json = json.dumps(env_data)
     environment_manager.save_environments(project, env_data_json)
     execution_runner = exc_runner.ExecutionRunner()
     execution_runner.tests = [test_name_one, test_name_two]
     execution_runner.execution.processes = 1
     execution_runner.execution.browsers = ['chrome', 'firefox']
     execution_runner.execution.envs = ['stage', 'preview']
     execution_runner.project = project
     execution_list = execution_runner._define_execution_list()
     expected_list = [
         SimpleNamespace(browser='chrome',
                         data_set={
                             'col1': 'a',
                             'env': {
                                 'url': 'xxx',
                                 'name': 'stage'
                             }
                         },
                         secrets={},
                         name='test_one_005',
                         reportdir=None,
                         env='stage'),
         SimpleNamespace(browser='firefox',
                         data_set={
                             'col1': 'a',
                             'env': {
                                 'url': 'xxx',
                                 'name': 'stage'
                             }
                         },
                         secrets={},
                         name='test_one_005',
                         reportdir=None,
                         env='stage'),
         SimpleNamespace(browser='chrome',
                         data_set={
                             'col1': 'a',
                             'env': {
                                 'url': 'yyy',
                                 'name': 'preview'
                             }
                         },
                         secrets={},
                         name='test_one_005',
                         reportdir=None,
                         env='preview'),
         SimpleNamespace(browser='firefox',
                         data_set={
                             'col1': 'a',
                             'env': {
                                 'url': 'yyy',
                                 'name': 'preview'
                             }
                         },
                         secrets={},
                         name='test_one_005',
                         reportdir=None,
                         env='preview'),
         SimpleNamespace(browser='chrome',
                         data_set={
                             'col1': 'b',
                             'env': {
                                 'url': 'xxx',
                                 'name': 'stage'
                             }
                         },
                         secrets={},
                         name='test_one_005',
                         reportdir=None,
                         env='stage'),
         SimpleNamespace(browser='firefox',
                         data_set={
                             'col1': 'b',
                             'env': {
                                 'url': 'xxx',
                                 'name': 'stage'
                             }
                         },
                         secrets={},
                         name='test_one_005',
                         reportdir=None,
                         env='stage'),
         SimpleNamespace(browser='chrome',
                         data_set={
                             'col1': 'b',
                             'env': {
                                 'url': 'yyy',
                                 'name': 'preview'
                             }
                         },
                         secrets={},
                         name='test_one_005',
                         reportdir=None,
                         env='preview'),
         SimpleNamespace(browser='firefox',
                         data_set={
                             'col1': 'b',
                             'env': {
                                 'url': 'yyy',
                                 'name': 'preview'
                             }
                         },
                         secrets={},
                         name='test_one_005',
                         reportdir=None,
                         env='preview'),
         SimpleNamespace(browser='chrome',
                         data_set={'env': {
                             'url': 'xxx',
                             'name': 'stage'
                         }},
                         secrets={},
                         name='test_two_005',
                         reportdir=None,
                         env='stage'),
         SimpleNamespace(browser='firefox',
                         data_set={'env': {
                             'url': 'xxx',
                             'name': 'stage'
                         }},
                         secrets={},
                         name='test_two_005',
                         reportdir=None,
                         env='stage'),
         SimpleNamespace(
             browser='chrome',
             data_set={'env': {
                 'url': 'yyy',
                 'name': 'preview'
             }},
             secrets={},
             name='test_two_005',
             reportdir=None,
             env='preview'),
         SimpleNamespace(
             browser='firefox',
             data_set={'env': {
                 'url': 'yyy',
                 'name': 'preview'
             }},
             secrets={},
             name='test_two_005',
             reportdir=None,
             env='preview')
     ]
     assert execution_list == expected_list