Example #1
0
    def test_define_execution_list_multiple_envs(self, project_function_clean):
        """Verify that the execution list is generated properly when the execution
        has multiple envs
        """
        _, project = project_function_clean.activate()
        # create test one
        test_name_one = 'test_one_003'
        test.create_test(project, test_name_one)
        # create two environments in environments.json
        env_data = {"stage": {"url": "xxx"}, "preview": {"url": "yyy"}}
        env_data_json = json.dumps(env_data)
        environment_manager.save_environments(project, env_data_json)
        execution_runner = exc_runner.ExecutionRunner(project)
        execution_runner.tests = [test_name_one]
        execution_runner.execution.processes = 1
        execution_runner.execution.browsers = ['chrome']
        execution_runner.execution.envs = ['stage', 'preview']
        exec_list = execution_runner._define_execution_list()
        assert exec_list[0].data_set == {
            'env': {
                'url': 'xxx',
                'name': 'stage'
            }
        }
        assert exec_list[0].env == 'stage'
        assert exec_list[1].data_set == {
            'env': {
                'url': 'yyy',
                'name': 'preview'
            }
        }
        assert exec_list[1].env == 'preview'
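Every one of these test variants pins down the same combination logic: the execution list is the cross product of tests, data sets, browsers/drivers, and environments, with the chosen environment's values merged into each data set under the 'env' key. A minimal sketch of that logic, under stated assumptions (build_execution_list and its parameters are illustrative, not Golem's actual implementation):

import itertools

def build_execution_list(tests, data_sets_by_test, browsers, envs, env_data):
    # One execution per (test, data set, environment, browser) combination.
    execution_list = []
    for test_name in tests:
        # a test without data files still runs once per env/browser pair
        data_sets = data_sets_by_test.get(test_name) or [{}]
        for data_set, env, browser in itertools.product(data_sets, envs, browsers):
            merged = dict(data_set)
            # environment values are merged into the data set under 'env',
            # with the environment name added alongside them
            merged['env'] = dict(env_data[env], name=env)
            execution_list.append({'test_name': test_name,
                                   'data_set': merged,
                                   'browser': browser})
    return execution_list

The iteration order in this sketch (browser varies fastest, then environment, then data set) matches the expected lists asserted in the examples below.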
Example #2
0
def save_environments():
    if request.method == 'POST':
        projectname = request.json['project']
        env_data = request.json['environmentData']
        # this version always reports 'ok'; the return value of
        # save_environments is not checked
        result = {'result': 'ok', 'errors': []}
        environment_manager.save_environments(projectname, env_data)
        return json.dumps(result)
Example #3
0
    def test_define_execution_list_multiple_envs(self, project_function_clean):
        """Verify that the execution list is generated properly when the execution
        has multiple envs
        """
        _, project = project_function_clean.activate()
        # create test one
        test_name_one = 'test_one_003'
        parents = []
        test_case.new_test_case(project, parents, test_name_one)
        # create two environments in environments.json
        env_data = {
            "stage": {"url": "xxx"},
            "preview": {"url": "yyy"}
        }
        env_data_json = json.dumps(env_data)
        environment_manager.save_environments(project, env_data_json)
        execution_runner = exc_runner.ExecutionRunner()
        execution_runner.tests = [test_name_one]
        execution_runner.execution.processes = 1
        execution_runner.execution.browsers = ['chrome']
        execution_runner.execution.envs = ['stage', 'preview']
        execution_runner.project = project
        execution_list = execution_runner._define_execution_list()
        expected_list = [
            SimpleNamespace(name='test_one_003', data_set={'env': {'url': 'xxx', 'name': 'stage'}}, secrets={},
                            browser='chrome', reportdir=None),
            SimpleNamespace(name='test_one_003', data_set={'env': {'url': 'yyy', 'name': 'preview'}}, secrets={},
                            browser='chrome', reportdir=None)
        ]
        assert execution_list == expected_list
Example #4
0
    def test_define_execution_list_multiple_tests_datasets_drivers_envs(
            self, project_function_clean):
        """Verify that the execution list is generated properly when there
        are multiple tests, data sets, drivers and environments
        """
        _, project = project_function_clean.activate()
        # create test one
        test_name_one = 'test_one_005'
        parents = []
        test_case.new_test_case(project, parents, test_name_one)
        # test data for test one
        tdata = [
            {
                'col1': 'a',
            },
            {
                'col1': 'b',
            }
        ]
        test_data.save_external_test_data_file(project, test_name_one, tdata)
        # create test two
        test_name_two = 'test_two_005'
        parents = []
        test_case.new_test_case(project, parents, test_name_two)
        # create two environments
        env_data = {
            "stage": {
                "url": "xxx"
            },
            "preview": {
                "url": "yyy"
            }
        }
        env_data_json = json.dumps(env_data)
        environment_manager.save_environments(project, env_data_json)
        execution_runner = exc_runner.ExecutionRunner()
        execution_runner.tests = [test_name_one, test_name_two]
        execution_runner.execution.processes = 1
        execution_runner.execution.browsers = ['chrome', 'firefox']
        execution_runner.execution.envs = ['stage', 'preview']
        execution_runner.project = project
        execution_list = execution_runner._define_execution_list()
        expected_list = [
            SimpleNamespace(browser='chrome', data_set={'col1': 'a', 'env': {'url': 'xxx', 'name': 'stage'}}, secrets={}, name='test_one_005', reportdir=None),
            SimpleNamespace(browser='firefox', data_set={'col1': 'a', 'env': {'url': 'xxx', 'name': 'stage'}}, secrets={}, name='test_one_005', reportdir=None),
            SimpleNamespace(browser='chrome', data_set={'col1': 'a', 'env': {'url': 'yyy', 'name': 'preview'}}, secrets={}, name='test_one_005', reportdir=None),
            SimpleNamespace(browser='firefox', data_set={'col1': 'a', 'env': {'url': 'yyy', 'name': 'preview'}}, secrets={}, name='test_one_005', reportdir=None),
            SimpleNamespace(browser='chrome', data_set={'col1': 'b', 'env': {'url': 'xxx', 'name': 'stage'}}, secrets={}, name='test_one_005', reportdir=None),
            SimpleNamespace(browser='firefox', data_set={'col1': 'b', 'env': {'url': 'xxx', 'name': 'stage'}}, secrets={}, name='test_one_005', reportdir=None),
            SimpleNamespace(browser='chrome', data_set={'col1': 'b', 'env': {'url': 'yyy', 'name': 'preview'}}, secrets={}, name='test_one_005', reportdir=None),
            SimpleNamespace(browser='firefox', data_set={'col1': 'b', 'env': {'url': 'yyy', 'name': 'preview'}}, secrets={}, name='test_one_005', reportdir=None),
            SimpleNamespace(browser='chrome', data_set={'env': {'url': 'xxx', 'name': 'stage'}}, secrets={}, name='test_two_005', reportdir=None),
            SimpleNamespace(browser='firefox', data_set={'env': {'url': 'xxx', 'name': 'stage'}}, secrets={}, name='test_two_005', reportdir=None),
            SimpleNamespace(browser='chrome', data_set={'env': {'url': 'yyy', 'name': 'preview'}}, secrets={}, name='test_two_005', reportdir=None),
            SimpleNamespace(browser='firefox', data_set={'env': {'url': 'yyy', 'name': 'preview'}}, secrets={}, name='test_two_005', reportdir=None)
        ]
        assert execution_list == expected_list
Example #5
0
    def test_define_execution_list_multiple_envs(self, project_function_clean):
        """Verify that the execution list is generated properly when the execution
        has multiple envs
        """
        root_path = project_function_clean['testdir']
        project = project_function_clean['name']
        os.chdir(root_path)
        # create test one
        test_name_one = 'test_one_003'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_one)

        # create two environments in environments.json
        env_data = {"stage": {"url": "xxx"}, "preview": {"url": "yyy"}}
        env_data_json = json.dumps(env_data)
        environment_manager.save_environments(root_path, project,
                                              env_data_json)

        execution = {
            'tests': [test_name_one],
            'workers': 1,
            'drivers': ['chrome'],
            'environments': ['stage', 'preview'],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(
            root_path, project, execution)

        expected_list = [
            {
                'test_name': 'test_one_003',
                'data_set': {
                    'env': {
                        'url': 'xxx',
                        'name': 'stage'
                    }
                },
                'driver': 'chrome',
                'report_directory': None
            },
            {
                'test_name': 'test_one_003',
                'data_set': {
                    'env': {
                        'url': 'yyy',
                        'name': 'preview'
                    }
                },
                'driver': 'chrome',
                'report_directory': None
            },
        ]
        assert execution_list == expected_list
Example #6
0
    def test_define_execution_list_multiple_envs(self, testdir_fixture):
        """Verify that the execution list is generated properly when the execution
        has multiple envs
        """
        root_path = testdir_fixture['path']
        project = create_random_project(root_path)
        # create test one
        test_name_one = 'new_test_case_one'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_one)

        # create two environments in environments.json
        env_data = {
            "stage": {
                "url": "xxx"
            },
            "preview": {
                "url": "yyy"
            }
        }
        env_data_json = json.dumps(env_data)
        environment_manager.save_environments(root_path, project, env_data_json)

        execution = {
            'tests': [test_name_one],
            'workers': 1,
            'drivers': ['chrome'],
            'environments': ['stage', 'preview'],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(root_path, project,
                                                                execution)

        expected_list = [
            {
                'test_name': 'new_test_case_one',
                'data_set': {'env': {'url': 'xxx', 'name': 'stage'}},
                'driver': 'chrome',
                'report_directory': None
            },
            {
                'test_name': 'new_test_case_one',
                'data_set': {'env': {'url': 'yyy', 'name': 'preview'}},
                'driver': 'chrome',
                'report_directory': None
            },
        ]
        assert execution_list == expected_list
Example #7
0
    def test_define_execution_list_multiple_tests_datasets_drivers_envs(
            self, project_function_clean):
        """Verify that the execution list is generated properly when there
        are multiple tests, data sets, drivers and environments
        """
        _, project = project_function_clean.activate()
        # create test one
        test_name_one = 'test_one_005'
        test.create_test(project, test_name_one)
        # test data for test one
        tdata = [{'col1': 'a'}, {'col1': 'b'}]
        test_data.save_csv_test_data(project, test_name_one, tdata)
        # create test two
        test_name_two = 'test_two_005'
        test.create_test(project, test_name_two)
        # create two environments
        env_data = {"stage": {"url": "xxx"}, "preview": {"url": "yyy"}}
        env_data_json = json.dumps(env_data)
        environment_manager.save_environments(project, env_data_json)
        execution_runner = exc_runner.ExecutionRunner(project)
        execution_runner.tests = [test_name_one, test_name_two]
        execution_runner.execution.processes = 1
        execution_runner.execution.browsers = ['chrome', 'firefox']
        execution_runner.execution.envs = ['stage', 'preview']
        ex = execution_runner._define_execution_list()
        assert ex[0].browser == 'chrome' and ex[0].env == 'stage' and \
               ex[0].data_set == {'col1': 'a', 'env': {'url': 'xxx', 'name': 'stage'}}
        assert ex[1].browser == 'firefox' and ex[1].env == 'stage' and \
               ex[1].data_set == {'col1': 'a', 'env': {'url': 'xxx', 'name': 'stage'}}
        assert ex[2].browser == 'chrome' and ex[2].env == 'preview' and \
               ex[2].data_set == {'col1': 'a', 'env': {'url': 'yyy', 'name': 'preview'}}
        assert ex[3].browser == 'firefox' and ex[3].env == 'preview' and \
               ex[3].data_set == {'col1': 'a', 'env': {'url': 'yyy', 'name': 'preview'}}
        assert ex[4].browser == 'chrome' and ex[4].env == 'stage' and \
               ex[4].data_set == {'col1': 'b', 'env': {'url': 'xxx', 'name': 'stage'}}
        assert ex[5].browser == 'firefox' and ex[5].env == 'stage' and \
               ex[5].data_set == {'col1': 'b', 'env': {'url': 'xxx', 'name': 'stage'}}
        assert ex[6].browser == 'chrome' and ex[6].env == 'preview' and \
               ex[6].data_set == {'col1': 'b', 'env': {'url': 'yyy', 'name': 'preview'}}
        assert ex[7].browser == 'firefox' and ex[7].env == 'preview' and \
               ex[7].data_set == {'col1': 'b', 'env': {'url': 'yyy', 'name': 'preview'}}
        assert ex[8].browser == 'chrome' and ex[8].env == 'stage' and \
               ex[8].data_set == {'env': {'url': 'xxx', 'name': 'stage'}}
        assert ex[9].browser == 'firefox' and ex[9].env == 'stage' and \
               ex[9].data_set == {'env': {'url': 'xxx', 'name': 'stage'}}
        assert ex[10].browser == 'chrome' and ex[10].env == 'preview' and \
               ex[10].data_set == {'env': {'url': 'yyy', 'name': 'preview'}}
        assert ex[11].browser == 'firefox' and ex[11].env == 'preview' and \
               ex[11].data_set == {'env': {'url': 'yyy', 'name': 'preview'}}
Example #8
0
def save_environments():
    # 'root_path' is presumably a module-level global (the test directory);
    # it is not defined within this snippet
    if request.method == 'POST':
        projectname = request.json['project']
        env_data = request.json['environmentData']
        error = environment_manager.save_environments(root_path, projectname,
                                                      env_data)
        return json.dumps(error)
Example #9
0
    def test_run_with_environments(self, project_function, test_utils, capsys):
        _, project = project_function.activate()
        environments = json.dumps({'test': {}, 'stage': {}})
        environment_manager.save_environments(project, environments)
        test_utils.create_test(project, [], 'test01')
        timestamp = utils.get_timestamp()
        execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                      timestamp=timestamp,
                                                      environments=['test', 'stage'])
        execution_runner.project = project
        execution_runner.run_directory('')
        out, err = capsys.readouterr()
        assert 'Tests found: 1 (2 sets)' in out
        data = report_parser.get_execution_data(project=project, suite='all', execution=timestamp)
        assert data['has_finished'] is True
        assert data['total_tests'] == 2
Example #10
0
    def test_save_environments_empty_env_data(self, project_session):
        _, project = project_session.activate()
        error = environment_manager.save_environments(project, '')
        assert error == ''
        env_json_path = os.path.join(project_session.path, 'environments.json')
        with open(env_json_path) as json_file:
            file_content = json_file.read()
            assert file_content == ''
Example #11
0
    def test_save_environments_empty_env_data(self, project_session):
        project = project_session['name']
        testdir = project_session['testdir']
        error = environment_manager.save_environments(testdir, project, '')
        assert error == ''
        env_json_path = os.path.join(testdir, 'projects', project,
                                     'environments.json')
        with open(env_json_path) as json_file:
            file_content = json_file.read()
            assert file_content == ''
Example #12
0
    def test_save_environments(self, project_session):
        project = project_session.name
        testdir = project_session.testdir
        error = environment_manager.save_environments(testdir, project,
                                                      ENV_DATA)
        assert error == ''
        env_json_path = os.path.join(project_session.path, 'environments.json')
        with open(env_json_path) as json_file:
            file_content = json_file.read()
            assert file_content == ENV_DATA
Example #13
0
    def test_save_environments(self, permanent_project_fixture):
        project = permanent_project_fixture['name']
        testdir = permanent_project_fixture['testdir']
        env_json_path = os.path.join(testdir, 'projects', project,
                                     'environments.json')
        error = environment_manager.save_environments(testdir, project,
                                                      ENV_DATA)
        assert error == ''
        with open(env_json_path) as json_file:
            file_content = json_file.read()
            assert file_content == ENV_DATA
Example #14
0
    def test_save_environments_invalid_json(self, project_function):
        _, project = project_function.activate()
        env_json_path = os.path.join(project_function.path,
                                     'environments.json')
        original_json = '{"test": "value"}'
        with open(env_json_path, 'w') as json_file:
            json_file.write(original_json)
        error = environment_manager.save_environments(project,
                                                      ENV_DATA_INVALID_JSON)
        assert error == 'must be valid JSON'
        # assert the original environments.json file was not modified
        with open(env_json_path) as json_file:
            file_content = json_file.read()
            assert file_content == original_json
Example #15
0
    def test_save_environments_invalid_json(self, permanent_project_fixture):
        project = permanent_project_fixture['name']
        testdir = permanent_project_fixture['testdir']
        env_json_path = os.path.join(testdir, 'projects', project,
                                     'environments.json')
        original_json = '{"test": ""}'
        with open(env_json_path, 'w') as json_file:
            json_file.write(original_json)
        error = environment_manager.save_environments(testdir, project,
                                                      ENV_DATA_INVALID_JSON)
        assert error == 'must be valid JSON'
        # assert the original environments.json file was not modified
        with open(env_json_path) as json_file:
            file_content = json_file.read()
            assert file_content == original_json
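Taken together, the save_environments tests above pin down a small contract: the data string is written verbatim (empty data included), invalid JSON yields the error string 'must be valid JSON', and on error the existing file is left untouched. A minimal sketch of a function meeting that contract (save_environments_sketch is hypothetical; Golem's own function takes a project rather than a file path):

import json

def save_environments_sketch(env_json_path, env_data):
    # non-empty data must parse as JSON; empty data is accepted as-is
    if env_data:
        try:
            json.loads(env_data)
        except ValueError:
            # leave any existing environments.json untouched
            return 'must be valid JSON'
    with open(env_json_path, 'w') as json_file:
        json_file.write(env_data)
    return ''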
Example #16
0
def project_environments_save():
    project = request.json['project']
    env_data = request.json['environmentData']
    _verify_permissions(Permissions.ADMIN, project)
    error = environment_manager.save_environments(project, env_data)
    return jsonify({'error': error})
Example #17
0
def save_environments():
    # as in Example #8, 'root_path' is presumably a module-level global
    if request.method == 'POST':
        projectname = request.json['project']
        env_data = request.json['environmentData']
        error = environment_manager.save_environments(root_path, projectname, env_data)
        return json.dumps(error)
Example #18
0
    def test_define_execution_list_multiple_tests_datasets_drivers_envs(
            self, testdir_fixture):
        """Verify that the execution list is generated properly when there
        are multiple tests, data sets, drivers and environments
        """
        root_path = testdir_fixture['path']
        project = create_random_project(root_path)
        # create test one
        test_name_one = 'new_test_case_one'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_one)
        # test data for test one
        tdata = [{
            'col1': 'a',
        }, {
            'col1': 'b',
        }]
        test_data.save_external_test_data_file(root_path, project,
                                               test_name_one, tdata)
        # create test two
        test_name_two = 'new_test_case_two'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_two)

        # create two environments
        env_data = {"stage": {"url": "xxx"}, "preview": {"url": "yyy"}}
        env_data_json = json.dumps(env_data)
        environment_manager.save_environments(root_path, project,
                                              env_data_json)

        execution = {
            'tests': [test_name_one, test_name_two],
            'workers': 1,
            'drivers': ['chrome', 'firefox'],
            'environments': ['stage', 'preview'],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(
            root_path, project, execution)
        expected_list = [{
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'a',
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'a',
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'a',
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'a',
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'b',
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'b',
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'b',
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'b',
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_two',
            'data_set': {
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_two',
            'data_set': {
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_two',
            'data_set': {
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_two',
            'data_set': {
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }]

        assert execution_list == expected_list