Example #1
    def test_define_execution_list_multiple_tests(self, testdir_fixture):
        """Verify that the execution list is generated properly when there
        are multiple tests in the list
        """
        root_path = testdir_fixture['path']
        project = create_random_project(root_path)
        # create test one
        test_name_one = 'new_test_case_one'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_one)
        tdata = [{
            'col1': 'a',
            'col2': 'b'
        }, {
            'col1': 'c',
            'col2': 'd',
        }]
        test_data.save_external_test_data_file(root_path, project,
                                               test_name_one, tdata)

        # create test two
        test_name_two = 'new_test_case_two'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_two)

        execution = {
            'tests': [test_name_one, test_name_two],
            'workers': 1,
            'drivers': ['chrome'],
            'environments': [''],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(
            root_path, project, execution)

        expected_list = [{
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'a',
                'col2': 'b'
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'c',
                'col2': 'd'
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_two',
            'data_set': {},
            'driver': 'chrome',
            'report_directory': None
        }]
        assert execution_list == expected_list
Example #2
    def test_define_execution_list(self, testdir_fixture):
        """Verify that the execution list is generated properly when there's only
        one test without datasets, one driver and zero environments
        """
        root_path = testdir_fixture['path']
        project = create_random_project(root_path)
        test_name = 'new_test_case_001'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name)

        execution = {
            'tests': [test_name],
            'workers': 1,
            'drivers': ['chrome'],
            'environments': [''],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(root_path, project, execution)
        
        expected_list = [
            {
                'test_name': 'new_test_case_001',
                'data_set': {},
                'driver': 'chrome',
                'report_directory': None
            }
        ]
        assert execution_list == expected_list
Example #3
    def test_define_execution_list(self, testdir_fixture):
        """Verify that the execution list is generated properly when there's only
        one test without datasets, one driver and zero environments
        """
        root_path = testdir_fixture['path']
        project = create_random_project(root_path)
        test_name = 'new_test_case_001'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name)

        execution = {
            'tests': [test_name],
            'workers': 1,
            'drivers': ['chrome'],
            'environments': [''],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(
            root_path, project, execution)

        expected_list = [{
            'test_name': 'new_test_case_001',
            'data_set': {},
            'driver': 'chrome',
            'report_directory': None
        }]
        assert execution_list == expected_list
Example #4
    def test_define_execution_list_multiple_envs(self, project_function_clean):
        """Verify that the execution list is generated properly when the execution
        has multiple envs
        """
        root_path = project_function_clean['testdir']
        project = project_function_clean['name']
        os.chdir(root_path)
        # create test one
        test_name_one = 'test_one_003'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_one)

        # create two environments in environments.json
        env_data = {"stage": {"url": "xxx"}, "preview": {"url": "yyy"}}
        env_data_json = json.dumps(env_data)
        environment_manager.save_environments(root_path, project,
                                              env_data_json)

        execution = {
            'tests': [test_name_one],
            'workers': 1,
            'drivers': ['chrome'],
            'environments': ['stage', 'preview'],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(
            root_path, project, execution)

        expected_list = [
            {
                'test_name': 'test_one_003',
                'data_set': {
                    'env': {
                        'url': 'xxx',
                        'name': 'stage'
                    }
                },
                'driver': 'chrome',
                'report_directory': None
            },
            {
                'test_name': 'test_one_003',
                'data_set': {
                    'env': {
                        'url': 'yyy',
                        'name': 'preview'
                    }
                },
                'driver': 'chrome',
                'report_directory': None
            },
        ]
        assert execution_list == expected_list
Example #5
    def test_define_execution_list_multiple_drivers(self, testdir_fixture):
        """Verify that the execution list is generated properly when there
        are multiple drivers in the list
        """
        root_path = testdir_fixture['path']
        project = create_random_project(root_path)
        # create test one
        test_name_one = 'new_test_case_one'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_one)
        # create test two
        test_name_two = 'new_test_case_two'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_two)

        execution = {
            'tests': [test_name_one, test_name_two],
            'workers': 1,
            'drivers': ['chrome', 'firefox'],
            'environments': [''],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(root_path, project,
                                                                execution)
        
        expected_list = [
            {
                'test_name': 'new_test_case_one',
                'data_set': {},
                'driver': 'chrome',
                'report_directory': None
            },
            {
                'test_name': 'new_test_case_one',
                'data_set': {},
                'driver': 'firefox',
                'report_directory': None
            },
            {
                'test_name': 'new_test_case_two',
                'data_set': {},
                'driver': 'chrome',
                'report_directory': None
            },
            {
                'test_name': 'new_test_case_two',
                'data_set': {},
                'driver': 'firefox',
                'report_directory': None
            }
        ]
        assert execution_list == expected_list
Example #6
    def test_define_execution_list_multiple_data_sets(self,
                                                      project_function_clean):
        """Verify that the execution list is generated properly when a test
        has multiple data sets
        """
        root_path = project_function_clean['testdir']
        project = project_function_clean['name']
        os.chdir(root_path)
        test_name = 'new_test_case_002'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name)

        tdata = [{
            'col1': 'a',
            'col2': 'b'
        }, {
            'col1': 'c',
            'col2': 'd',
        }]
        test_data.save_external_test_data_file(root_path, project, test_name,
                                               tdata)

        execution = {
            'tests': [test_name],
            'workers': 1,
            'drivers': ['chrome'],
            'environments': [''],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(
            root_path, project, execution)

        expected_list = [{
            'test_name': 'new_test_case_002',
            'data_set': {
                'col1': 'a',
                'col2': 'b'
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_002',
            'data_set': {
                'col1': 'c',
                'col2': 'd'
            },
            'driver': 'chrome',
            'report_directory': None
        }]
        assert execution_list == expected_list
Example #7
    def test_define_execution_list_multiple_drivers(self,
                                                    project_function_clean):
        """Verify that the execution list is generated properly when there
        are multiple drivers in the list
        """
        root_path = project_function_clean['testdir']
        project = project_function_clean['name']
        os.chdir(root_path)
        # create test one
        test_name_one = 'test_one_004'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_one)
        # create test two
        test_name_two = 'test_two_004'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_two)

        execution = {
            'tests': [test_name_one, test_name_two],
            'workers': 1,
            'drivers': ['chrome', 'firefox'],
            'environments': [''],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(
            root_path, project, execution)

        expected_list = [{
            'test_name': 'test_one_004',
            'data_set': {},
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'test_one_004',
            'data_set': {},
            'driver': 'firefox',
            'report_directory': None
        }, {
            'test_name': 'test_two_004',
            'data_set': {},
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'test_two_004',
            'data_set': {},
            'driver': 'firefox',
            'report_directory': None
        }]
        assert execution_list == expected_list
Example #8
    def test_define_execution_list_multiple_envs(self, testdir_fixture):
        """Verify that the execution list is generated properly when the execution
        has multiple envs
        """
        root_path = testdir_fixture['path']
        project = create_random_project(root_path)
        # create test one
        test_name_one = 'new_test_case_one'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_one)

        # create two environments in environments.json
        env_data = {
            "stage": {
                "url": "xxx"
            },
            "preview": {
                "url": "yyy"
            }
        }
        env_data_json = json.dumps(env_data)
        environment_manager.save_environments(root_path, project, env_data_json)

        execution = {
            'tests': [test_name_one],
            'workers': 1,
            'drivers': ['chrome'],
            'environments': ['stage', 'preview'],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(root_path, project,
                                                                execution)
        
        expected_list = [
            {
                'test_name': 'new_test_case_one',
                'data_set': {'env': {'url': 'xxx', 'name': 'stage'}},
                'driver': 'chrome',
                'report_directory': None
            },
            {
                'test_name': 'new_test_case_one',
                'data_set': {'env': {'url': 'yyy', 'name': 'preview'}},
                'driver': 'chrome',
                'report_directory': None
            },
        ]
        assert execution_list == expected_list
Example #9
    def test_define_execution_list_multiple_data_sets(self, testdir_fixture):
        """Verify that the execution list is generated properly when a test
        has multiple data sets
        """
        root_path = testdir_fixture['path']
        project = create_random_project(root_path)
        test_name = 'new_test_case_002'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name)

        tdata = [
            {
                'col1': 'a',
                'col2': 'b'
            },
            {
                'col1': 'c',
                'col2': 'd',
            }
        ]
        test_data.save_external_test_data_file(root_path, project, test_name, tdata)

        execution = {
            'tests': [test_name],
            'workers': 1,
            'drivers': ['chrome'],
            'environments': [''],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(root_path, project,
                                                                execution)
        
        expected_list = [
            {
                'test_name': 'new_test_case_002',
                'data_set': {'col1': 'a', 'col2': 'b'},
                'driver': 'chrome',
                'report_directory': None
            },
            {
                'test_name': 'new_test_case_002',
                'data_set': {'col1': 'c', 'col2': 'd'},
                'driver': 'chrome',
                'report_directory': None
            }
        ]
        assert execution_list == expected_list
Example #10
    def test_define_execution_list_multiple_tests_datasets_drivers_envs(
            self, testdir_fixture):
        """Verify that the execution list is generated properly when there
        are multiple tests, data sets, drivers and environments
        """
        root_path = testdir_fixture['path']
        project = create_random_project(root_path)
        # create test one
        test_name_one = 'new_test_case_one'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_one)
        # test data for test one
        tdata = [{
            'col1': 'a',
        }, {
            'col1': 'b',
        }]
        test_data.save_external_test_data_file(root_path, project,
                                               test_name_one, tdata)
        # create test two
        test_name_two = 'new_test_case_two'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_two)

        # create two environments
        env_data = {"stage": {"url": "xxx"}, "preview": {"url": "yyy"}}
        env_data_json = json.dumps(env_data)
        environment_manager.save_environments(root_path, project,
                                              env_data_json)

        execution = {
            'tests': [test_name_one, test_name_two],
            'workers': 1,
            'drivers': ['chrome', 'firefox'],
            'environments': ['stage', 'preview'],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(
            root_path, project, execution)
        expected_list = [{
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'a',
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'a',
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'a',
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'a',
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'b',
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'b',
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'b',
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'b',
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_two',
            'data_set': {
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_two',
            'data_set': {
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_two',
            'data_set': {
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_two',
            'data_set': {
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }]

        assert execution_list == expected_list
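
All of the examples above exercise the same helper, start_execution._define_execution_list, and assert the same output shape: one dictionary per combination of test, data set, environment and driver, each carrying 'test_name', 'data_set', 'driver' and 'report_directory'. As a rough mental model only, not the actual implementation of _define_execution_list, the expected lists are consistent with a nested cartesian product like the sketch below; the function name, its parameters and the test_data_by_test / env_data_by_name dictionaries are hypothetical stand-ins for what the real function reads from the project's data and environment files.

def define_execution_list_sketch(tests, drivers, environments,
                                 test_data_by_test, env_data_by_name):
    """Hypothetical sketch: one entry per (test, data set, env, driver)."""
    execution_list = []
    for test_name in tests:
        # A test with no external data file still runs once with an empty data set.
        data_sets = test_data_by_test.get(test_name) or [{}]
        for data_set in data_sets:
            for env_name in environments:
                for driver in drivers:
                    combined = dict(data_set)
                    if env_name:
                        # Environment values are nested under 'env', with the
                        # environment's name added next to its own keys.
                        combined['env'] = dict(env_data_by_name[env_name],
                                               name=env_name)
                    execution_list.append({
                        'test_name': test_name,
                        'data_set': combined,
                        'driver': driver,
                        'report_directory': None,
                    })
    return execution_list

With tests=['t1'], drivers=['chrome'], environments=[''] and no test data or environments defined, this sketch yields the same single-entry list asserted in Example #3; with the inputs of Example #10 it produces the same test, data set, environment, driver ordering shown in that expected list.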