Example #1
    def test_get_test_data(self, testdir_fixture, project_fixture):
        input_data = [
            {
                'col1': 'a',
                'col2': 'b'
            },
            {
                'col1': 'c',
                'col2': 'd',
            }

        ]
        test_execution.settings = settings_manager.get_project_settings(testdir_fixture['path'],
                                                                        project_fixture['name'])
        test_execution.settings['test_data'] = 'csv'

        test_case.new_test_case(testdir_fixture['path'],
                                project_fixture['name'],
                                [],
                                'test_get_data')
        test_data.save_external_test_data_file(testdir_fixture['path'],
                                               project_fixture['name'],
                                               'test_get_data',
                                               input_data)
        returned_data = test_data.get_test_data(testdir_fixture['path'],
                                                project_fixture['name'],
                                                'test_get_data')
        assert returned_data == input_data
Example #2
    def test_define_execution_list_multiple_tests(self, testdir_fixture):
        """Verify that the execution list is generated properly when there
        are multiple tests in the list
        """
        root_path = testdir_fixture['path']
        project = create_random_project(root_path)
        # create test one
        test_name_one = 'new_test_case_one'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_one)
        tdata = [{
            'col1': 'a',
            'col2': 'b'
        }, {
            'col1': 'c',
            'col2': 'd',
        }]
        test_data.save_external_test_data_file(root_path, project,
                                               test_name_one, tdata)

        # create test two
        test_name_two = 'new_test_case_two'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_two)

        execution = {
            'tests': [test_name_one, test_name_two],
            'workers': 1,
            'drivers': ['chrome'],
            'environments': [''],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(
            root_path, project, execution)

        expected_list = [{
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'a',
                'col2': 'b'
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'c',
                'col2': 'd'
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_two',
            'data_set': {},
            'driver': 'chrome',
            'report_directory': None
        }]
        assert execution_list == expected_list
Example #3
 def test_save_external_data_special_cases(self, project_function_clean,
                                           test_utils):
     testdir = project_function_clean['testdir']
     project = project_function_clean['name']
     test_name = test_utils.random_string(10, 'test')
     input_test_data = [{
         'key1': 'string with spaces'
     }, {
         'key1': 'string "with" quotes'
     }, {
         'key1': 'string \'with\' quotes'
     }, {
         'key1': '"quoted_string"'
     }, {
         'key1': '\'quoted_string\''
     }]
     test_data.save_external_test_data_file(testdir, project, test_name,
                                            input_test_data)
     data_path = os.path.join(testdir, 'projects', project, 'tests',
                              test_name + '.csv')
     with open(data_path) as f:
         result = f.read()
         expected = ('key1\n'
                     'string with spaces\n'
                     '"string ""with"" quotes"\n'
                     'string \'with\' quotes\n'
                     '"""quoted_string"""\n'
                     '\'quoted_string\'\n')
         assert result == expected
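The expected string above follows the default quoting rules of Python's csv module (QUOTE_MINIMAL): a field containing the quote character is wrapped in double quotes and embedded double quotes are doubled, while single quotes pass through untouched. A minimal standalone sketch of that behaviour (not Golem's own implementation) that reproduces the expected file contents:

import csv
import io

rows = [
    {'key1': 'string with spaces'},
    {'key1': 'string "with" quotes'},
    {'key1': "string 'with' quotes"},
    {'key1': '"quoted_string"'},
    {'key1': "'quoted_string'"},
]

buffer = io.StringIO()
# default 'excel' dialect with QUOTE_MINIMAL; '\n' terminator to match the test file
writer = csv.DictWriter(buffer, fieldnames=['key1'], lineterminator='\n')
writer.writeheader()
writer.writerows(rows)
assert buffer.getvalue() == ('key1\n'
                             'string with spaces\n'
                             '"string ""with"" quotes"\n'
                             "string 'with' quotes\n"
                             '"""quoted_string"""\n'
                             "'quoted_string'\n")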
Example #4
    def test_define_execution_list_multiple_data_sets(self, project_function_clean):
        """Verify that the execution list is generated properly when a test
        has multiple data sets
        """
        _, project = project_function_clean.activate()
        test_name = 'test_002'
        parents = []
        test_case.new_test_case(project, parents, test_name)
        tdata = [
            {
                'col1': 'a',
                'col2': 'b'
            },
            {
                'col1': 'c',
                'col2': 'd',
            }

        ]
        test_data.save_external_test_data_file(project, test_name, tdata)
        execution_runner = exc_runner.ExecutionRunner()
        execution_runner.tests = [test_name]
        execution_runner.execution.processes = 1
        execution_runner.execution.browsers = ['chrome']
        execution_runner.execution.envs = ['']
        execution_runner.project = project_function_clean.name
        execution_list = execution_runner._define_execution_list()
        expected_list = [
            SimpleNamespace(name=test_name, data_set={'col1': 'a', 'col2': 'b'}, secrets={},
                            browser='chrome', reportdir=None),
            SimpleNamespace(name=test_name, data_set={'col1': 'c', 'col2': 'd'}, secrets={},
                            browser='chrome', reportdir=None)
        ]
        assert execution_list == expected_list
Example #5
def save_test_case(root_path, project, full_test_case_name, description,
                   page_objects, test_steps, test_data):
    tc_name, parents = utils.separate_file_from_parents(full_test_case_name)
    test_case_path = os.path.join(root_path, 'projects', project, 'tests',
                                  os.sep.join(parents), '{}.py'.format(tc_name))
    formatted_description = format_description(description)
    
    with open(test_case_path, 'w', encoding='utf-8') as f:
        
        # write description
        f.write('\n')
        f.write(formatted_description)
        f.write('\n')
        # write the list of pages
        f.write('pages = {}\n'.format(format_page_object_string(page_objects)))
        f.write('\n')

        # write test data if required or save test data to external file
        if test_execution.settings['test_data'] == 'infile':
            if test_data:
                pretty = pprint.PrettyPrinter(indent=4, width=1)
                #f.write('data = ' + pretty.pformat(test_data) + '\n\n')
                f.write('data = {}'.format(format_data(test_data)))
                test_data_module.remove_csv_if_exists(root_path, project, full_test_case_name)
        else:
            test_data_module.save_external_test_data_file(root_path, project,
                                                          full_test_case_name,
                                                          test_data)

        # write the setup function
        f.write('def setup(data):\n')
        if test_steps['setup']:
            for step in test_steps['setup']:
                step_action = step['action'].replace(' ', '_')
                param_str = ', '.join(step['parameters'])
                f.write('    {0}({1})\n'.format(step_action, param_str))
        else:
            f.write('    pass\n')
        f.write('\n')
        
        # write the test function
        f.write('def test(data):\n')
        if test_steps['test']:
            for step in test_steps['test']:
                step_action = step['action'].replace(' ', '_')
                param_str = ', '.join(step['parameters'])
                f.write('    {0}({1})\n'.format(step_action, param_str))
        else:
            f.write('    pass\n')
        f.write('\n\n')
        
        # write the teardown function
        f.write('def teardown(data):\n')
        if test_steps['teardown']:
            for step in test_steps['teardown']:
                step_action = step['action'].replace(' ', '_')
                param_str = ', '.join(step['parameters'])
                f.write('    {0}({1})\n'.format(step_action, param_str))
        else:
            f.write('    pass\n')
Example #6
def save_test_case(project, full_test_case_name, description, page_objects,
                   test_steps, test_data, tags):
    """Save test case contents to file.

    full_test_case_name is a relative dot path to the test
    """
    test_case_path = test_file_path(project, full_test_case_name)
    formatted_description = _format_description(description)
    with open(test_case_path, 'w', encoding='utf-8') as f:
        # write description
        f.write('\n')
        f.write(formatted_description)
        f.write('\n')
        # write tags
        f.write('tags = {}\n'.format(_format_tags_string(tags)))
        f.write('\n')
        # write the list of pages
        f.write('pages = {}\n'.format(
            _format_page_object_string(page_objects)))
        f.write('\n')
        # write test data if required or save test data to external file
        settings = settings_manager.get_project_settings(project)
        if settings['test_data'] == 'infile':
            if test_data:
                f.write('data = {}'.format(_format_data(test_data)))
                test_data_module.remove_csv_if_exists(project,
                                                      full_test_case_name)
        else:
            test_data_module.save_external_test_data_file(
                project, full_test_case_name, test_data)
        # write the setup function
        f.write('def setup(data):\n')
        if test_steps['setup']:
            for step in test_steps['setup']:
                step_action = step['action'].replace(' ', '_')
                param_str = ', '.join(step['parameters'])
                f.write('    {0}({1})\n'.format(step_action, param_str))
        else:
            f.write('    pass\n')
        f.write('\n')
        # write the test function
        f.write('def test(data):\n')
        if test_steps['test']:
            for step in test_steps['test']:
                step_action = step['action'].replace(' ', '_')
                param_str = ', '.join(step['parameters'])
                f.write('    {0}({1})\n'.format(step_action, param_str))
        else:
            f.write('    pass\n')
        f.write('\n')
        # write the teardown function
        f.write('def teardown(data):\n')
        if test_steps['teardown']:
            for step in test_steps['teardown']:
                step_action = step['action'].replace(' ', '_')
                param_str = ', '.join(step['parameters'])
                f.write('    {0}({1})\n'.format(step_action, param_str))
        else:
            f.write('    pass\n')
Example #7
def save_test_case(root_path, project, full_test_case_name, description,
                   page_objects, test_steps, test_data):
    test_case_path = generate_test_case_path(root_path, project, full_test_case_name)
    formatted_description = format_description(description)
    
    with open(test_case_path, 'w', encoding='utf-8') as f:
        
        # write description
        f.write('\n')
        f.write(formatted_description)
        f.write('\n')
        # write the list of pages
        f.write('pages = {}\n'.format(format_page_object_string(page_objects)))
        f.write('\n')

        # write test data if required or save test data to external file
        if test_execution.settings['test_data'] == 'infile':
            if test_data:
                pretty = pprint.PrettyPrinter(indent=4, width=1)
                #f.write('data = ' + pretty.pformat(test_data) + '\n\n')
                f.write('data = {}'.format(format_data(test_data)))
                test_data_module.remove_csv_if_exists(root_path, project, full_test_case_name)
        else:
            test_data_module.save_external_test_data_file(root_path, project,
                                                          full_test_case_name,
                                                          test_data)

        # write the setup function
        f.write('def setup(data):\n')
        if test_steps['setup']:
            for step in test_steps['setup']:
                step_action = step['action'].replace(' ', '_')
                param_str = ', '.join(step['parameters'])
                f.write('    {0}({1})\n'.format(step_action, param_str))
        else:
            f.write('    pass\n')
        f.write('\n')
        
        # write the test function
        f.write('def test(data):\n')
        if test_steps['test']:
            for step in test_steps['test']:
                step_action = step['action'].replace(' ', '_')
                param_str = ', '.join(step['parameters'])
                f.write('    {0}({1})\n'.format(step_action, param_str))
        else:
            f.write('    pass\n')
        f.write('\n\n')
        
        # write the teardown function
        f.write('def teardown(data):\n')
        if test_steps['teardown']:
            for step in test_steps['teardown']:
                step_action = step['action'].replace(' ', '_')
                param_str = ', '.join(step['parameters'])
                f.write('    {0}({1})\n'.format(step_action, param_str))
        else:
            f.write('    pass\n')
Example #8
    def test_define_execution_list_multiple_tests_datasets_drivers_envs(
            self, project_function_clean):
        """Verify that the execution list is generated properly when there
        are multiple tests, data sets, drivers and environments
        """
        _, project = project_function_clean.activate()
        # create test one
        test_name_one = 'test_one_005'
        parents = []
        test_case.new_test_case(project, parents, test_name_one)
        # test data for test one
        tdata = [
            {
                'col1': 'a',
            },
            {
                'col1': 'b',
            }

        ]
        test_data.save_external_test_data_file(project, test_name_one, tdata)
        # create test two
        test_name_two = 'test_two_005'
        parents = []
        test_case.new_test_case(project, parents, test_name_two)
        # create two environments
        env_data = {
            "stage": {
                "url": "xxx"
            },
            "preview": {
                "url": "yyy"
            }
        }
        env_data_json = json.dumps(env_data)
        environment_manager.save_environments(project, env_data_json)
        execution_runner = exc_runner.ExecutionRunner()
        execution_runner.tests = [test_name_one, test_name_two]
        execution_runner.execution.processes = 1
        execution_runner.execution.browsers = ['chrome', 'firefox']
        execution_runner.execution.envs = ['stage', 'preview']
        execution_runner.project = project
        execution_list = execution_runner._define_execution_list()
        expected_list = [
            SimpleNamespace(browser='chrome', data_set={'col1': 'a', 'env': {'url': 'xxx', 'name': 'stage'}}, secrets={}, name='test_one_005', reportdir=None),
            SimpleNamespace(browser='firefox', data_set={'col1': 'a', 'env': {'url': 'xxx', 'name': 'stage'}}, secrets={}, name='test_one_005', reportdir=None),
            SimpleNamespace(browser='chrome', data_set={'col1': 'a', 'env': {'url': 'yyy', 'name': 'preview'}}, secrets={}, name='test_one_005', reportdir=None),
            SimpleNamespace(browser='firefox', data_set={'col1': 'a', 'env': {'url': 'yyy', 'name': 'preview'}}, secrets={}, name='test_one_005', reportdir=None),
            SimpleNamespace(browser='chrome', data_set={'col1': 'b', 'env': {'url': 'xxx', 'name': 'stage'}}, secrets={}, name='test_one_005', reportdir=None),
            SimpleNamespace(browser='firefox', data_set={'col1': 'b', 'env': {'url': 'xxx', 'name': 'stage'}}, secrets={}, name='test_one_005', reportdir=None),
            SimpleNamespace(browser='chrome', data_set={'col1': 'b', 'env': {'url': 'yyy', 'name': 'preview'}}, secrets={}, name='test_one_005', reportdir=None),
            SimpleNamespace(browser='firefox', data_set={'col1': 'b', 'env': {'url': 'yyy', 'name': 'preview'}}, secrets={}, name='test_one_005', reportdir=None),
            SimpleNamespace(browser='chrome', data_set={'env': {'url': 'xxx', 'name': 'stage'}}, secrets={}, name='test_two_005', reportdir=None),
            SimpleNamespace(browser='firefox', data_set={'env': {'url': 'xxx', 'name': 'stage'}}, secrets={}, name='test_two_005', reportdir=None),
            SimpleNamespace(browser='chrome', data_set={'env': {'url': 'yyy', 'name': 'preview'}}, secrets={}, name='test_two_005', reportdir=None),
            SimpleNamespace(browser='firefox', data_set={'env': {'url': 'yyy', 'name': 'preview'}}, secrets={}, name='test_two_005', reportdir=None)
        ]
        assert execution_list == expected_list
Example #9
 def test_define_execution_list_multiple_tests(self,
                                               project_function_clean):
     """Verify that the execution list is generated properly when there
     are multiple tests in the list
     """
     testdir = project_function_clean.testdir
     project = project_function_clean.name
     os.chdir(testdir)
     # create test one
     test_name_one = 'test_one_001'
     parents = []
     test_case.new_test_case(testdir, project, parents, test_name_one)
     tdata = [{
         'col1': 'a',
         'col2': 'b'
     }, {
         'col1': 'c',
         'col2': 'd',
     }]
     test_data.save_external_test_data_file(testdir, project, test_name_one,
                                            tdata)
     # create test two
     test_name_two = 'test_two_001'
     parents = []
     test_case.new_test_case(testdir, project, parents, test_name_two)
     execution_runner = exc_runner.ExecutionRunner()
     execution_runner.tests = [test_name_one, test_name_two]
     execution_runner.execution.processes = 1
     execution_runner.execution.browsers = ['chrome']
     execution_runner.execution.envs = ['']
     execution_runner.project = project
     execution_list = execution_runner._define_execution_list()
     expected_list = [
         SimpleNamespace(name='test_one_001',
                         data_set={
                             'col1': 'a',
                             'col2': 'b'
                         },
                         secrets={},
                         browser='chrome',
                         reportdir=None),
         SimpleNamespace(name='test_one_001',
                         data_set={
                             'col1': 'c',
                             'col2': 'd'
                         },
                         secrets={},
                         browser='chrome',
                         reportdir=None),
         SimpleNamespace(name='test_two_001',
                         data_set={},
                         secrets={},
                         browser='chrome',
                         reportdir=None)
     ]
     assert execution_list == expected_list
Example #10
 def test_save_external_data_empty_data(self, project_function_clean,
                                        test_utils):
     _, project = project_function_clean.activate()
     test_name = test_utils.random_string(10, 'test')
     input_test_data = []
     test_data.save_external_test_data_file(project, test_name,
                                            input_test_data)
     data_path = os.path.join(project_function_clean.path, 'tests',
                              test_name + '.csv')
     assert not os.path.isfile(data_path)
Example #11
 def test_save_external_data_empty_data(self, project_function_clean,
                                        test_utils):
     testdir = project_function_clean['testdir']
     project = project_function_clean['name']
     test_name = test_utils.random_string(10, 'test')
     input_test_data = []
     test_data.save_external_test_data_file(testdir, project, test_name,
                                            input_test_data)
     data_path = os.path.join(testdir, 'projects', project, 'tests',
                              test_name + '.csv')
     assert not os.path.isfile(data_path)
Example #12
def edit_test_code(project, test_name, content, table_test_data):
    path = Test(project, test_name).path
    with open(path, 'w', encoding='utf-8') as f:
        f.write(content)
    # save test data
    settings = settings_manager.get_project_settings(project)
    if settings['test_data'] == 'csv':
        # save csv data
        test_data_module.save_external_test_data_file(project, test_name, table_test_data)
    elif settings['test_data'] == 'infile':
        # remove csv files
        test_data_module.remove_csv_if_exists(project, test_name)
Example #13
    def test_define_execution_list_multiple_data_sets(self,
                                                      project_function_clean):
        """Verify that the execution list is generated properly when a test
        has multiple data sets
        """
        root_path = project_function_clean['testdir']
        project = project_function_clean['name']
        os.chdir(root_path)
        test_name = 'new_test_case_002'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name)

        tdata = [{
            'col1': 'a',
            'col2': 'b'
        }, {
            'col1': 'c',
            'col2': 'd',
        }]
        test_data.save_external_test_data_file(root_path, project, test_name,
                                               tdata)

        execution = {
            'tests': [test_name],
            'workers': 1,
            'drivers': ['chrome'],
            'environments': [''],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(
            root_path, project, execution)

        expected_list = [{
            'test_name': 'new_test_case_002',
            'data_set': {
                'col1': 'a',
                'col2': 'b'
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_002',
            'data_set': {
                'col1': 'c',
                'col2': 'd'
            },
            'driver': 'chrome',
            'report_directory': None
        }]
        assert execution_list == expected_list
Example #14
 def test_save_external_data_empty_data_file_exists(self,
                                                    project_function_clean,
                                                    test_utils):
     _, project = project_function_clean.activate()
     test_name = test_utils.random_string(10, 'test')
     input_test_data = []
     data_path = os.path.join(project_function_clean.path, 'tests',
                              test_name + '.csv')
     open(data_path, 'w+').close()
     test_data.save_external_test_data_file(project, test_name,
                                            input_test_data)
     with open(data_path) as f:
         assert f.read() == ''
Example #15
def save_test_case_code(root_path, project, full_test_case_name, content, table_test_data):
    test_case_path = generate_test_case_path(root_path, project, full_test_case_name)
    with open(test_case_path, 'w', encoding='utf-8') as test_file:
        test_file.write(content)

    # save test data
    if table_test_data:
        #save csv data
        test_data_module.save_external_test_data_file(root_path, project,
                                                      full_test_case_name,
                                                      table_test_data)
    elif test_execution.settings['test_data'] == 'infile':
        # remove csv files
        test_data_module.remove_csv_if_exists(root_path, project, full_test_case_name)
Example #16
    def test_define_execution_list_multiple_data_sets(self, testdir_fixture):
        """Verify that the execution list is generated properly when a test
        has multiple data sets
        """
        root_path = testdir_fixture['path']
        project = create_random_project(root_path)
        test_name = 'new_test_case_002'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name)

        tdata = [
            {
                'col1': 'a',
                'col2': 'b'
            },
            {
                'col1': 'c',
                'col2': 'd',
            }

        ]
        test_data.save_external_test_data_file(root_path, project, test_name, tdata)

        execution = {
            'tests': [test_name],
            'workers': 1,
            'drivers': ['chrome'],
            'environments': [''],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(root_path, project,
                                                                execution)
        
        expected_list = [
            {
                'test_name': 'new_test_case_002',
                'data_set': {'col1': 'a', 'col2': 'b'},
                'driver': 'chrome',
                'report_directory': None
            },
            {
                'test_name': 'new_test_case_002',
                'data_set': {'col1': 'c', 'col2': 'd'},
                'driver': 'chrome',
                'report_directory': None
            }
        ]
        assert execution_list == expected_list
Example #17
def save_test_case_code(root_path, project, full_test_case_name, content, table_test_data):
    tc_name, parents = utils.separate_file_from_parents(full_test_case_name)
    test_case_path = os.path.join(root_path, 'projects', project, 'tests',
                                  os.sep.join(parents), '{}.py'.format(tc_name))
    with open(test_case_path, 'w', encoding='utf-8') as test_file:
        test_file.write(content)

    # save test data
    if table_test_data:
        #save csv data
        test_data_module.save_external_test_data_file(root_path, project,
                                                      full_test_case_name,
                                                      table_test_data)
    elif test_execution.settings['test_data'] == 'infile':
        # remove csv files
        test_data_module.remove_csv_if_exists(root_path, project, full_test_case_name)
Example #18
def save_test_case_code(root_path, project, full_test_case_name,
                        content, table_test_data):
    """Save test case contents string to file.
    full_test_case_name is a relative dot path to the test.
    """
    test_case_path = generate_test_case_path(root_path, project, full_test_case_name)
    with open(test_case_path, 'w') as test_file:
        test_file.write(content)
    # save test data
    if test_execution.settings['test_data'] == 'csv':
        #save csv data
        test_data_module.save_external_test_data_file(root_path, project,
                                                      full_test_case_name,
                                                      table_test_data)
    elif test_execution.settings['test_data'] == 'infile':
        # remove csv files
        test_data_module.remove_csv_if_exists(root_path, project, full_test_case_name)
Example #19
def save_test_case_code(project, full_test_case_name, content,
                        table_test_data):
    """Save test case contents string to file.
    full_test_case_name is a relative dot path to the test.
    """
    test_case_path = test_file_path(project, full_test_case_name)
    with open(test_case_path, 'w') as test_file:
        test_file.write(content)
    # save test data
    settings = settings_manager.get_project_settings(project)
    if settings['test_data'] == 'csv':
        #save csv data
        test_data_module.save_external_test_data_file(project,
                                                      full_test_case_name,
                                                      table_test_data)
    elif settings['test_data'] == 'infile':
        # remove csv files
        test_data_module.remove_csv_if_exists(project, full_test_case_name)
Example #20
 def test_save_external_data(self, project_function_clean, test_utils):
     _, project = project_function_clean.activate()
     test_name = test_utils.random_string(10, 'test')
     input_test_data = [{
         'key1': 'value1',
         'key2': 'value2'
     }, {
         'key1': 'value3',
         'key2': 'value4'
     }]
     test_data.save_external_test_data_file(project, test_name,
                                            input_test_data)
     data_path = os.path.join(project_function_clean.path, 'tests',
                              test_name + '.csv')
     with open(data_path) as f:
         result = f.read()
         expected = ('key1,key2\nvalue1,value2\nvalue3,value4\n')
         expected_var = ('key2,key1\nvalue2,value1\nvalue4,value3\n')
         assert result == expected or result == expected_var
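The assertion above accepts two column orders because the CSV header is derived from the keys of the first data set dict; on interpreters older than Python 3.7 that key order was not guaranteed. A small sketch of the same idea, assuming a plain csv.DictWriter rather than Golem's internals:

import csv
import io

data = [{'key1': 'value1', 'key2': 'value2'},
        {'key1': 'value3', 'key2': 'value4'}]

buffer = io.StringIO()
# field names taken from the first data set; their order decides the header order
writer = csv.DictWriter(buffer, fieldnames=list(data[0].keys()), lineterminator='\n')
writer.writeheader()
writer.writerows(data)
# on Python 3.7+ dicts preserve insertion order, so this matches the first variant
assert buffer.getvalue() == 'key1,key2\nvalue1,value2\nvalue3,value4\n'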
Example #21
def edit_test(project, test_name, description, pages, steps, test_data, tags, skip=False):
    """Save test contents to file"""

    def _format_description(description):
        """Format description string to store in test."""
        description = description.replace('"', '\\"').replace("'", "\\'")
        if '\n' in description:
            desc_lines = description.split('\n')
            formatted_description = 'description = \'\'\''
            for line in desc_lines:
                formatted_description = formatted_description + '\n' + line
            formatted_description = formatted_description + '\'\'\'\n'
        else:
            formatted_description = 'description = \'{}\'\n'.format(description)
        return formatted_description

    def _format_tags_string(tags):
        tags_string = ''
        for tag in tags:
            tags_string = tags_string + " '" + tag + "',"
        tags_string = "[{}]".format(tags_string.strip()[:-1])
        return tags_string

    def _format_page_string(pages):
        """Format page object string to store in test."""
        po_string = ''
        for page in pages:
            po_string = po_string + " '" + page + "',\n" + " " * 8
        po_string = "[{}]".format(po_string.strip()[:-1])
        return po_string

    def _format_data(test_data):
        result = '[\n'
        for data_set in test_data:
            result += '    {\n'
            for key, value in data_set.items():
                if not value:
                    value = "''"
                result += '        \'{}\': {},\n'.format(key, value)
            result += '    },\n'
        result += ']\n\n'
        return result

    def _format_steps(steps):
        step_str = ''
        for step in steps:
            if step['type'] == 'function-call':
                step_action = step['action'].replace(' ', '_')
                param_str = ', '.join(step['parameters'])
                step_str += '    {0}({1})\n'.format(step_action, param_str)
            else:
                lines = step['code'].splitlines()
                for line in lines:
                    step_str += '    {}\n'.format(line)
        return step_str

    path = Test(project, test_name).path
    settings = settings_manager.get_project_settings(project)
    with open(path, 'w', encoding='utf-8') as f:
        if not settings['implicit_actions_import']:
            f.write('from golem import actions\n\n')
        if not settings['implicit_page_import']:
            for page in pages:
                split = page.split('.')
                top = split.pop()
                parents = '.'.join(split)
                parents = '.{}'.format(parents) if parents else ''
                f.write('from projects.{}.pages{} import {}\n'.format(project, parents, top))
            f.write('\n')
        f.write('\n')
        f.write(_format_description(description))
        f.write('\n')
        f.write('tags = {}\n'.format(_format_tags_string(tags)))
        f.write('\n')
        if settings['implicit_page_import']:
            f.write('pages = {}\n'.format(_format_page_string(pages)))
            f.write('\n')
        if settings['test_data'] == 'infile':
            if test_data:
                f.write('data = {}'.format(_format_data(test_data)))
                test_data_module.remove_csv_if_exists(project, test_name)
        else:
            test_data_module.save_external_test_data_file(project, test_name, test_data)
        if skip:
            if type(skip) is str:
                skip = "'{}'".format(skip)
            f.write('skip = {}\n\n'.format(skip))
        f.write('\n')
        f.write('def setup(data):\n')
        if steps['setup']:
            f.write(_format_steps(steps['setup']))
        else:
            f.write('    pass\n')
        f.write('\n\n')
        f.write('def test(data):\n')
        if steps['test']:
            f.write(_format_steps(steps['test']))
        else:
            f.write('    pass\n')
        f.write('\n\n')
        f.write('def teardown(data):\n')
        if steps['teardown']:
            f.write(_format_steps(steps['teardown']))
        else:
            f.write('    pass\n')
Example #22
def save_test_case(root_path, project, full_test_case_name, description,
                   app_objects, test_steps, test_data):
    """Save test case contents to file.

    full_test_case_name is a relative dot path to the test
    """
    test_case_path = generate_test_case_path(root_path, project,
                                             full_test_case_name)
    formatted_description = _format_description(description)
    with open(test_case_path, 'w', encoding='utf-8') as f:
        # write description
        f.write('\n')
        f.write(formatted_description)
        f.write('\n')
        # write the list of page
        # f.write('pages = {}\n'.format(_format_page_object_string(page_objects)))
        # f.write('\n')
        f.write('apps = {}\n'.format(app_objects))
        f.write('\n')
        # write test data if required or save test data to external file
        if test_execution.settings['test_data'] == 'infile':
            if test_data:
                pretty = pprint.PrettyPrinter(indent=4, width=1)
                #f.write('data = ' + pretty.pformat(test_data) + '\n\n')
                f.write('data = {}'.format(_format_data(test_data)))
                test_data_module.remove_csv_if_exists(root_path, project,
                                                      full_test_case_name)
        else:
            test_data_module.save_external_test_data_file(
                root_path, project, full_test_case_name, test_data)
        # write the setup function
        f.write('def setup(self):\n')
        if test_steps['setup']:
            # add Appium configuration info
            f.write("    self.desired_caps = {}\n")
            f.write("    self.desired_caps['platformName'] = 'Android'\n")
            f.write(
                "    self.desired_caps['deviceName'] = 'KVD6JZ7999999999' \n")
            f.write("    self.desired_caps['platformVersion'] = '5.0.2'\n")
            f.write("    self.dess['app'] = '" + app_objects['apppath'] +
                    "'\n")
            f.write("    self.desired_caps['appPackage'] = '" +
                    app_objects['appPackagename'] + "'\n")
            f.write("    self.desired_caps['appActivity'] = '" +
                    app_objects['appActivityname'] + "'\n")
            f.write(
                "    self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', self.desired_caps)\n"
            )
            for step in test_steps['setup']:
                step_action = step['action'].replace(' ', '_')
                parameters = step['parameters'][0]
                print("step['parameters'][0]============", parameters)
                if (parameters['way'] != None):
                    way = parameters['way']
                elif (parameters['element'] != None):
                    element = parameters['element']
                elif (parameters['value'] != None):
                    value = parameters['value']
                param_str = ', '.join(way)
                f.write('    {0}({1})\n'.format(step_action, param_str))
        else:
            # add Appium configuration info
            f.write("    self.desired_caps = {}\n")
            f.write("    self.desired_caps['platformName'] = 'Android'\n")
            f.write(
                "    self.desired_caps['deviceName'] = 'KVD6JZ7999999999' \n")
            f.write("    self.desired_caps['platformVersion'] = '5.0.2'\n")
            f.write("    self.desired_caps['app'] = '" +
                    app_objects['apppath'] + "'\n")
            f.write("    self.desired_caps['appPackage'] = '" +
                    app_objects['appPackagename'] + "'\n")
            f.write("    self.desired_caps['appActivity'] = '" +
                    app_objects['appActivityname'] + "'\n")
            f.write(
                "    self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', self.desired_caps)\n"
            )
        f.write('\n')
        # write the test function
        f.write('def test(data):\n')
        if test_steps['test']:
            for step in test_steps['test']:
                step_action = step['action'].replace(' ', '_')
                parameters = step['parameters'][0]
                print("step['parameters'][0]============", parameters)
                if (parameters['way'] != None):
                    way = parameters['way']
                elif (parameters['element'] != None):
                    element = parameters['element']
                elif (parameters['value'] != None):
                    value = parameters['value']
                param_str = ', '.join(way)
                f.write('    {0}({1})\n'.format(step_action, param_str))
        else:
            f.write('    pass\n')
        f.write('\n\n')
        # write the teardown function
        f.write('def teardown(data):\n')
        if test_steps['teardown']:
            for step in test_steps['teardown']:
                step_action = step['action'].replace(' ', '_')
                parameters = step['parameters'][0]
                print("step['parameters'][0]============", parameters)
                if (parameters['way'] != None):
                    way = parameters['way']
                elif (parameters['element'] != None):
                    element = parameters['element']
                elif (parameters['value'] != None):
                    value = parameters['value']
                param_str = ', '.join(way)
                f.write('    {0}({1})\n'.format(step_action, param_str))
        else:
            f.write('    pass\n')
Example #23
    def test_define_execution_list_multiple_tests_datasets_drivers_envs(
            self, testdir_fixture):
        """Verify that the execution list is generated properly when there
        are multiple tests, data sets, drivers and environments
        """
        root_path = testdir_fixture['path']
        project = create_random_project(root_path)
        # create test one
        test_name_one = 'new_test_case_one'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_one)
        # test data for test one
        tdata = [{
            'col1': 'a',
        }, {
            'col1': 'b',
        }]
        test_data.save_external_test_data_file(root_path, project,
                                               test_name_one, tdata)
        # create test two
        test_name_two = 'new_test_case_two'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_two)

        # create two environments
        env_data = {"stage": {"url": "xxx"}, "preview": {"url": "yyy"}}
        env_data_json = json.dumps(env_data)
        environment_manager.save_environments(root_path, project,
                                              env_data_json)

        execution = {
            'tests': [test_name_one, test_name_two],
            'workers': 1,
            'drivers': ['chrome', 'firefox'],
            'environments': ['stage', 'preview'],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(
            root_path, project, execution)
        expected_list = [{
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'a',
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'a',
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'a',
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'a',
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'b',
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'b',
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'b',
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_one',
            'data_set': {
                'col1': 'b',
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_two',
            'data_set': {
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_two',
            'data_set': {
                'env': {
                    'url': 'xxx',
                    'name': 'stage'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_two',
            'data_set': {
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'chrome',
            'report_directory': None
        }, {
            'test_name': 'new_test_case_two',
            'data_set': {
                'env': {
                    'url': 'yyy',
                    'name': 'preview'
                }
            },
            'driver': 'firefox',
            'report_directory': None
        }]

        assert execution_list == expected_list
Example #24
    def test_define_execution_list_multiple_tests_datasets_drivers_envs(self, testdir_fixture):
        """Verify that the execution list is generated properly when there
        are multiple tests, data sets, drivers and environments
        """
        root_path = testdir_fixture['path']
        project = create_random_project(root_path)
        # create test one
        test_name_one = 'new_test_case_one'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_one)
        # test data for test one
        tdata = [
            {
                'col1': 'a',
            },
            {
                'col1': 'b',
            }

        ]
        test_data.save_external_test_data_file(root_path, project, test_name_one, tdata)
        # create test two
        test_name_two = 'new_test_case_two'
        parents = []
        test_case.new_test_case(root_path, project, parents, test_name_two)

        # create two environments
        env_data = {
            "stage": {
                "url": "xxx"
            },
            "preview": {
                "url": "yyy"
            }
        }
        env_data_json = json.dumps(env_data)
        environment_manager.save_environments(root_path, project, env_data_json)
        
        execution = {
            'tests': [test_name_one, test_name_two],
            'workers': 1,
            'drivers': ['chrome', 'firefox'],
            'environments': ['stage', 'preview'],
            'suite_before': None,
            'suite_after': None
        }

        execution_list = start_execution._define_execution_list(root_path, project,
                                                                execution)
        expected_list = [
        {'test_name': 'new_test_case_one', 'data_set': {'col1': 'a', 'env': {'url': 'xxx', 'name': 'stage'}}, 'driver': 'chrome', 'report_directory': None},
        {'test_name': 'new_test_case_one', 'data_set': {'col1': 'a', 'env': {'url': 'xxx', 'name': 'stage'}}, 'driver': 'firefox', 'report_directory': None},
        {'test_name': 'new_test_case_one', 'data_set': {'col1': 'a', 'env': {'url': 'yyy', 'name': 'preview'}}, 'driver': 'chrome', 'report_directory': None},
        {'test_name': 'new_test_case_one', 'data_set': {'col1': 'a', 'env': {'url': 'yyy', 'name': 'preview'}}, 'driver': 'firefox', 'report_directory': None},
        {'test_name': 'new_test_case_one', 'data_set': {'col1': 'b', 'env': {'url': 'xxx', 'name': 'stage'}}, 'driver': 'chrome', 'report_directory': None},
        {'test_name': 'new_test_case_one', 'data_set': {'col1': 'b', 'env': {'url': 'xxx', 'name': 'stage'}}, 'driver': 'firefox', 'report_directory': None},
        {'test_name': 'new_test_case_one', 'data_set': {'col1': 'b', 'env': {'url': 'yyy', 'name': 'preview'}}, 'driver': 'chrome', 'report_directory': None},
        {'test_name': 'new_test_case_one', 'data_set': {'col1': 'b', 'env': {'url': 'yyy', 'name': 'preview'}}, 'driver': 'firefox', 'report_directory': None},
        {'test_name': 'new_test_case_two', 'data_set': {'env': {'url': 'xxx', 'name': 'stage'}}, 'driver': 'chrome', 'report_directory': None},
        {'test_name': 'new_test_case_two', 'data_set': {'env': {'url': 'xxx', 'name': 'stage'}}, 'driver': 'firefox', 'report_directory': None},
        {'test_name': 'new_test_case_two', 'data_set': {'env': {'url': 'yyy', 'name': 'preview'}}, 'driver': 'chrome', 'report_directory': None},
        {'test_name': 'new_test_case_two', 'data_set': {'env': {'url': 'yyy', 'name': 'preview'}}, 'driver': 'firefox', 'report_directory': None}]

        assert execution_list == expected_list
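The last two examples verify the same combinatorial behaviour: one execution entry per combination of data set, environment and browser for each test, with an empty data set substituted when a test has no data file. A rough sketch of that expansion using itertools.product; the function name and signature below are illustrative only and do not mirror Golem's _define_execution_list:

import itertools
from types import SimpleNamespace

def build_execution_list(tests, data_sets_by_test, environments, browsers):
    # tests: list of test names
    # data_sets_by_test: dict mapping a test name to its list of data set dicts
    # environments: list of dicts such as {'url': 'xxx', 'name': 'stage'}, or [None]
    execution_list = []
    for test_name in tests:
        # a test without external data still runs once, with an empty data set
        data_sets = data_sets_by_test.get(test_name) or [{}]
        for data_set, env, browser in itertools.product(data_sets, environments, browsers):
            full_data = dict(data_set)
            if env:
                full_data['env'] = env
            execution_list.append(SimpleNamespace(name=test_name,
                                                  data_set=full_data,
                                                  secrets={},
                                                  browser=browser,
                                                  reportdir=None))
    return execution_list

expanded = build_execution_list(
    tests=['test_one_005', 'test_two_005'],
    data_sets_by_test={'test_one_005': [{'col1': 'a'}, {'col1': 'b'}]},
    environments=[{'url': 'xxx', 'name': 'stage'}, {'url': 'yyy', 'name': 'preview'}],
    browsers=['chrome', 'firefox'])
assert len(expanded) == 12  # 2 data sets x 2 envs x 2 browsers, plus 1 x 2 x 2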