Example 1
    def test_run_test__exception_in_test(self, project_function_clean, caplog, test_utils):
        """test() throws exception"""
        testdir = project_function_clean.testdir
        project = project_function_clean.name
        test_name = test_utils.random_numeric_string(10)
        content = """
description = 'desc'

def setup(data):
    step('setup step')

def test(data):
    foo = bar

def teardown(data):
    step('teardown step')
"""
        self._create_test(testdir, project, test_name, content)
        report_directory = self._mock_report_directory(testdir, project, test_name)
        settings = settings_manager.get_project_settings(testdir, project)
        browser = _define_browsers_mock(['chrome'])[0]
        test_runner.run_test(workspace=testdir, project=project, test_name=test_name,
                             test_data={}, secrets={}, browser=browser, settings=settings,
                             report_directory=report_directory)
        # verify console logs
        records = caplog.records
        assert records[5].message == 'Test Result: CODE ERROR'
        # verify report.json
        report = self._read_report_json(report_directory)
        assert report['result'] == 'code error'
        assert len(report['steps']) == 3
        assert len(report['errors']) == 1
        assert report['errors'][0]['message'] == "NameError: name 'bar' is not defined"
Example 2
    def test_run_test__failure_in_test(self, project_function_clean, caplog, test_utils):
        """test() throws AssertionError
        teardown() is run
        """
        testdir, project = project_function_clean.activate()
        test_name = test_utils.random_numeric_string(10)
        content = """
description = 'desc'

def setup(data):
    step('setup step')

def test(data):
    step('test step')
    fail('test fail')

def teardown(data):
    step('teardown step')
"""
        self._create_test(testdir, project, test_name, content)
        report_directory = self._mock_report_directory(testdir, project, test_name)
        settings = settings_manager.get_project_settings(project)
        browser = _define_browsers_mock(['chrome'])[0]
        test_runner.run_test(testdir=testdir, project=project, test_name=test_name,
                             test_data={}, secrets={}, browser=browser, settings=settings,
                             report_directory=report_directory)
        # verify console logs
        records = caplog.records
        assert records[6].message == 'Test Result: FAILURE'
        # verify report.json
        report = self._read_report_json(report_directory)
        assert report['result'] == 'failure'
        assert len(report['steps']) == 4
        assert len(report['errors']) == 1
        assert report['errors'][0]['message'] == 'AssertionError: test fail'
Example 3
    def test_run_test__success_with_data(self, project_function_clean, caplog, test_utils):
        """Test runs successfully with test data"""
        testdir = project_function_clean.testdir
        project = project_function_clean.name
        test_name = test_utils.random_numeric_string(10)
        content = """
description = 'some description'
    
def setup(data):
    step('setup step')

def test(data):
    step('test step')

def teardown(data):
    step('teardown step')
"""
        self._create_test(testdir, project, test_name, content)
        report_directory = self._mock_report_directory(testdir, project,
                                                       test_name)
        settings = settings_manager.get_project_settings(testdir, project)
        browser = _define_browsers_mock(['chrome'])[0]
        test_data = dict(username='username1', password='password1')
        secrets = dict(very='secret')
        # run test
        test_runner.run_test(workspace=testdir, project=project, test_name=test_name,
                             test_data=test_data, secrets=secrets, browser=browser, settings=settings,
                             report_directory=report_directory)
        # verify console logs
        records = caplog.records
        assert records[0].message == 'Test execution started: {}'.format(
            test_name)
        assert records[1].message == 'Browser: chrome'
        # Python 3.4 results not in order TODO
        value_a = 'Using data:\n    username: username1\n    password: password1\n'
        value_b = 'Using data:\n    password: password1\n    username: username1\n'
        assert records[2].message in [value_a, value_b]
        assert records[3].message == 'setup step'
        assert records[4].message == 'test step'
        assert records[5].message == 'teardown step'
        assert records[6].message == 'Test Result: SUCCESS'
        # verify report.json
        report = self._read_report_json(report_directory)
        assert report['browser'] == 'chrome'
        assert report['description'] == 'some description'
        assert report['environment'] == ''
        assert report['errors'] == []
        assert report['result'] == 'success'
        # Python 3.4 TODO
        assert report['set_name'] in ['username1', 'password1']
        assert report['steps'] == [
            {'message': 'setup step', 'screenshot': None, 'error': None},
            {'message': 'test step', 'screenshot': None, 'error': None},
            {'message': 'teardown step', 'screenshot': None, 'error': None},
        ]
        assert report['test_case'] == test_name
        assert report['test_data'] == {'username': "******", 'password': "******"}
        assert 'test_elapsed_time' in report
        assert 'test_timestamp' in report
        assert len(report.keys()) == 11
Example 4
    def test_run_test__import_error_page_object(self, project_function_clean,
                                                caplog, test_utils):
        """The test fails with 'code error' when an imported page has a syntax error"""
        testdir = project_function_clean.testdir
        project = project_function_clean.name
        test_name = test_utils.random_numeric_string(10)
        content = """
pages = ['page1']

def setup(data):
    step('this step wont be run')

def test(data):
    step('this step wont be run')

def teardown(data):
    step('this step wont be run')
"""
        self._create_test(testdir, project, test_name, content)
        page_content = """
element1 = ('id', 'someId'
element2 = ('css', '.oh.no')
"""
        self._create_page(testdir, project, 'page1', page_content)
        report_directory = self._mock_report_directory(testdir, project, test_name)
        settings = settings_manager.get_project_settings(testdir, project)
        browser = _define_browsers_mock(['chrome'])[0]
        test_runner.run_test(workspace=testdir, project=project, test_name=test_name,
                             test_data={}, secrets={}, browser=browser, settings=settings,
                             report_directory=report_directory)
        # verify console logs
        records = caplog.records
        assert records[0].message == 'Test execution started: {}'.format(test_name)
        assert records[1].message == 'Browser: chrome'
        assert records[2].levelname == 'ERROR'
        error_contains = "element2 = ('css', '.oh.no')\n           ^\nSyntaxError: invalid syntax"
        assert error_contains in records[2].message
        assert records[3].message == 'Test Result: CODE ERROR'
        # verify report.json
        report = self._read_report_json(report_directory)
        assert report['browser'] == 'chrome'
        assert report['description'] is None  # description could not be read
        assert report['environment'] == ''
        assert len(report['errors']) == 1
        assert 'SyntaxError: invalid syntax' in report['errors'][0]['message']
        assert error_contains in report['errors'][0]['description']
        assert report['result'] == 'code error'
        assert report['set_name'] == ''
        assert report['steps'] == []
        assert report['test_case'] == test_name
        assert report['test_data'] == {}
        assert 'test_elapsed_time' in report
        assert 'test_timestamp' in report
        assert len(report.keys()) == 11
Example 5
    def run_test(code, test_data={}, secrets={}, from_suite=False):
        test_module.edit_test_code(project, test_name, code, [])
        test_runner.run_test(testdir,
                             project,
                             test_name,
                             test_data,
                             secrets,
                             browser,
                             settings,
                             report_directory,
                             from_suite=from_suite)
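For context, a hedged usage sketch of this nested helper (not taken from the source): the enclosing fixture is assumed to already define project, test_name, testdir, browser, settings and report_directory, and the test code string below is purely illustrative.

code = """
def test(data):
    step('a single step')
"""
run_test(code, test_data={'key': 'value'})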
Example 6
    def test_run_test__success(self, project_function_clean, caplog, test_utils):
        """Test runs successfully"""
        testdir = project_function_clean.testdir
        project = project_function_clean.name
        test_name = test_utils.random_numeric_string(10)
        content = """
description = 'some description'

def setup(data):
    step('setup step')

def test(data):
    step('test step')

def teardown(data):
    step('teardown step')
"""
        self._create_test(testdir, project, test_name, content)
        report_directory = self._mock_report_directory(testdir, project, test_name)
        settings = settings_manager.get_project_settings(testdir, project)
        browser = _define_browsers_mock(['chrome'])[0]
        # run test
        test_runner.run_test(workspace=testdir, project=project, test_name=test_name,
                             test_data={}, secrets={}, browser=browser, settings=settings,
                             report_directory=report_directory)
        # verify console logs
        records = caplog.records
        assert records[0].message == 'Test execution started: {}'.format(test_name)
        assert records[1].message == 'Browser: chrome'
        assert records[2].message == 'setup step'
        assert records[3].message == 'test step'
        assert records[4].message == 'teardown step'
        assert records[5].message == 'Test Result: SUCCESS'
        # verify report.json
        report = self._read_report_json(report_directory)
        assert report['browser'] == 'chrome'
        assert report['description'] == 'some description'
        assert report['environment'] == ''
        assert report['errors'] == []
        assert report['result'] == 'success'
        assert report['set_name'] == ''
        assert report['steps'] == [
            {'message': 'setup step', 'screenshot': None, 'error': None},
            {'message': 'test step', 'screenshot': None, 'error': None},
            {'message': 'teardown step', 'screenshot': None, 'error': None},
        ]
        assert report['test_case'] == test_name
        assert report['test_data'] == {}
        assert 'test_elapsed_time' in report
        assert 'test_timestamp' in report
        assert len(report.keys()) == 11
Example 7
    def test_run_test__AssertionError_in_setup(self, project_function_clean,
                                               caplog, test_utils):
        """The test ends with 'failure' when the setup function throws AssertionError.
        Test is not run
        Teardown is run
        """
        testdir = project_function_clean.testdir
        project = project_function_clean.name
        test_name = test_utils.random_numeric_string(10)
        content = """
description = 'desc'

def setup(data):
    fail('setup step fail')

def test(data):
    step('test step')

def teardown(data):
    step('teardown step')
"""
        self._create_test(testdir, project, test_name, content)
        report_directory = self._mock_report_directory(testdir, project, test_name)
        settings = settings_manager.get_project_settings(testdir, project)
        browser = _define_browsers_mock(['chrome'])[0]
        test_runner.run_test(workspace=testdir, project=project, test_name=test_name,
                             test_data={}, secrets={}, browser=browser, settings=settings,
                             report_directory=report_directory)
        # verify console logs
        records = caplog.records
        assert records[0].message == 'Test execution started: {}'.format(test_name)
        assert records[1].message == 'Browser: chrome'
        assert records[2].levelname == 'ERROR'
        assert 'setup step fail' in records[2].message
        assert 'AssertionError: setup step fail' in records[2].message
        assert records[3].message == 'teardown step'
        assert records[4].message == 'Test Result: FAILURE'
        # verify report.json
        report = self._read_report_json(report_directory)
        assert report['description'] == 'desc'
        assert len(report['errors']) == 1
        assert 'setup step fail' in report['errors'][0]['message']
        assert report['result'] == 'failure'
        assert report['steps'][0]['message'] == 'Failure'
        assert 'AssertionError: setup step fail' in report['steps'][0]['error']['description']
        assert report['steps'][1]['message'] == 'teardown step'
Example 8
    def run_test(code,
                 test_data={},
                 secrets={},
                 from_suite=False,
                 set_name=''):
        set_content(code)
        test_runner.run_test(testdir,
                             project,
                             test_name,
                             test_data,
                             secrets,
                             browser,
                             env_name,
                             settings,
                             exec_dir,
                             set_name=set_name,
                             test_functions=[],
                             from_suite=from_suite)
Example 9
    def test_run_test__import_error_on_test(self, project_function_clean, caplog, test_utils):
        """The test fails with 'code error' when it has a syntax error
        Test result is code error"""
        testdir = project_function_clean.testdir
        project = project_function_clean.name
        test_name = test_utils.random_numeric_string(10)
        content = """
description = 'some description'

# missing colon
def test(data)
    step('this step wont be run')
"""
        self._create_test(testdir, project, test_name, content)
        report_directory = self._mock_report_directory(testdir, project,
                                                       test_name)
        settings = settings_manager.get_project_settings(testdir, project)
        browser = _define_browsers_mock(['chrome'])[0]
        test_runner.run_test(workspace=testdir, project=project, test_name=test_name,
                             test_data={}, secrets={}, browser=browser, settings=settings,
                             report_directory=report_directory)
        # verify console logs
        records = caplog.records
        assert records[0].message == 'Test execution started: {}'.format(
            test_name)
        assert records[1].message == 'Browser: chrome'
        assert records[2].levelname == 'ERROR'
        error_contains = 'def test(data)\n                 ^\nSyntaxError: invalid syntax'
        assert error_contains in records[2].message
        assert records[3].message == 'Test Result: CODE ERROR'
        # verify report.json
        report = self._read_report_json(report_directory)
        assert report['browser'] == 'chrome'
        assert report['description'] is None  # description could not be read
        assert report['environment'] == ''
        assert len(report['errors']) == 1
        assert report['errors'][0]['message'] == 'SyntaxError: invalid syntax'
        assert error_contains in report['errors'][0]['description']
        assert report['result'] == 'code error'
        assert report['set_name'] == ''
        assert report['steps'] == []
        assert report['test_case'] == test_name
        assert report['test_data'] == {}
Example 10
    def _execute(self):
        self.start_time = time.time()
        suite_error = False

        # run suite `before` function
        if self.suite.before:
            try:
                self.suite.before.__call__()
            except:
                print('ERROR: suite before function failed')
                print(traceback.format_exc())

        if not suite_error:
            if self.interactive and self.execution.processes != 1:
                print(
                    'WARNING: to run in debug mode, processes must equal one')

            if self.execution.processes == 1:
                # run tests serially
                for test in self.execution.tests:
                    run_test(session.testdir, self.project.name, test.name,
                             test.data_set, test.secrets, test.browser,
                             test.env, session.settings, test.reportdir,
                             test.set_name, self.test_functions,
                             self.execution.has_failed_tests,
                             self.execution.tags, self.is_suite)
            else:
                # run tests using multiprocessing
                multiprocess_executor(self.project.name, self.execution.tests,
                                      self.execution.has_failed_tests,
                                      self.test_functions,
                                      self.execution.processes,
                                      self.execution.tags, self.is_suite)

        # run suite `after` function
        if self.suite.after:
            try:
                self.suite.after.__call__()
            except:
                print('ERROR: suite after function failed')
                print(traceback.format_exc())

        self._finalize()
Example 11
    def execute(self):
        start_time = time.time()
        suite_error = False

        # run suite `before` function
        if self.suite.before:
            try:
                self.suite.before.__call__()
            except:
                print('ERROR: suite before function failed')
                print(traceback.format_exc())

        if not suite_error:
            if self.interactive and self.execution.processes != 1:
                print('WARNING: to run in debug mode, threads must equal one')

            if self.execution.processes == 1:
                # run tests serially
                for test in self.execution.tests:
                    run_test(test_execution.root_path, self.project, test.name,
                             test.data_set, test.browser,
                             test_execution.settings, test.reportdir)
            else:
                # run tests using multiprocessing
                multiprocess_executor(self.project, self.execution.tests,
                                      self.execution.processes)

        # run suite `after` function
        if self.suite.after:
            try:
                self.suite.after.__call__()
            except:
                print('ERROR: suite after function failed')
                print(traceback.format_exc())

        # generate execution_result.json
        elapsed_time = round(time.time() - start_time, 2)
        report_parser.generate_execution_report(self.execution.reportdir,
                                                elapsed_time)
Example 12
    def test_run_test__error_in_setup_test_and_teardown(self, project_function_clean,
                                                        caplog, test_utils):
        """setup(), test() and teardown() have errors
        """
        testdir, project = project_function_clean.activate()
        test_name = test_utils.random_numeric_string(10)
        content = """
description = 'desc'

def setup(data):
    error('setup error')

def test(data):
    error('test error')

def teardown(data):
    error('teardown error')
"""
        self._create_test(testdir, project, test_name, content)
        report_directory = self._mock_report_directory(testdir, project, test_name)
        settings = settings_manager.get_project_settings(project)
        browser = _define_browsers_mock(['chrome'])[0]
        test_runner.run_test(testdir=testdir, project=project, test_name=test_name,
                             test_data={}, secrets={}, browser=browser, settings=settings,
                             report_directory=report_directory)
        # verify console logs
        records = caplog.records
        assert records[5].message == 'Test Result: ERROR'
        # verify report.json
        report = self._read_report_json(report_directory)
        assert report['result'] == 'error'
        assert len(report['steps']) == 3
        assert len(report['errors']) == 3
        assert report['errors'][0]['message'] == 'setup error'
        assert report['errors'][1]['message'] == 'test error'
        assert report['errors'][2]['message'] == 'teardown error'
Example 13
    def _execute(self):
        start_time = time.time()
        suite_error = False

        # run suite `before` function
        if self.suite.before:
            try:
                self.suite.before.__call__()
            except:
                print('ERROR: suite before function failed')
                print(traceback.format_exc())

        if not suite_error:
            if self.interactive and self.execution.processes != 1:
                print('WARNING: to run in debug mode, threads must equal one')

            if self.execution.processes == 1:
                # run tests serially
                for test in self.execution.tests:
                    run_test(test_execution.root_path, self.project, test.name,
                             test.data_set, test.secrets, test.browser,
                             test_execution.settings, test.reportdir,
                             self.execution.has_failed_tests)
            else:
                # run tests using multiprocessing
                multiprocess_executor(self.project, self.execution.tests,
                                      self.execution.has_failed_tests,
                                      self.execution.processes)

        # run suite `after` function
        if self.suite.after:
            try:
                self.suite.after.__call__()
            except:
                print('ERROR: suite after function failed')
                print(traceback.format_exc())

        # generate report.json
        elapsed_time = round(time.time() - start_time, 2)
        self.report = report_parser.generate_execution_report(self.execution.reportdir,
                                                              elapsed_time)

        if self.is_suite:
            self._print_results()

        # generate requested reports
        if self.is_suite:
            report_name = self.report_name or 'report'
            report_folder = self.report_folder or self.execution.reportdir
            if 'junit' in self.reports:
                report_parser.generate_junit_report(self.execution.reportdir,
                                                    self.suite_name, self.timestamp,
                                                    self.report_folder, report_name)
            if 'json' in self.reports and (self.report_folder or self.report_name):
                report_parser.save_execution_json_report(self.report, report_folder, report_name)
            if 'html' in self.reports:
                gui_utils.generate_html_report(self.project, self.suite_name,
                                               self.timestamp, self.report_folder,
                                               report_name)
            if 'html-no-images' in self.reports:
                if 'html' in self.reports:
                    report_name = report_name + '-no-images'
                gui_utils.generate_html_report(self.project, self.suite_name, self.timestamp,
                                               self.report_folder, report_name,
                                               no_images=True)

        # exit to the console with exit status code 1 in case a test fails
        if self.execution.has_failed_tests.value:
            sys.exit(1)
Example 14
def run_test_or_suite(workspace,
                      project,
                      test=None,
                      suite=None,
                      directory=None):
    """Run a suite or test or directory containing tests."""
    execution = {
        'tests': [],
        'workers': 1,
        'drivers': [],
        'environments': [],
        'suite_before': None,
        'suite_after': None
    }

    suite_amount_workers = None
    suite_drivers = None
    suite_envs = []
    suite_name = None
    is_suite = False

    if test:
        execution['tests'] = [test]
        suite_name = 'single_tests'
    elif suite:
        execution['tests'] = suite_module.get_suite_test_cases(
            workspace, project, suite)
        suite_amount_workers = suite_module.get_suite_amount_of_workers(
            workspace, project, suite)
        suite_drivers = suite_module.get_suite_browsers(
            workspace, project, suite)
        suite_envs = suite_module.get_suite_environments(
            workspace, project, suite)
        suite_imported_module = suite_module.get_suite_module(
            workspace, project, suite)
        execution['suite_before'] = getattr(suite_imported_module, 'before',
                                            None)
        execution['suite_after'] = getattr(suite_imported_module, 'after',
                                           None)
        suite_name = suite
        is_suite = True
    elif directory:
        execution['tests'] = utils.get_directory_test_cases(
            workspace, project, directory)
        suite_name = directory
        is_suite = True
    else:
        sys.exit("ERROR: invalid arguments for run_test_or_suite()")

    # warn if no tests were found
    if len(execution['tests']) == 0:
        print('Warning: no tests were found')

    # get amount of workers (parallel executions), default is 1
    if test_execution.thread_amount:
        # the thread count passed through cli has higher priority
        execution['workers'] = test_execution.thread_amount
    elif suite_amount_workers:
        execution['workers'] = suite_amount_workers

    # select the drivers to use in this execution
    # the order of precedence is:
    # 1. drivers defined by CLI
    # 2. drivers defined inside a suite
    # 3. 'default_driver' setting
    # 4. if none of the above is set, 'chrome'
    settings_default_driver = test_execution.settings['default_browser']
    selected_drivers = utils.choose_driver_by_precedence(
        cli_drivers=test_execution.cli_drivers,
        suite_drivers=suite_drivers,
        settings_default_driver=settings_default_driver)

    # Define the attributes for each driver
    #
    # A driver can be predefined ('chrome, 'chrome-headless', 'firefox', etc)
    # or it can be defined by the user with the 'remote_browsers' setting.
    # Remote browsers have extra details such as capabilities
    #
    # Each driver must have the following attributes:
    # 'name': real name,
    # 'full_name': the remote_browser name defined by the user,
    # 'remote': is this a remote_browser or not
    # 'capabilities': full capabilities defined in the remote_browsers setting
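    # A hypothetical entry, for illustration only (the names and capability
    # values below are assumptions, not taken from the source):
    #   {'name': 'chrome',
    #    'full_name': 'chrome_remote',
    #    'remote': True,
    #    'capabilities': {'browserName': 'chrome', 'platform': 'ANY'}}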
    remote_browsers = settings_manager.get_remote_browsers(
        test_execution.settings)
    default_browsers = gui_utils.get_supported_browsers_suggestions()
    execution['drivers'] = _define_drivers(selected_drivers, remote_browsers,
                                           default_browsers)

    # Generate timestamp if needed
    # A timestamp is passed when the test is executed from the GUI.
    # The gui uses this timestamp to fetch the test execution status later on.
    # Otherwise, a new timestamp should be generated at this point
    if not test_execution.timestamp:
        test_execution.timestamp = utils.get_timestamp()

    # Select which envs to use
    # The user can define environments in the environments.json file.
    # The suite/test can be executed in one or more of these environments.
    # Which environments to use is defined by this order of preference:
    # 1. envs passed by CLI
    # 2. envs defined inside the suite
    # 3. The first env defined
    # 4. no envs at all
    #
    # Note, in the case of 4, the test might fail if it tries
    # to use env variables
    cli_envs = test_execution.cli_environments
    project_envs = environment_manager.get_envs(workspace, project)
    execution['environments'] = _select_environments(cli_envs, suite_envs,
                                                     project_envs)

    # Generate the execution list
    #
    # Each test must be executed for each:
    # * data set
    # * environment
    # * driver
    #
    # The result is a list that contains all the requested combinations
    execution_list = _define_execution_list(workspace, project, execution)

    # create the execution directory
    #
    # if this is a suite, the directory takes this structure
    #   reports/<suite_name>/<timestamp>/
    #
    # if this is a single test, the directory takes this structure:
    #   reports/single_tests/<test_name>/<timestamp>/
    execution_directory = _create_execution_directory(workspace,
                                                      project,
                                                      test_execution.timestamp,
                                                      test_name=test,
                                                      suite_name=suite_name,
                                                      is_suite=is_suite)
    # for each test, create the test directory
    # for example, in a suite 'suite1' with a 'test1':
    # reports/suite1/2017.07.02.19.22.20.001/test1/set_00001/
    for test in execution_list:
        report_directory = report.create_report_directory(
            execution_directory, test['test_name'], is_suite)
        test['report_directory'] = report_directory

    # EXECUTION

    start_time = time.time()
    suite_error = False

    # run suite `before` function
    if execution['suite_before']:
        try:
            execution['suite_before'].__call__()
        except:
            print('ERROR: suite before function failed')
            print(traceback.format_exc())

    if not suite_error:
        if test_execution.interactive and execution['workers'] != 1:
            print('WARNING: to run in debug mode, threads must equal one')

        if execution['workers'] == 1:
            # run tests serially
            for test in execution_list:
                run_test(workspace, project, test['test_name'],
                         test['data_set'], test['driver'],
                         test_execution.settings, test['report_directory'])
        else:
            # run tests using multiprocessing
            multiprocess_executor(execution_list, execution['workers'])

    # run suite `after` function
    if execution['suite_after']:
        try:
            execution['suite_after'].__call__()
        except:
            print('ERROR: suite after function failed')
            print(traceback.format_exc())

    # generate execution_result.json
    elapsed_time = round(time.time() - start_time, 2)
    report_parser.generate_execution_report(execution_directory, elapsed_time)
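For orientation, a minimal hedged sketch of how run_test_or_suite might be invoked, based only on the signature and branching above; the workspace path, project and test/suite/directory names are placeholders, not taken from the source.

# Hypothetical invocations: exactly one of test, suite or directory per call.
run_test_or_suite('/path/to/testdir', 'project1', test='login_test')
run_test_or_suite('/path/to/testdir', 'project1', suite='smoke_suite')
run_test_or_suite('/path/to/testdir', 'project1', directory='regression')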