Code example #1
0
File: test_runner.py — Project: sampadrout/golem
    def finalize(self):
        """Resolve the final test result, log it, fill ``self.result``
        with the collected execution data and generate the JSON report.

        Also flags the whole execution as failed (shared value used later
        for the process exit status) when this test ended in an error state.
        """
        elapsed = round(time.time() - self.test_start_time, 2)
        # CODE_ERROR and FAILURE are final verdicts; any other state is
        # downgraded to ERROR when errors were collected during the run.
        final_verdicts = (ResultsEnum.CODE_ERROR, ResultsEnum.FAILURE)
        if self.result['result'] not in final_verdicts:
            if execution.errors:
                self.result['result'] = ResultsEnum.ERROR
        # No verdict at all means the test finished cleanly.
        if self.result['result'] is None:
            self.result['result'] = ResultsEnum.SUCCESS
        execution.logger.info(
            'Test Result: {}'.format(self.result['result'].upper()))

        browser_def = execution.browser_definition
        self.result.update({
            'description': execution.description,
            'steps': execution.steps,
            'errors': execution.errors,
            'test_elapsed_time': elapsed,
            'test_timestamp': self.test_timestamp,
            'browser': browser_def['name'],
            'browser_full_name': browser_def['full_name'],
        })
        # Report a test has failed in the test execution, this will later determine the exit status
        error_states = (ResultsEnum.CODE_ERROR, ResultsEnum.ERROR,
                        ResultsEnum.FAILURE)
        if self.execution_has_failed_tests is not None \
                and self.result['result'] in error_states:
            self.execution_has_failed_tests.value = True
        report.generate_report(self.report_directory, self.test.name,
                               execution.data, self.result)
        execution_logger.reset_logger(execution.logger)
Code example #2
0
    def finalize(self):
        """Resolve the final test result, log it, fill ``self.result``
        with the collected execution data, write the JSON report and
        reset the shared execution state."""
        elapsed = round(time.time() - self.test_start_time, 2)
        # CODE_ERROR and FAILURE are final verdicts.  Anything else is
        # decided here: ERROR when errors were collected, SUCCESS otherwise.
        final_verdicts = (ResultsEnum.CODE_ERROR, ResultsEnum.FAILURE)
        if self.result['result'] not in final_verdicts:
            if execution.errors:
                self.result['result'] = ResultsEnum.ERROR
            else:
                self.result['result'] = ResultsEnum.SUCCESS
        execution.logger.info(
            'Test Result: {}'.format(self.result['result'].upper()))

        browser_def = execution.browser_definition
        self.result.update({
            'description': execution.description,
            'steps': execution.steps,
            'errors': execution.errors,
            'test_elapsed_time': elapsed,
            'test_timestamp': self.test_timestamp,
            'browser': browser_def['name'],
            'browser_full_name': browser_def['full_name'],
        })
        report.generate_report(self.report_directory, self.test_name,
                               execution.data, self.result)
        execution_logger.reset_logger(execution.logger)
        execution._reset()
Code example #3
0
    def test_generate_report_with_env(self, permanent_project_fixture):
        """generate_report includes the environment name taken from the
        'env' entry of the test data in the generated report.json."""
        project = permanent_project_fixture['name']
        testdir = permanent_project_fixture['testdir']
        timestamp = utils.get_timestamp()
        test_name = 'testing_report_003'
        exec_dir = report.create_execution_directory(
            testdir, project, timestamp, test_name=test_name)
        report_dir = report.create_report_directory(
            exec_dir, test_name, is_suite=True)
        test_data = test_runner.Data({
            'env': {'name': 'env01', 'url': '1.1.1.1'},
            'var2': 'value2'
        })

        result = {
            'result': 'pass',
            'error': '',
            'description': 'description of the test',
            'steps': ['step1', 'step2'],
            'test_elapsed_time': 22.22,
            'test_timestamp': '2018.02.04.02.16.42.729',
            'browser': 'chrome',
            'browser_full_name': '',
            'set_name': 'set_001',
        }
        report.generate_report(report_dir, test_name, test_data, result)
        expected = {
            'test_case': test_name,
            'result': 'pass',
            'steps': ['step1', 'step2'],
            'description': 'description of the test',
            'error': '',
            'short_error': '',
            'test_elapsed_time': 22.22,
            'test_timestamp': '2018.02.04.02.16.42.729',
            'browser': 'chrome',
            'environment': 'env01',
            'set_name': 'set_001',
            'test_data': {
                'env': "{'name': 'env01', 'url': '1.1.1.1'}",
                'var2': "'value2'"
            }
        }
        report_path = os.path.join(report_dir, 'report.json')
        with open(report_path) as report_file:
            assert json.load(report_file) == expected
Code example #4
0
File: report_test.py — Project: devilrancy/golem
 def test_generate_report(self, project_session):
     """generate_report writes report.json including the suite timestamp,
     test id, current user and hostname."""
     project = project_session['name']
     testdir = project_session['testdir']
     timestamp = utils.get_timestamp()
     test_name = 'testing_report_002'
     exec_dir = report.create_execution_directory(
         testdir, project, timestamp, test_name=test_name)
     report_dir = report.create_report_directory(
         exec_dir, test_name, is_suite=True)
     test_data = {
         'var1': 'value1',
         'var2': 'value2'
     }
     result = {
         'result': 'pass',
         'error': '',
         'description': 'description of the test',
         'steps': ['step1', 'step2'],
         'test_elapsed_time': 22.22,
         'test_timestamp': '2018.02.04.02.16.42.729',
         'browser': 'chrome',
         'browser_full_name': '',
         'set_name': 'set_001',
     }
     # test id is derived from the suite timestamp and the data values
     test_id = hash(timestamp + str(test_data))
     report.generate_report(report_dir, test_name, test_data, result,
                            timestamp, test_id)
     expected = {
         'test_case': test_name,
         'result': 'pass',
         'steps': ['step1', 'step2'],
         'description': 'description of the test',
         'error': '',
         'short_error': '',
         'test_elapsed_time': 22.22,
         'test_timestamp': '2018.02.04.02.16.42.729',
         'browser': 'chrome',
         'environment': '',
         'set_name': 'set_001',
         'suite_timestamp': timestamp,
         'test_id': test_id,
         'user': getpass.getuser(),
         'hostname': socket.gethostname(),
         'test_data': {
             'var1': "'value1'",
             'var2': "'value2'"
         }
     }
     report_path = os.path.join(report_dir, 'report.json')
     with open(report_path) as report_file:
         assert json.load(report_file) == expected
Code example #5
0
File: report_test.py — Project: r-roos/golem
 def test_generate_report_with_env(self, project_session):
     """generate_report picks the environment name from the 'env' entry
     of the test data and serializes each data value as a string."""
     timestamp = utils.get_timestamp()
     test_name = 'testing_report_003'
     exec_dir = report.create_execution_directory(
         project_session.testdir, project_session.name, timestamp,
         test_name=test_name)
     report_dir = report.create_report_directory(exec_dir, test_name,
                                                 is_suite=True)
     test_data = test_runner.Data({
         'env': {'name': 'env01', 'url': '1.1.1.1'},
         'var2': 'value2'
     })
     result = {
         'result': 'success',
         'errors': [],
         'description': 'description of the test',
         'steps': [
             {'message': 'step1', 'screenshot': None, 'error': None},
             {'message': 'step2', 'screenshot': None, 'error': None}
         ],
         'test_elapsed_time': 22.22,
         'test_timestamp': '2018.02.04.02.16.42.729',
         'browser': 'chrome',
         'browser_full_name': '',
         'set_name': 'set_001',
     }
     report.generate_report(report_dir, test_name, test_data, result)
     report_path = os.path.join(report_dir, 'report.json')
     with open(report_path) as report_file:
         actual = json.load(report_file)
         assert len(actual.items()) == 11
         assert actual['test_case'] == test_name
         assert actual['result'] == 'success'
         assert actual['steps'][0]['message'] == 'step1'
         assert actual['steps'][1]['message'] == 'step2'
         assert actual['description'] == 'description of the test'
         assert actual['errors'] == []
         assert actual['test_elapsed_time'] == 22.22
         assert actual['test_timestamp'] == '2018.02.04.02.16.42.729'
         assert actual['browser'] == 'chrome'
         assert actual['environment'] == 'env01'
         assert actual['set_name'] == 'set_001'
         # the serialized env dict's key order is not guaranteed
         env_variants = [
             "{'url': '1.1.1.1', 'name': 'env01'}",
             "{'name': 'env01', 'url': '1.1.1.1'}",
         ]
         assert actual['test_data']['env'] in env_variants
         assert actual['test_data']['var2'] == "'value2'"
Code example #6
0
File: test_runner.py — Project: IbnuFajar/golem
def run_test(workspace, project, test_name, test_data, browser, settings,
             report_directory):
    """Run a single test case by name and write its JSON report.

    Imports the test module, injects page objects, the logger and the
    actions into it, runs setup/test/teardown, closes the webdriver if it
    is still open, fills the ``result`` dict and writes it via
    ``report.generate_report``.

    Args:
        workspace: root directory of the golem workspace.
        project: project name (sub-directory under ``projects``).
        test_name: test module name, slash- or dot-separated.
        test_data: dict of data values for this test run.
        browser: browser definition dict with 'name' and 'full_name' keys.
        settings: settings dict (log levels, screenshot flags, ...).
        report_directory: directory where the report and logs are written.
    """
    # Result skeleton; filled in as the run progresses.
    result = {
        'result': 'pass',
        'error': '',
        'description': '',
        'steps': [],
        'test_elapsed_time': None,
        'test_timestamp': None,
        'browser': '',
        'browser_full_name': '',
        'set_name': '',
    }

    # Imported inside the function so each run picks up the shared
    # ``execution`` module state at call time.
    from golem.core import execution_logger
    from golem import actions
    from golem import execution

    # convert test_data to data obj
    class Data(dict):
        """dot notation access to dictionary attributes"""
        def __getattr__(*args):
            val = dict.get(*args)
            return Data(val) if type(val) is dict else val

        __setattr__ = dict.__setitem__
        __delattr__ = dict.__delitem__

    execution.data = Data(test_data)

    # set set_name
    # set name is the value of 'set_name' if present in the data table
    # if it is not present, use the value of the first column in the data table
    # if there's no data in the data table, leave set_name as ''
    if 'set_name' in test_data:
        result['set_name'] = test_data['set_name']
    elif test_data:
        # NOTE(review): relies on dict iteration order; if 'env' happens to
        # be the first key, its dict becomes the set name — confirm intended.
        result['set_name'] = test_data[next(iter(test_data))]

    logger = execution_logger.get_logger(report_directory,
                                         settings['console_log_level'],
                                         settings['file_log_level'],
                                         settings['log_all_events'])
    execution.logger = logger

    # Print execution info to console
    logger.info('Test execution started: {}'.format(test_name))
    logger.info('Browser: {}'.format(browser['name']))
    if 'env' in test_data:
        if 'name' in test_data['env']:
            logger.info('Environment: {}'.format(test_data['env']['name']))
    if test_data:
        data_string = '\n'
        for key, value in test_data.items():
            if key == 'env':
                # only the env url is logged, not the whole env dict
                if 'url' in value:
                    data_string += '    {}: {}\n'.format('url', value['url'])
            else:
                data_string += '    {}: {}\n'.format(key, value)
        logger.info('Using data: {}'.format(data_string))

    test_timestamp = utils.get_timestamp()
    test_start_time = time.time()
    # Publish the run context as module-level state on ``execution`` so
    # actions and the test module can read it.
    execution.project = project
    execution.workspace = workspace
    execution.browser_definition = browser
    execution.settings = settings
    execution.report_directory = report_directory

    # add the 'project' directory to python path
    # so it's possible to make relative imports from the test
    # example, some_test.py
    # from pages import some_page
    sys.path.append(os.path.join(workspace, 'projects', project))

    test_module = None

    try:
        if '/' in test_name:
            test_name = test_name.replace('/', '.')
        test_module = importlib.import_module('projects.{0}.tests.{1}'.format(
            project, test_name))

        # import the page objects into the test module
        if hasattr(test_module, 'pages'):
            for page in test_module.pages:
                test_module = utils.generate_page_object_module(
                    project, test_module, page, page.split('.'))

        # import logger into the test module
        setattr(test_module, 'logger', execution.logger)

        # import actions into the test module
        for action in dir(actions):
            setattr(test_module, action, getattr(actions, action))

        # log description
        if hasattr(test_module, 'description'):
            execution.description = test_module.description
        else:
            logger.info('Test does not have description')

        # run setup method
        if hasattr(test_module, 'setup'):
            test_module.setup(execution.data)
        else:
            logger.info('Test does not have setup function')

        # 'test' is the only required function of a test module
        if hasattr(test_module, 'test'):
            test_module.test(execution.data)
        else:
            raise Exception('Test does not have test function')

        if settings['screenshot_on_end'] and execution.browser:
            actions.capture('test end')
    # NOTE(review): bare except — deliberately catches any failure above
    # (including import errors) so the run always reaches teardown/report.
    except:
        result['result'] = 'fail'
        result['error'] = traceback.format_exc()
        try:
            if settings['screenshot_on_error'] and execution.browser:
                actions.capture('error')
        except:
            # if the test failed and driver is not available
            # capture screenshot is not possible, continue
            pass

        logger.error('An error ocurred:', exc_info=True)

    # teardown runs regardless of the test outcome
    try:
        if hasattr(test_module, 'teardown'):
            test_module.teardown(execution.data)
        else:
            logger.info('Test does not have a teardown function')
    except:
        result['result'] = 'fail'
        result['error'] += '\n\nteardown failed'
        result['error'] += '\n' + traceback.format_exc()
        logger.error('An error ocurred in the teardown:', exc_info=True)

    # if there is no teardown or teardown failed or it did not close the driver,
    # let's try to close the driver manually
    if execution.browser:
        try:
            execution.browser.quit()
        except:
            # if this fails, we have lost control over the webdriver window
            # and we are not going to be able to close it
            logger.error('There was an error closing the driver')
            logger.error(traceback.format_exc())
        finally:
            execution.browser = None

    test_end_time = time.time()
    test_elapsed_time = round(test_end_time - test_start_time, 2)

    if not result['error']:
        logger.info('Test passed')

    # collect the remaining run data into the result dict
    result['description'] = execution.description
    result['steps'] = execution.steps
    result['test_elapsed_time'] = test_elapsed_time
    result['test_timestamp'] = test_timestamp
    result['browser'] = execution.browser_definition['name']
    result['browser_full_name'] = execution.browser_definition['full_name']

    # remove golem.execution from sys.modules to guarantee thread safety
    #sys.modules['golem.execution'] = None

    report.generate_report(report_directory, test_name, execution.data, result)
    # NOTE(review): this deletes only the local binding; the module itself
    # stays in sys.modules, so shared state is not actually reset here.
    del execution
    return
Code example #7
0
File: test_runner.py — Project: FabioRosado/golem
def run_test(workspace, project, test_name, test_data, browser,
             settings, report_directory):
    """Run a single test case by name and write its JSON report.

    Variant that skips the 'env' key when deriving the set name, quits
    every driver in ``execution.browsers``, and clears the shared
    ``golem.execution`` entry from ``sys.modules`` when the run finishes.

    Args:
        workspace: root directory of the golem workspace.
        project: project name (sub-directory under ``projects``).
        test_name: test module name, slash- or dot-separated.
        test_data: dict of data values for this test run.
        browser: browser definition dict with 'name' and 'full_name' keys.
        settings: settings dict (log levels, screenshot flags, ...).
        report_directory: directory where the report and logs are written.
    """
    # Result skeleton; filled in as the run progresses.
    result = {
        'result': 'pass',
        'error': '',
        'description': '',
        'steps': [],
        'test_elapsed_time': None,
        'test_timestamp': None,
        'browser': '',
        'browser_full_name': '',
        'set_name': '',
    }

    # Imported inside the function so each run picks up the shared
    # ``execution`` module state at call time.
    from golem.core import execution_logger
    from golem import actions
    from golem import execution

    # convert test_data to data obj
    class Data(dict):
        """dot notation access to dictionary attributes"""
        def __getattr__(*args):
            val = dict.get(*args)
            return Data(val) if type(val) is dict else val

        __setattr__ = dict.__setitem__
        __delattr__ = dict.__delitem__

    execution.data = Data(test_data)

    # set set_name
    # set name is the value of 'set_name' if present in the data table
    # if it is not present, use the value of the first column in the data table
    # if there's no data in the data table, leave set_name as ''
    _set_name = ''
    if 'set_name' in test_data:
        _set_name = test_data['set_name']
    elif test_data:
        # the 'env' entry is excluded when picking the first data column
        data_without_env = dict(test_data)
        data_without_env.pop('env', None)
        if data_without_env:
            _set_name = test_data[next(iter(data_without_env))]
    result['set_name'] = _set_name

    # NOTE(review): get_logger is called with three settings here (no
    # file_log_level) — confirm against its signature in this variant.
    logger = execution_logger.get_logger(report_directory,
                                         settings['console_log_level'],
                                         settings['log_all_events'])
    execution.logger = logger
    # Print execution info to console
    logger.info('Test execution started: {}'.format(test_name))
    logger.info('Browser: {}'.format(browser['name']))
    if 'env' in test_data:
        if 'name' in test_data['env']:
            logger.info('Environment: {}'.format(test_data['env']['name']))
    if test_data:
        data_string = '\n'
        for key, value in test_data.items():
            if key == 'env':
                # only the env url is logged, not the whole env dict
                if 'url' in value:
                    data_string += '    {}: {}\n'.format('url', value['url'])
            else:
                data_string += '    {}: {}\n'.format(key, value)
        logger.info('Using data: {}'.format(data_string))

    test_timestamp = utils.get_timestamp()
    test_start_time = time.time()
    # Publish the run context as module-level state on ``execution``.
    execution.project = project
    execution.workspace = workspace
    execution.browser_definition = browser
    execution.settings = settings
    execution.report_directory = report_directory

    # add the 'project' directory to python path
    # so it's possible to make relative imports from the test
    # example, some_test.py
    # from pages import some_page
    sys.path.append(os.path.join(workspace, 'projects', project))

    test_module = None

    try:
        if '/' in test_name:
            test_name = test_name.replace('/', '.')
        test_module = importlib.import_module(
            'projects.{0}.tests.{1}'.format(project, test_name))

        # import the page objects into the test module
        if hasattr(test_module, 'pages'):
            for page in test_module.pages:
                test_module = utils.generate_page_object_module(project, test_module,
                                                                page, page.split('.'))

        # import logger into the test module
        setattr(test_module, 'logger', execution.logger)

        # import actions into the test module
        for action in dir(actions):
            setattr(test_module, action, getattr(actions, action))

        # log description
        if hasattr(test_module, 'description'):
            execution.description = test_module.description
        else:
            logger.info('Test does not have description')

        # run setup method
        if hasattr(test_module, 'setup'):
            test_module.setup(execution.data)
        else:
            logger.info('Test does not have setup function')

        # 'test' is the only required function of a test module
        if hasattr(test_module, 'test'):
            test_module.test(execution.data)
        else:
            raise Exception('Test does not have test function')

        if settings['screenshot_on_end'] and execution.browser:
            actions.capture('test end')
    # NOTE(review): bare except — deliberately catches any failure above
    # (including import errors) so the run always reaches teardown/report.
    except:
        result['result'] = 'fail'
        result['error'] = traceback.format_exc()
        try:
            if settings['screenshot_on_error'] and execution.browser:
                actions.capture('error')
        except:
            # if the test failed and driver is not available
            # capture screenshot is not possible, continue
            pass

        logger.error('An error ocurred:', exc_info=True)

    # teardown runs regardless of the test outcome
    try:
        if hasattr(test_module, 'teardown'):
            test_module.teardown(execution.data)
        else:
            logger.info('Test does not have a teardown function')
    except:
        result['result'] = 'fail'
        result['error'] += '\n\nteardown failed'
        result['error'] += '\n' + traceback.format_exc()
        logger.error('An error ocurred in the teardown:', exc_info=True)

    # if there is no teardown or teardown failed or it did not close the driver,
    # let's try to close the driver manually
    if execution.browser:
        try:
            # NOTE(review): quits every open driver; this loop also rebinds
            # the local name ``browser`` (the parameter) — harmless here
            # because the parameter is not used afterwards.
            for browser, driver in execution.browsers.items():
                driver.quit()
        except:
            # if this fails, we have lost control over the webdriver window
            # and we are not going to be able to close it
            logger.error('There was an error closing the driver')
            logger.error(traceback.format_exc())
        finally:
            execution.browser = None

    test_end_time = time.time()
    test_elapsed_time = round(test_end_time - test_start_time, 2)

    if not result['error']:
        logger.info('Test passed')

    # collect the remaining run data into the result dict
    result['description'] = execution.description
    result['steps'] = execution.steps
    result['test_elapsed_time'] = test_elapsed_time
    result['test_timestamp'] = test_timestamp
    result['browser'] = execution.browser_definition['name']
    result['browser_full_name'] = execution.browser_definition['full_name']

    # remove golem.execution from sys.modules to guarantee thread safety
    #sys.modules['golem.execution'] = None

    report.generate_report(report_directory, test_name, execution.data, result)
    # del execution
    sys.modules['golem.execution'] = None
    return
Code example #8
0
def run_test(workspace, project, test_name, test_data, driver, settings,
             report_directory):
    """Run a single test case by name and write its JSON report.

    Older variant that stores all run context as attributes on the
    ``golem.core`` module and logs through the module-level
    ``execution_logger.logger``.

    Args:
        workspace: root directory of the golem workspace.
        project: project name (sub-directory under ``projects``).
        test_name: dotted test module name.
        test_data: dict of data values for this test run.
        driver: driver (browser) name for this run.
        settings: settings dict (log levels, screenshot flags, ...).
        report_directory: directory where the report and logs are written.
    """
    # Result skeleton; filled in as the run progresses.
    result = {
        'result': 'pass',
        'error': '',
        'description': '',
        'steps': [],
        'test_elapsed_time': None,
        'test_timestamp': None,
        'browser': driver
    }

    from golem.core import execution_logger
    from golem import actions

    # convert test_data to data obj
    # TODO convert data dict to data obj
    class data:
        pass

    # plain attribute bag: each data key becomes an attribute
    new_data_class = data()
    for key, value in test_data.items():
        setattr(new_data_class, key, value)

    # configures the module-level execution_logger.logger used below
    execution_logger.get_logger(report_directory,
                                settings['console_log_level'],
                                settings['file_log_level'],
                                settings['log_all_events'])

    execution_logger.logger.info(
        'Test execution started: {}'.format(test_name))
    execution_logger.logger.info('Driver: {}'.format(driver))
    if test_data:
        data_string = '\n'
        for key, value in test_data.items():
            data_string += '    {}: {}\n'.format(key, value)
        execution_logger.logger.info('Using data: {}'.format(data_string))

    test_timestamp = utils.get_timestamp()
    test_start_time = time.time()

    # Publish the run context as attributes of the golem.core module.
    golem.core.project = project
    golem.core.workspace = workspace
    golem.core.test_data = new_data_class
    golem.core.driver_name = driver
    golem.core.set_settings(settings)
    golem.core.report_directory = report_directory

    test_module = None

    try:
        test_module = importlib.import_module('projects.{0}.tests.{1}'.format(
            project, test_name))

        # import the page objects into the test module
        if hasattr(test_module, 'pages'):
            for page in test_module.pages:
                test_module = utils.generate_page_object_module(
                    project, test_module, page, page.split('.'))

        # import logger into the test module
        setattr(test_module, 'logger', golem.core.execution_logger)

        # import actions into the test module
        for action in dir(golem.actions):
            setattr(test_module, action, getattr(golem.actions, action))

        # log description
        if hasattr(test_module, 'description'):
            golem.core.execution_logger.description = test_module.description
        else:
            execution_logger.logger.info('Test does not have description')

        # run setup method
        if hasattr(test_module, 'setup'):
            test_module.setup(golem.core.test_data)
        else:
            execution_logger.logger.info('Test does not have setup function')

        # 'test' is the only required function of a test module
        if hasattr(test_module, 'test'):
            test_module.test(golem.core.test_data)
        else:
            raise Exception('Test does not have test function')
    # NOTE(review): bare except — deliberately catches any failure above
    # so the run always reaches teardown and reporting.
    except:
        result['result'] = 'fail'
        result['error'] = traceback.format_exc()
        try:
            if settings['screenshot_on_error'] and golem.core.driver:
                actions.capture('error')
        except:
            # if the test failed and driver is not available
            # capture screenshot is not possible, continue
            pass

        execution_logger.logger.error('An error ocurred:', exc_info=True)

    # teardown runs regardless of the test outcome
    try:
        if hasattr(test_module, 'teardown'):
            test_module.teardown(golem.core.test_data)
        else:
            execution_logger.logger.info(
                'Test does not have a teardown function')
    except:
        result['result'] = 'fail'
        result['error'] += '\n\nteardown failed'
        result['error'] += '\n' + traceback.format_exc()
        execution_logger.logger.error('An error ocurred in the teardown:',
                                      exc_info=True)

    # if there is no teardown or teardown failed or it did not close the driver,
    # let's try to close the driver manually
    if golem.core.driver:
        try:
            golem.core.driver.quit()
        except:
            # if this fails, we have lost control over the webdriver window
            # and we are not going to be able to close it
            execution_logger.logger.error(
                'There was an error closing the driver')
            execution_logger.logger.error(traceback.format_exc())
        finally:
            golem.core.driver = None

    test_end_time = time.time()
    test_elapsed_time = round(test_end_time - test_start_time, 2)

    if not result['error']:
        execution_logger.logger.info('Test passed')

    # collect the remaining run data into the result dict
    result['description'] = execution_logger.description
    result['steps'] = execution_logger.steps
    result['test_elapsed_time'] = test_elapsed_time
    result['test_timestamp'] = test_timestamp
    result['browser'] = golem.core.get_selected_driver()

    # reset the shared logger state for the next run
    execution_logger.description = None
    execution_logger.steps = []
    execution_logger.screenshots = {}
    report.generate_report(report_directory, test_name, golem.core.test_data,
                           result)
    return
Code example #9
0
File: test_runner.py — Project: lucianopuccio/golem
def test_runner(workspace, project, test_case_name, test_data, suite_name,
                suite_data, suite_timestamp, settings):
    """Run a single test case by name and write its JSON report.

    Earliest (Python 2) variant: instantiates a test class and calls its
    setup/test/teardown methods, then collects the description, steps and
    screenshots from the ``execution_logger`` module.

    Returns:
        The ``result`` dict written to the report.
    """
    # Result skeleton; filled in as the run progresses.
    result = {
        'result': 'pass',
        'error': None,
        'description': None,
        'steps': None,
        'test_elapsed_time': None,
        'test_timestamp': None}

    import execution_logger
    instance = None
    test_timestamp = utils.get_timestamp()
    test_start_time = time.time()

    golem.core.set_settings(settings)

    # create a directory to store report.json and screenshots
    report_directory = report.create_report_directory(workspace,
                                                      project,
                                                      test_case_name,
                                                      suite_name,
                                                      suite_timestamp)
    try:
        test_class = utils.get_test_case_class(
                        project,
                        test_case_name)
        instance = test_class()

        # NOTE(review): a missing setup/test method raises a bare
        # Exception, which marks the test as failed below — confirm this
        # is intended rather than a logged skip.
        if hasattr(instance, 'setup'):
            instance.setup()
        else:
            raise Exception

        if hasattr(instance, 'test'):
            instance.test(test_data)
        else:
            raise Exception

    except:
        result['result'] = 'fail'
        result['error'] = traceback.format_exc()
        if settings['screenshot_on_error']:
            actions.capture('error')
        # NOTE(review): debug leftovers — these print to stdout on every
        # failure (Python 2 print statements).
        print dir(traceback)
        print traceback.print_exc()

    # teardown runs regardless of the test outcome; a failure here
    # overwrites any earlier error message with 'teardown failed'
    try:
        if hasattr(instance, 'teardown'):
            instance.teardown()
        else:
            raise Exception
    except:
        result['result'] = 'fail'
        result['error'] = 'teardown failed'

    test_end_time = time.time()
    test_elapsed_time = round(test_end_time - test_start_time, 2)

    # collect the run data gathered by the execution_logger module
    result['description'] = execution_logger.description
    result['steps'] = execution_logger.steps
    result['test_elapsed_time'] = test_elapsed_time
    result['test_timestamp'] = test_timestamp
    result['screenshots'] = execution_logger.screenshots

    report.generate_report(report_directory,
                           test_case_name,
                           test_data,
                           result)
    return result