Example No. 1
 def __init__(self, workspace, project, test_name, test_data, secrets,
              browser, settings, report_directory):
     self.result = {
         'result': '',
         'errors': [],
         'description': '',
         'steps': [],
         'test_elapsed_time': None,
         'test_timestamp': None,
         'browser': '',
         'browser_full_name': '',
         'set_name': '',
     }
     self.workspace = workspace
     self.project = project
     self.test_name = test_name
     self.test_data = test_data
     self.secrets = secrets
     self.browser = browser
     self.settings = settings
     self.report_directory = report_directory
     self.test_module = None
     self.test_timestamp = utils.get_timestamp()
     self.test_start_time = time.time()
     self.logger = None
Example No. 2
def run_suite(project, suite_name):
    """Run a suite. This is used when running suites from the GUI"""
    script_name = sys.argv[0]
    timestamp = utils.get_timestamp()
    subprocess.Popen(
        [script_name, 'run', project, suite_name, '--timestamp', timestamp])
    return timestamp
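A hedged usage sketch (the project and suite names below are made up): the returned timestamp can later be used to locate the execution's report directory, following the reports/<suite>/<timestamp> layout seen in the other examples.

# Hypothetical caller code, not part of the original snippet:
timestamp = run_suite('my_project', 'smoke_suite')
# The report for this run would then live under a path like:
#   <testdir>/projects/my_project/reports/smoke_suite/<timestamp>/report.json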
Example No. 3
def run_suite(project, suite_name):
    timestamp = utils.get_timestamp()
    subprocess.Popen([
        'python', 'golem.py', 'run', project, suite_name, '--timestamp',
        timestamp
    ])
    return timestamp
Example No. 4
def multiprocess_executor(execution_list, processes=1,
                          suite_name=None, suite_data=None):
    print('execution list', execution_list)
    timestamp = utils.get_timestamp()

    if not suite_name:
        suite_name = '__single__'

    pool = Pool(processes=processes)

    results = []
    for test in execution_list:
        apply_async = pool.apply_async(test_runner,
                                       args=(test_execution.root_path,
                                             test_execution.project,
                                             test[0],
                                             test[1],
                                             suite_name,
                                             suite_data,
                                             timestamp,
                                             test_execution.settings),
                                       callback=logger.log_result)
        results.append(apply_async)

    list(map(ApplyResult.wait, results))

    lst_results = [r.get() for r in results]

    #for res in lst_results:
    #    print '\none result\n',res

    pool.close()
    pool.join()
Example No. 5
 def test_run_suite_filter_by_invalid_tag_expression(
         self, _project_with_tags, test_utils, capsys):
     """When a invalid tag expression is used a message is displayed
     to the console, no tests are run, the report is generated,
     and the execution exists with status code 1
     """
     _, project = _project_with_tags.activate()
     suite_name = test_utils.random_numeric_string(10, 'suite')
     tests = [
         _project_with_tags.t.test_alfa_bravo,
         _project_with_tags.t.test_bravo_charlie
     ]
     test_utils.create_suite(project, suite_name, tests=tests)
     timestamp = utils.get_timestamp()
     execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                   timestamp=timestamp,
                                                   tags=['sierra = tango'])
     execution_runner.project = project
     with pytest.raises(SystemExit):
         execution_runner.run_suite(suite_name)
     out, err = capsys.readouterr()
     expected = (
         "InvalidTagExpression: unknown expression <class '_ast.Assign'>, the "
         "only valid operators for tag expressions are: 'and', 'or' & 'not'"
     )
     assert expected in out
     data = report_parser.get_execution_data(project=project,
                                             suite=suite_name,
                                             execution=timestamp)
     assert data['has_finished'] is True
     assert data['total_tests'] == 0
Example No. 6
    def test_run_with_not_existing_environments(self, project_function,
                                                test_utils, capsys):
        """Run tests with a not existing environment.
        It should throw an error and finish with status code 1
        """
        _, project = project_function.activate()
        test_utils.create_test(project, 'test01')
        timestamp = utils.get_timestamp()
        execution_runner = exc_runner.ExecutionRunner(
            browsers=['chrome'],
            timestamp=timestamp,
            environments=['not_existing'])
        execution_runner.project = project
        with pytest.raises(SystemExit) as wrapped_execution:
            execution_runner.run_directory('')

        assert wrapped_execution.value.code == 1
        out, err = capsys.readouterr()
        msg = (
            'ERROR: the following environments do not exist for project {}: '
            'not_existing'.format(project))
        assert msg in out
        data = exec_report.get_execution_data(project=project,
                                              suite='all',
                                              execution=timestamp)
        assert data['has_finished'] is True
        assert data['total_tests'] == 0
Example No. 7
def multiprocess_executor(execution_list, threads=1):
    """Runs a list of tests in parallel using multiprocessing.

    execution_list is a list of dictionaries, each containing:
      'test_name',
      'data_set',
      'driver',
      'report_directory'
    """
    print('Executing:')
    for test in execution_list:
        print('{} in {} with the following data: {}'.format(
            test['test_name'], test['driver']['name'], test['data_set']))
    # TODO test that a worker is used once and then replaced
    pool = Pool(processes=threads, maxtasksperchild=1)
    results = []
    for test in execution_list:
        args = (test_execution.root_path, test_execution.project,
                test['test_name'], test['data_set'], test['driver'],
                test_execution.settings, test['report_directory'],
                test_execution.timestamp,
                hash(test_execution.timestamp + utils.get_timestamp() +
                     str(test['data_set'])))
        apply_async = pool.apply_async(run_test, args=args)
        results.append(apply_async)
    list(map(ApplyResult.wait, results))
    # lst_results = [r.get() for r in results]
    pool.close()
    pool.join()
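Based only on the keys named in the docstring above, an execution_list entry might look like the following sketch (all concrete values are illustrative assumptions, not taken from the library):

execution_list = [
    {
        'test_name': 'login_test',                        # name of the test to run
        'data_set': {'user': 'john', 'password': '123'},  # one data set for that test
        'driver': {'name': 'chrome', 'full_name': ''},    # browser/driver definition
        'report_directory': '/tmp/exec_report/login_test',  # where report.json is written
    },
]
multiprocess_executor(execution_list, threads=2)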
Example No. 8
    def __init__(self, testdir, project, test_name, test_data, secrets, browser, env_name,
                 settings, exec_report_dir, set_name, test_functions_to_run=None,
                 execution_has_failed_tests=None, tags=None, from_suite=False):
        self.testdir = testdir
        self.project = Project(project)
        self.test = Test(project, test_name)
        self.test_data = test_data
        self.secrets = secrets
        self.browser = browser
        self.env_name = env_name
        self.settings = settings
        self.exec_report_dir = exec_report_dir
        self.set_name = set_name
        # When test_functions_to_run is empty or None, all the test functions
        # defined in the test file will be run
        self.test_functions_to_run = test_functions_to_run or []
        self.test_hooks = {
            'before_test': [],
            'before_each': [],
            'after_each': [],
            'after_test': []
        }
        self.execution_has_failed_tests = execution_has_failed_tests
        self.execution_tags = tags or []
        self.from_suite = from_suite
        self.global_skip = False
        self.skip_tests = False

        self.result = None
        self.reportdir = None
        self.test_module = None
        self.test_functions = {}
        self.test_timestamp = utils.get_timestamp()
        self.logger = None
Example No. 9
def run_test(project,
             test_name,
             browsers=None,
             environments=None,
             processes=1):
    """Run a test case. This is used when running tests from the GUI"""
    script_name = sys.argv[0]
    timestamp = utils.get_timestamp()
    param_list = [
        script_name, 'run', project, test_name, '--timestamp', timestamp
    ]

    if browsers:
        param_list.append('--browsers')
        for browser in browsers:
            param_list.append(browser)
    if environments:
        param_list.append('--environments')
        for environment in environments:
            param_list.append(environment)
    if processes:
        param_list.append('--processes')
        param_list.append(str(processes))

    subprocess.Popen(param_list)
    return timestamp
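A hedged usage sketch of this helper (all argument values are illustrative): it builds and spawns the equivalent command line, then returns the timestamp that identifies the execution.

# Spawns roughly: <script> run my_project login_test --timestamp <ts>
#                 --browsers chrome firefox --environments staging --processes 2
timestamp = run_test('my_project', 'login_test',
                     browsers=['chrome', 'firefox'],
                     environments=['staging'],
                     processes=2)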
Example No. 10
 def test_initialize_test_file_report(self, project_session, test_utils):
     _, project = project_session.activate()
     # create a test
     test_file = test_utils.random_string()
     content = 'def test_one(data):\n' \
               '    pass\n' \
               'def test_two(data):\n' \
               '    pass'
     test_utils.create_test(project, test_file, content)
     # create test file reportdir
     execution = test_file
     timestamp = utils.get_timestamp()
     exec_dir = create_execution_directory(project, test_file, timestamp)
     test_file_reportdir = create_test_file_report_dir(
         exec_dir, test_file, '')
     # initialize report for test file
     test_report.initialize_test_file_report(test_file,
                                             ['test_one', 'test_two'], '',
                                             test_file_reportdir, '', '')
     test_file_report = test_report.get_test_file_report_json(
         project, execution, timestamp, test_file)
     assert len(test_file_report) == 2
     assert any(
         t['test'] == 'test_one' and t['result'] == ResultsEnum.PENDING
         for t in test_file_report)
     assert any(
         t['test'] == 'test_two' and t['result'] == ResultsEnum.PENDING
         for t in test_file_report)
Example No. 11
 def test_create_report_directory_test_without_set(self, project_session):
     testdir, project = project_session.activate()
     timestamp = utils.get_timestamp()
     test_name = 'testing_report_001'
     exec_dir = create_execution_directory(project, test_name, timestamp)
     directory = create_test_file_report_dir(exec_dir, test_name, '')
     assert os.path.isdir(directory)
Example No. 12
 def __init__(self, testdir, project, test_name, test_data, secrets, browser,
              settings, report_directory, execution_has_failed_tests=None, tags=None):
     self.result = {
         'result': '',
         'errors': [],
         'description': '',
         'steps': [],
         'test_elapsed_time': None,
         'test_timestamp': None,
         'browser': '',
         'browser_full_name': '',
         'set_name': '',
     }
     self.testdir = testdir
     self.project = project
     self.test_name = test_name
     self.test_data = test_data
     self.secrets = secrets
     self.browser = browser
     self.settings = settings
     self.report_directory = report_directory
     self.test_module = None
     self.test_timestamp = utils.get_timestamp()
     self.test_start_time = time.time()
     self.logger = None
     self.execution_has_failed_tests = execution_has_failed_tests
     self.execution_tags = tags or []
Example No. 13
 def test_run_single_test_with_two_sets(self, project_class, test_utils,
                                        capsys):
     """Run a single test with two data sets.
     It should display the number of tests and test sets found."""
     testdir, project = project_class.activate()
     test_name = 'foo002'
     timestamp = utils.get_timestamp()
     session.settings = settings_manager.get_project_settings(project)
     content = ('data = [{"foo": 1}, {"foo": 2}]\n'
                'def test(data):\n'
                '    pass\n')
     test_utils.create_test(project, test_name, content=content)
     execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                   timestamp=timestamp)
     execution_runner.project = project
     execution_runner.run_test(test_name)
     out, err = capsys.readouterr()
     # number of tests is displayed
     assert 'Tests found: 1 (2 sets)' in out
     test_report_dir = os.path.join(testdir, 'projects', project, 'reports',
                                    'single_tests', test_name, timestamp)
     assert os.path.isdir(test_report_dir)
     items = os.listdir(test_report_dir)
     # two test set dirs + report.json
     assert len(items) == 3
Example No. 14
 def test_create_report_directory_suite(self, project_session):
     testdir, project = project_session.activate()
     timestamp = utils.get_timestamp()
     suite_name = 'suite_foo_002'
     test_name = 'testing_report_002'
     exec_dir = create_execution_directory(project, suite_name, timestamp)
     directory = create_report_directory(exec_dir, test_name, is_suite=True)
     assert os.path.isdir(directory)
Example No. 15
 def test_create_execution_directory_suite_parents(self, project_session):
     testdir, project = project_session.activate()
     timestamp = utils.get_timestamp()
     suite_name = 'a.b.suite_execution_directory'
     directory = create_execution_directory(project, suite_name, timestamp)
     path = os.path.join(project_session.path, 'reports', suite_name, timestamp)
     assert os.path.isdir(path)
     assert directory == path
Example No. 16
 def test_create_report_directory_suite(self, project_session):
     timestamp = utils.get_timestamp()
     test_name = 'testing_report_002'
     exec_dir = report.create_execution_directory(project_session.testdir,
                                                  project_session.name, timestamp,
                                                  test_name=test_name)
     directory = report.create_report_directory(exec_dir, test_name, is_suite=True)
     assert os.path.isdir(directory)
Example No. 17
 def test_create_execution_dir_single_test_parents(self, project_session, test_utils):
     _, project = project_session.activate()
     timestamp = utils.get_timestamp()
     test_name = 'foo.bar.{}'.format(test_utils.random_string())
     directory = create_execution_dir_single_test(project, test_name, timestamp)
     path = os.path.join(project_session.path, 'reports', 'single_tests', test_name, timestamp)
     assert os.path.isdir(path)
     assert directory == path
Example No. 18
 def test_create_execution_directory_for_single_test(self, project_session, test_utils):
     _, project = project_session.activate()
     timestamp = utils.get_timestamp()
     test_name = test_utils.random_string()
     directory = create_execution_directory(project, test_name, timestamp)
     path = os.path.join(project_session.path, 'reports', test_name, timestamp)
     assert os.path.isdir(path)
     assert directory == path
Example No. 19
 def test_create_report_directory_test(self, project_session):
     project = project_session.name
     testdir = project_session.testdir
     timestamp = utils.get_timestamp()
     test_name = 'testing_report_001'
     exec_dir = report.create_execution_directory(testdir, project, timestamp,
                                                  test_name=test_name)
     directory = report.create_report_directory(exec_dir, test_name, is_suite=False)
     assert os.path.isdir(directory)
Example No. 20
def run_test_case(project, test_case_name, environment):
    timestamp = utils.get_timestamp()
    param_list = ['golem', 'run', project, test_case_name,
                  '--timestamp', timestamp]
    if environment:
        param_list.append('--environments')
        param_list.append(environment)
    subprocess.Popen(param_list)
    return timestamp
Example No. 21
def run_suite(project, suite_name):
    """Run a suite. This is used when running suites from the GUI"""
    script_name = sys.argv[0]
    timestamp = utils.get_timestamp()
    param_list = [
        script_name, '--golem-dir', session.testdir, 'run', project,
        suite_name, '--timestamp', timestamp
    ]
    subprocess.Popen(param_list)
    return timestamp
Example No. 22
def run_test_case(project, test_case_name, environment):
    """Run a test case. This is used when running tests from the GUI"""
    timestamp = utils.get_timestamp()
    param_list = ['golem', 'run', project, test_case_name,
                  '--timestamp', timestamp]
    if environment:
        param_list.append('--environments')
        param_list.append(environment)
    subprocess.Popen(param_list)
    return timestamp
Example No. 23
 def test_create_execution_directory_test(self, project_session):
     project = project_session.name
     testdir = project_session.testdir
     timestamp = utils.get_timestamp()
     test_name = 'test_execution_directory'
     directory = report.create_execution_directory(testdir, project, timestamp,
                                                   test_name=test_name)
     path = os.path.join(project_session.path, 'reports', 'single_tests', test_name, timestamp)
     assert os.path.isdir(path)
     assert directory == path
Example No. 24
 def test_create_execution_directory_suite_parents(self, project_session):
     project = project_session.name
     testdir = project_session.testdir
     timestamp = utils.get_timestamp()
     suite_name = 'a.b.suite_execution_directory'
     directory = report.create_execution_directory(testdir, project, timestamp,
                                                   suite_name=suite_name)
     path = os.path.join(project_session.path, 'reports', suite_name, timestamp)
     assert os.path.isdir(path)
     assert directory == path
Example No. 25
 def test_create_report_directory_test(self, project_session):
     testdir, project = project_session.activate()
     timestamp = utils.get_timestamp()
     test_name = 'testing_report_001'
     exec_dir = create_execution_dir_single_test(project, test_name,
                                                 timestamp)
     directory = create_report_directory(exec_dir,
                                         test_name,
                                         is_suite=False)
     assert os.path.isdir(directory)
Example No. 26
def runfix(project_module, test_utils):
    """A fixture that
      Uses a project fix with module scope,
      Creates a random test
      Creates a report directory for a future execution
      Gets the settings and browser values required to run test
      Can run the test provided the test code
      Can read the json report
    """
    testdir, project = project_module.activate()
    test_name = test_utils.create_random_test(project)
    timestamp = utils.get_timestamp()
    exec_dir = _mock_report_directory(project,
                                      execution_name=test_name,
                                      timestamp=timestamp)
    settings = settings_manager.get_project_settings(project)
    browser = _define_browsers_mock(['chrome'])[0]
    env_name = None

    def set_content(test_content):
        test_module.edit_test_code(project, test_name, test_content)

    def run_test(code,
                 test_data={},
                 secrets={},
                 from_suite=False,
                 set_name=''):
        set_content(code)
        test_runner.run_test(testdir,
                             project,
                             test_name,
                             test_data,
                             secrets,
                             browser,
                             env_name,
                             settings,
                             exec_dir,
                             set_name=set_name,
                             test_functions=[],
                             from_suite=from_suite)

    def read_report(set_name=''):
        return _read_report_json(exec_dir, test_name, set_name=set_name)

    fix = SimpleNamespace(testdir=testdir,
                          project=project,
                          test_name=test_name,
                          report_directory=exec_dir,
                          settings=settings,
                          browser=browser,
                          set_content=set_content,
                          run_test=run_test,
                          read_report=read_report)
    return fix
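A sketch of how a test could consume this fixture, assuming the pytest conventions used elsewhere in these examples; the test body and the 'result' report field are assumptions made for illustration:

def test_simple_passing_test(runfix):
    # Run a trivial test through the fixture and read back its JSON report.
    code = ('def test(data):\n'
            '    pass\n')
    runfix.run_test(code)
    report = runfix.read_report()
    assert report['result'] == 'pass'  # field name assumed; depends on framework version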
Example No. 27
 def test_create_execution_directory_suite(self, project_session):
     project = project_session['name']
     testdir = project_session['testdir']
     timestamp = utils.get_timestamp()
     suite_name = 'suite_execution_directory'
     directory = report.create_execution_directory(testdir, project, timestamp,
                                                   suite_name=suite_name)
     path = os.path.join(testdir, 'projects', project, 'reports',
                         suite_name, timestamp)
     assert os.path.isdir(path)
     assert directory == path
Example No. 28
 def test_create_execution_directory_test_parents(self, project_session):
     project = project_session['name']
     testdir = project_session['testdir']
     timestamp = utils.get_timestamp()
     test_name = 'a.b.test_execution_directory'
     directory = report.create_execution_directory(testdir, project, timestamp,
                                                   test_name=test_name)
     path = os.path.join(testdir, 'projects', project, 'reports',
                         'single_tests', test_name, timestamp)
     assert os.path.isdir(path)
     assert directory == path
Example No. 29
    def test_generate_report_with_env(self, permanent_project_fixture):
        project = permanent_project_fixture['name']
        testdir = permanent_project_fixture['testdir']
        timestamp = utils.get_timestamp()
        test_name = 'testing_report_003'
        exec_dir = report.create_execution_directory(testdir,
                                                     project,
                                                     timestamp,
                                                     test_name=test_name)
        report_dir = report.create_report_directory(exec_dir,
                                                    test_name,
                                                    is_suite=True)
        test_data = {
            'env': {
                'name': 'env01',
                'url': '1.1.1.1'
            },
            'var2': 'value2'
        }
        test_data = test_runner.Data(test_data)

        result = {
            'result': 'pass',
            'error': '',
            'description': 'description of the test',
            'steps': ['step1', 'step2'],
            'test_elapsed_time': 22.22,
            'test_timestamp': '2018.02.04.02.16.42.729',
            'browser': 'chrome',
            'browser_full_name': '',
            'set_name': 'set_001',
        }
        report.generate_report(report_dir, test_name, test_data, result)
        expected = {
            'test_case': test_name,
            'result': 'pass',
            'steps': ['step1', 'step2'],
            'description': 'description of the test',
            'error': '',
            'short_error': '',
            'test_elapsed_time': 22.22,
            'test_timestamp': '2018.02.04.02.16.42.729',
            'browser': 'chrome',
            'environment': 'env01',
            'set_name': 'set_001',
            'test_data': {
                'env': "{'name': 'env01', 'url': '1.1.1.1'}",
                'var2': "'value2'"
            }
        }
        path = os.path.join(report_dir, 'report.json')
        with open(path) as report_file:
            actual = json.load(report_file)
            assert actual == expected
Example No. 30
 def test_run_directory(self, _project_with_tags, capsys):
     _, project = _project_with_tags.activate()
     timestamp = utils.get_timestamp()
     execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'], timestamp=timestamp)
     execution_runner.project = project
     execution_runner.run_directory('foo')
     out, err = capsys.readouterr()
     assert 'Tests found: 2' in out
     data = report_parser.get_execution_data(project=project, suite='foo', execution=timestamp)
     assert data['has_finished'] is True
     assert data['total_tests'] == 2
Example No. 31
def run_test_case(project, test_case_name, environment):
    timestamp = utils.get_timestamp()
    param_list = ['python', 'golem.py', 'run',
                  project,
                  test_case_name,
                  '--timestamp', timestamp]
    if environment:
        param_list.append('--environments')
        param_list.append(environment)
    subprocess.Popen(param_list)
    return timestamp
Example No. 32
 def test_create_report_directory_suite(self, permanent_project_fixture):
     project = permanent_project_fixture['name']
     testdir = permanent_project_fixture['testdir']
     timestamp = utils.get_timestamp()
     test_name = 'testing_report_002'
     exec_dir = report.create_execution_directory(testdir,
                                                  project,
                                                  timestamp,
                                                  test_name=test_name)
     directory = report.create_report_directory(exec_dir,
                                                test_name,
                                                is_suite=True)
     assert os.path.isdir(directory)
Example No. 33
def log_change(workspace, project, action, file_type, file_name, username):
    valid_actions = ['CREATE', 'MODIFY', 'RUN']
    valid_file_types = ['test']
    if action not in valid_actions:
        sys.exit('ERROR: {} is not a valid changelog action'.format(action))
    if file_type not in valid_file_types:
        sys.exit('ERROR: {} is not a valid file type'.format(file_type))
    timestamp = utils.get_timestamp()
    path = os.path.join(workspace, 'projects', project, 'changelog')
    with open(path, 'a+') as file:
        file.write('{0} {1} {2} {3} by {4}\n'.format(timestamp, action, file_type,
                                                     file_name, username))
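For illustration, a hypothetical call (workspace, project, file and user names are made up) and the changelog line it would append:

# log_change('/testdir', 'my_project', 'MODIFY', 'test', 'login_test', 'admin')
# would append a line such as:
# 2018.02.04.02.16.42.729 MODIFY test login_test by admin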
        
Example No. 34
    def test__create_execution_directory_is_suite(self, testdir_fixture):
        """Verify that create_execution_directory works as expected when 
        a not suite is passed on
        """
        root_path = testdir_fixture['path']
        project = create_random_project(root_path)
        timestamp = utils.get_timestamp()
        test_name = 'any_test_name_does_not_matter_2'
        suite_name = 'single_tests'
        is_suite = False

        start_execution._create_execution_directory(root_path, project,
                                                    timestamp, test_name,
                                                    suite_name, is_suite)

        expected_path = os.path.join(root_path, 'projects', project,
                                     'reports', 'single_tests', test_name, timestamp)
        path_exists = os.path.isdir(expected_path)
        assert path_exists
Example No. 35
def lock_file(workspace, project, file_name, username):
    timestamp = utils.get_timestamp()
    path = os.path.join(workspace, 'projects', project, 'lock')
    with open(path, 'a') as file:
        file.write('{0} {1} by {2}'.format(file_name, timestamp, username))
    random_cleanup(workspace, project)
Example No. 36
def test_runner(workspace, project, test_case_name, test_data, suite_name,
                suite_data, suite_timestamp, settings):
    ''' runs a single test case by name'''
    result = {
        'result': 'pass',
        'error': None,
        'description': None,
        'steps': None,
        'test_elapsed_time': None,
        'test_timestamp': None}

    import execution_logger
    instance = None
    test_timestamp = utils.get_timestamp()
    test_start_time = time.time()

    golem.core.set_settings(settings)

    # create a directory to store report.json and screenshots
    report_directory = report.create_report_directory(workspace,
                                                      project,
                                                      test_case_name,
                                                      suite_name,
                                                      suite_timestamp)
    try:
        test_class = utils.get_test_case_class(
                        project,
                        test_case_name)
        instance = test_class()

        if hasattr(instance, 'setup'):
            instance.setup()
        else:
            raise Exception

        if hasattr(instance, 'test'):
            instance.test(test_data)
        else:
            raise Exception

    except:
        result['result'] = 'fail'
        result['error'] = traceback.format_exc()
        if settings['screenshot_on_error']:
            actions.capture('error')
        print(dir(traceback))
        traceback.print_exc()

    try:
        if hasattr(instance, 'teardown'):
            instance.teardown()
        else:
            raise Exception
    except:
        result['result'] = 'fail'
        result['error'] = 'teardown failed'

    test_end_time = time.time()
    test_elapsed_time = round(test_end_time - test_start_time, 2)

    result['description'] = execution_logger.description
    result['steps'] = execution_logger.steps
    result['test_elapsed_time'] = test_elapsed_time
    result['test_timestamp'] = test_timestamp
    result['screenshots'] = execution_logger.screenshots

    report.generate_report(report_directory,
                           test_case_name,
                           test_data,
                           result)
    return result
Example No. 37
def run_suite(project, suite_name):
    timestamp = utils.get_timestamp()
    subprocess.Popen(['python', 'golem.py', 'run', project, suite_name, '--timestamp', timestamp])
    return timestamp
Example No. 38
def run_test(workspace, project, test_name, test_data, browser,
             settings, report_directory):
    ''' runs a single test case by name'''
    result = {
        'result': 'pass',
        'error': '',
        'description': '',
        'steps': [],
        'test_elapsed_time': None,
        'test_timestamp': None,
        'browser': '',
        'browser_full_name': '',
        'set_name': '',
    }

    from golem.core import execution_logger
    from golem import actions
    from golem import execution

    # convert test_data to data obj
    class Data(dict):
        """dot notation access to dictionary attributes"""
        def __getattr__(*args):
            val = dict.get(*args)
            return Data(val) if type(val) is dict else val

        __setattr__ = dict.__setitem__
        __delattr__ = dict.__delitem__

    execution.data = Data(test_data)
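    # Illustration only (not part of the original runner): with the Data wrapper
    # above, a data dict such as {'env': {'url': '1.1.1.1'}} can be read with dot
    # notation as execution.data.env.url, as well as execution.data['env']['url'].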

    # set set_name
    # set name is the value of 'set_name' if present in the data table
    # if it is not present, use the value of the first column in the data table
    # if there's no data in the data table, leave set_name as ''
    _set_name = ''
    if 'set_name' in test_data:
        _set_name = test_data['set_name']
    elif test_data:
        data_without_env = dict(test_data)
        data_without_env.pop('env', None)
        if data_without_env:
            _set_name = test_data[next(iter(data_without_env))]
    result['set_name'] = _set_name
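    # Illustrative outcomes of the rule above (values are made up):
    #   {'set_name': 'admin', 'user': 'john'}  -> set_name 'admin'
    #   {'user': 'john', 'password': '123'}    -> set_name 'john' (first column's value)
    #   {} or only {'env': {...}}              -> set_name ''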

    logger = execution_logger.get_logger(report_directory,
                                         settings['console_log_level'],
                                         settings['log_all_events'])
    execution.logger = logger
    # Print execution info to console
    logger.info('Test execution started: {}'.format(test_name))
    logger.info('Browser: {}'.format(browser['name']))
    if 'env' in test_data:
        if 'name' in test_data['env']:
            logger.info('Environment: {}'.format(test_data['env']['name']))
    if test_data:
        data_string = '\n'
        for key, value in test_data.items():
            if key == 'env':
                if 'url' in value:
                    data_string += '    {}: {}\n'.format('url', value['url'])
            else:
                data_string += '    {}: {}\n'.format(key, value)
        logger.info('Using data: {}'.format(data_string))

    test_timestamp = utils.get_timestamp()
    test_start_time = time.time()
    execution.project = project
    execution.workspace = workspace
    execution.browser_definition = browser
    execution.settings = settings
    execution.report_directory = report_directory

    # add the 'project' directory to python path
    # so it's possible to make relative imports from the test
    # example, some_test.py
    # from pages import some_page
    sys.path.append(os.path.join(workspace, 'projects', project))

    test_module = None

    try:
        if '/' in test_name:
            test_name = test_name.replace('/', '.')
        test_module = importlib.import_module(
            'projects.{0}.tests.{1}'.format(project, test_name))

        # import the page objects into the test module
        if hasattr(test_module, 'pages'):
            for page in test_module.pages:
                test_module = utils.generate_page_object_module(project, test_module,
                                                                page, page.split('.'))

        # import logger into the test module
        setattr(test_module, 'logger', execution.logger)

        # import actions into the test module
        for action in dir(actions):
            setattr(test_module, action, getattr(actions, action))

        # log description
        if hasattr(test_module, 'description'):
            execution.description = test_module.description
        else:
            logger.info('Test does not have description')

        # run setup method
        if hasattr(test_module, 'setup'):
            test_module.setup(execution.data)
        else:
            logger.info('Test does not have setup function')

        if hasattr(test_module, 'test'):
            test_module.test(execution.data)
        else:
            raise Exception('Test does not have test function')

        if settings['screenshot_on_end'] and execution.browser:
            actions.capture('test end')
    except:
        result['result'] = 'fail'
        result['error'] = traceback.format_exc()
        try:
            if settings['screenshot_on_error'] and execution.browser:
                actions.capture('error')
        except:
            # if the test failed and the driver is not available,
            # capturing a screenshot is not possible; continue
            pass

        logger.error('An error occurred:', exc_info=True)

    try:
        if hasattr(test_module, 'teardown'):
            test_module.teardown(execution.data)
        else:
            logger.info('Test does not have a teardown function')
    except:
        result['result'] = 'fail'
        result['error'] += '\n\nteardown failed'
        result['error'] += '\n' + traceback.format_exc()
        logger.error('An error occurred in the teardown:', exc_info=True)

    # if there is no teardown or teardown failed or it did not close the driver,
    # let's try to close the driver manually
    if execution.browser:
        try:
            for browser, driver in execution.browsers.items():
                driver.quit()
        except:
            # if this fails, we have lost control over the webdriver window
            # and we are not going to be able to close it
            logger.error('There was an error closing the driver')
            logger.error(traceback.format_exc())
        finally:
            execution.browser = None

    test_end_time = time.time()
    test_elapsed_time = round(test_end_time - test_start_time, 2)

    if not result['error']:
        logger.info('Test passed')

    result['description'] = execution.description
    result['steps'] = execution.steps
    result['test_elapsed_time'] = test_elapsed_time
    result['test_timestamp'] = test_timestamp
    result['browser'] = execution.browser_definition['name']
    result['browser_full_name'] = execution.browser_definition['full_name']

    # remove golem.execution from sys.modules to guarantee thread safety
    #sys.modules['golem.execution'] = None
    
    report.generate_report(report_directory, test_name, execution.data, result)
    # del execution
    sys.modules['golem.execution'] = None
    return