Example #1
 def test_create_report_directory_suite(self, project_session):
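     # a report directory created with is_suite=True should exist on disk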
     timestamp = utils.get_timestamp()
     test_name = 'testing_report_002'
     exec_dir = report.create_execution_directory(project_session.testdir,
                                                  project_session.name, timestamp,
                                                  test_name=test_name)
     directory = report.create_report_directory(exec_dir, test_name, is_suite=True)
     assert os.path.isdir(directory)
Example #2
 def test_create_report_directory_test(self, project_session):
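     # a report directory created with is_suite=False should exist on disk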
     project = project_session.name
     testdir = project_session.testdir
     timestamp = utils.get_timestamp()
     test_name = 'testing_report_001'
     exec_dir = report.create_execution_directory(testdir, project, timestamp,
                                                  test_name=test_name)
     directory = report.create_report_directory(exec_dir, test_name, is_suite=False)
     assert os.path.isdir(directory)
Example #3
    def test_generate_report_with_env(self, permanent_project_fixture):
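        # when the test data includes an 'env' entry, the generated report should record the environment name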
        project = permanent_project_fixture['name']
        testdir = permanent_project_fixture['testdir']
        timestamp = utils.get_timestamp()
        test_name = 'testing_report_003'
        exec_dir = report.create_execution_directory(testdir,
                                                     project,
                                                     timestamp,
                                                     test_name=test_name)
        report_dir = report.create_report_directory(exec_dir,
                                                    test_name,
                                                    is_suite=True)
        test_data = {
            'env': {
                'name': 'env01',
                'url': '1.1.1.1'
            },
            'var2': 'value2'
        }
        test_data = test_runner.Data(test_data)

        result = {
            'result': 'pass',
            'error': '',
            'description': 'description of the test',
            'steps': ['step1', 'step2'],
            'test_elapsed_time': 22.22,
            'test_timestamp': '2018.02.04.02.16.42.729',
            'browser': 'chrome',
            'browser_full_name': '',
            'set_name': 'set_001',
        }
        report.generate_report(report_dir, test_name, test_data, result)
        expected = {
            'test_case': test_name,
            'result': 'pass',
            'steps': ['step1', 'step2'],
            'description': 'description of the test',
            'error': '',
            'short_error': '',
            'test_elapsed_time': 22.22,
            'test_timestamp': '2018.02.04.02.16.42.729',
            'browser': 'chrome',
            'environment': 'env01',
            'set_name': 'set_001',
            'test_data': {
                'env': "{'name': 'env01', 'url': '1.1.1.1'}",
                'var2': "'value2'"
            }
        }
        path = os.path.join(report_dir, 'report.json')
        with open(path) as report_file:
            actual = json.load(report_file)
            assert actual == expected
Example #4
 def test_create_report_directory_suite(self, permanent_project_fixture):
     project = permanent_project_fixture['name']
     testdir = permanent_project_fixture['testdir']
     timestamp = utils.get_timestamp()
     test_name = 'testing_report_002'
     exec_dir = report.create_execution_directory(testdir,
                                                  project,
                                                  timestamp,
                                                  test_name=test_name)
     directory = report.create_report_directory(exec_dir,
                                                test_name,
                                                is_suite=True)
     assert os.path.isdir(directory)
Example #5
 def test_generate_report(self, project_session):
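     # generate a report and compare the resulting report.json with the expected dictionary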
     project = project_session['name']
     testdir = project_session['testdir']
     timestamp = utils.get_timestamp()
     test_name = 'testing_report_002'
     exec_dir = report.create_execution_directory(testdir, project, timestamp,
                                                  test_name=test_name)
     report_dir = report.create_report_directory(exec_dir, test_name,
                                                 is_suite=True)
     test_data = {
         'var1': 'value1',
         'var2': 'value2'
     }
     result = {
         'result': 'pass',
         'error': '',
         'description': 'description of the test',
         'steps': ['step1', 'step2'],
         'test_elapsed_time': 22.22,
         'test_timestamp': '2018.02.04.02.16.42.729',
         'browser': 'chrome',
         'browser_full_name': '',
         'set_name': 'set_001',
     }
     report.generate_report(report_dir, test_name, test_data, result, timestamp,
                            hash(timestamp + str(test_data)))
     expected = {
         'test_case': test_name,
         'result': 'pass',
         'steps': ['step1', 'step2'],
         'description': 'description of the test',
         'error': '',
         'short_error': '',
         'test_elapsed_time': 22.22,
         'test_timestamp': '2018.02.04.02.16.42.729',
         'browser': 'chrome',
         'environment': '',
         'set_name': 'set_001',
         'suite_timestamp': timestamp,
         'test_id': hash(timestamp + str(test_data)),
         'user': getpass.getuser(),
         'hostname': socket.gethostname(),
         'test_data': {
             'var1': "'value1'",
             'var2': "'value2'"
         }
     }
     path = os.path.join(report_dir, 'report.json')
     with open(path) as report_file:
         actual = json.load(report_file)
         assert actual == expected
Example #6
 def test_generate_report_with_env(self, project_session):
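     # the 'environment' field in report.json should come from the env name in the test data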
     timestamp = utils.get_timestamp()
     test_name = 'testing_report_003'
     exec_dir = report.create_execution_directory(project_session.testdir,
                                                  project_session.name, timestamp,
                                                  test_name=test_name)
     report_dir = report.create_report_directory(exec_dir, test_name, is_suite=True)
     test_data = {
         'env': {
             'name': 'env01',
             'url': '1.1.1.1'
         },
         'var2': 'value2'
     }
     test_data = test_runner.Data(test_data)
     result = {
         'result': 'success',
         'errors': [],
         'description': 'description of the test',
         'steps': [
             {'message': 'step1', 'screenshot': None, 'error': None},
             {'message': 'step2', 'screenshot': None, 'error': None}
         ],
         'test_elapsed_time': 22.22,
         'test_timestamp': '2018.02.04.02.16.42.729',
         'browser': 'chrome',
         'browser_full_name': '',
         'set_name': 'set_001',
     }
     report.generate_report(report_dir, test_name, test_data, result)
     path = os.path.join(report_dir, 'report.json')
     with open(path) as report_file:
         actual = json.load(report_file)
         assert len(actual.items()) == 11
         assert actual['test_case'] == test_name
         assert actual['result'] == 'success'
         assert actual['steps'][0]['message'] == 'step1'
         assert actual['steps'][1]['message'] == 'step2'
         assert actual['description'] == 'description of the test'
         assert actual['errors'] == []
         assert actual['test_elapsed_time'] == 22.22
         assert actual['test_timestamp'] == '2018.02.04.02.16.42.729'
         assert actual['browser'] == 'chrome'
         assert actual['environment'] == 'env01'
         assert actual['set_name'] == 'set_001'
         test_data_a = "{'url': '1.1.1.1', 'name': 'env01'}"
         test_data_b = "{'name': 'env01', 'url': '1.1.1.1'}"
         assert actual['test_data']['env'] in [test_data_a, test_data_b]
         assert actual['test_data']['var2'] == "'value2'"
Example #7
def run_test_or_suite(workspace, project, test=None, suite=None, directory_suite=None):
    '''run a test, a suite or a "directory suite"'''
    tests = []
    threads = 1
    suite_amount_workers = None
    suite_drivers = None
    suite_envs = []
    drivers = []
    suite_module = None
    report_suite_name = None
    is_suite = False
    # get test list
    if test:
        tests = [test]
        report_suite_name = 'single_tests'
    elif suite:
        tests = utils.get_suite_test_cases(workspace, project, suite)
        suite_amount_workers = utils.get_suite_amount_of_workers(workspace, project, suite)
        suite_drivers = utils.get_suite_browsers(workspace, project, suite)
        suite_envs = utils.get_suite_environments(workspace, project, suite)
        suite_module = utils.get_suite_module(test_execution.root_path,
                                              test_execution.project,
                                              suite)
        report_suite_name = suite
        is_suite = True
    elif directory_suite:
        tests = utils.get_directory_suite_test_cases(workspace, project, directory_suite)
        report_suite_name = directory_suite
        is_suite = True
    else:
        sys.exit("ERROR: invalid arguments for run_test_or_suite()")

    # get threads
    if test_execution.thread_amount:
        # the thread count passed through cli has higher priority
        threads = test_execution.thread_amount
    elif suite_amount_workers:
        threads = suite_amount_workers

    settings_default_driver = test_execution.settings['default_browser']
    drivers = utils.choose_driver_by_precedence(cli_drivers=test_execution.cli_drivers,
                                                suite_drivers=suite_drivers,
                                                settings_default_driver=settings_default_driver)
    
    # check if drivers are remote
    remote_browsers = settings_manager.get_remote_browsers(test_execution.settings)
    default_browsers = gui_utils.get_supported_browsers_suggestions()
    drivers_temp = []
    for driver in drivers:
        if driver in remote_browsers:
            remote_browser = test_execution.settings['remote_browsers'][driver]
            _ = {
                'name': remote_browser['browserName'],
                'full_name': driver,
                'remote': True,
                'capabilities': remote_browser
            }
            drivers_temp.append(_)
        elif driver in default_browsers:
            _ = {
                'name': driver,
                'full_name': '',
                'remote': False,
                'capabilities': None
            }
            drivers_temp.append(_)
        else:
            msg = ['Error: the browser {} is not defined\n'.format(driver),
                   'available options are:\n',
                   '\n'.join(default_browsers),
                   '\n',
                   '\n'.join(remote_browsers)]
            sys.exit(''.join(msg))

    drivers = drivers_temp

    # timestamp is passed when the test is executed from the GUI,
    # otherwise, a timestamp should be generated at this point
    # the timestamp is used to identify this unique execution of the test or suite
    if not test_execution.timestamp:
        test_execution.timestamp = utils.get_timestamp()

    # select which environments to use for this execution
    project_envs = environment_manager.get_envs(project)
    envs = []
    if test_execution.cli_environments:
        # use the environments passed through command line if available
        envs = test_execution.cli_environments
    elif suite_envs:
        # use the environments defined in the suite
        envs = suite_envs
    elif project_envs:
        # if there are available envs, try to use the first by default
        envs = [project_envs[0]]
    else:
        # execute using a blank environment
        envs = ['']

    envs_data = environment_manager.get_environment_data(project)
    # get test data for each test present in the list of tests
    # for each test in the list, for each data set and driver combination
    # append an entry to the execution_list
    execution_list = []
    for test_case in tests:
        data_sets = utils.get_test_data(workspace, project, test_case)
        for data_set in data_sets:
            for env in envs:
                data_set_env = dict(data_set)
                if env in envs_data:
                    env_data = envs_data[env]
                    ## adding env_data to data_set
                    data_set_env['env'] = env_data
                    data_set_env['env']['name'] = env
                for driver in drivers:
                    execution_list.append(
                        {
                            'test_name': test_case,
                            'data_set': data_set_env,
                            'driver': driver,
                            'report_directory': None
                        }
                    )

    if is_suite:
        execution_directory = report.create_suite_execution_directory(test_execution.root_path,
                                                                      test_execution.project,
                                                                      report_suite_name,
                                                                      test_execution.timestamp)
    else:
        execution_directory = report.create_test_execution_directory(test_execution.root_path,
                                                                     test_execution.project,
                                                                     test,
                                                                     test_execution.timestamp)
    for test in execution_list:
        # generate a report directory for this test
        report_directory = report.create_report_directory(execution_directory,
                                                          test['test_name'],
                                                          is_suite)
        test['report_directory'] = report_directory


    if suite:
        if hasattr(suite_module, 'before'):
            suite_module.before()

    if test_execution.interactive:
        if threads == 1:
            # run tests serially
            for test in execution_list:
                run_test(test_execution.root_path, test_execution.project,
                         test['test_name'], test['data_set'],
                         test['driver'], test_execution.settings,
                         test['report_directory'])
        else:
            print('Error: to run in debug mode, threads must equal one')
    else:
        # run list of tests using multiprocessing
        multiprocess_executor(execution_list, is_suite, execution_directory, threads)

    if suite:
        if hasattr(suite_module, 'after'):
            suite_module.after()
Example #8
    def _prepare(self):
        # Generate timestamp if needed.
        # A timestamp is passed when the test is executed from the GUI.
        # The gui uses this timestamp to fetch the test execution status later on.
        # Otherwise, a new timestamp should be generated at this point.
        if not self.timestamp:
            self.timestamp = utils.get_timestamp()

        # create the execution report directory
        # if this is a suite, the directory takes this structure:
        #   reports/<suite_name>/<timestamp>/
        #
        # if this is a single test, the directory takes this structure:
        #   reports/single_tests/<test_name>/<timestamp>/
        self.execution.reportdir = self._create_execution_directory()

        # Filter tests by tags
        self.execution.tags = self.cli_args.tags or self.suite.tags or []
        if self.execution.tags:
            self.tests = self._filter_tests_by_tags()

        if not self.tests:
            self._finalize()
        else:
            # get amount of processes (parallel executions), default is 1
            if self.cli_args.processes > 1:
                # the processes arg passed through cli has higher priority
                self.execution.processes = self.cli_args.processes
            elif self.suite.processes:
                self.execution.processes = self.suite.processes

            # select the browsers to use in this execution
            # the order of precedence is:
            # 1. browsers defined by CLI
            # 2. browsers defined inside a suite
            # 3. 'default_browser' setting key
            # 4. the overall default is 'chrome'
            self.selected_browsers = utils.choose_browser_by_precedence(
                cli_browsers=self.cli_args.browsers,
                suite_browsers=self.suite.browsers,
                settings_default_browser=session.settings['default_browser'])

            # Define the attributes for each browser.
            # A browser name can be predefined ('chrome', 'chrome-headless', 'firefox', etc.)
            # or it can be defined by the user with the 'remote_browsers' setting.
            # Remote browsers have extra details such as capabilities
            #
            # Each defined browser must have the following attributes:
            # 'name': real name,
            # 'full_name': the remote_browser name defined by the user,
            # 'remote': is this a remote_browser or not
            # 'capabilities': full capabilities defined in the remote_browsers setting
            remote_browsers = settings_manager.get_remote_browsers(session.settings)
            default_browsers = gui_utils.get_supported_browsers_suggestions()
            self.execution.browsers = define_browsers(self.selected_browsers, remote_browsers,
                                                      default_browsers)
            # Select which environments to use
            # The user can define environments in the environments.json file.
            # The suite/test can be executed in one or more of these environments.
            # Which environments will be used is defined by this order of preference:
            # 1. envs passed by CLI
            # 2. envs defined inside the suite
            # 3. The first env defined for the project
            # 4. no envs at all
            #
            # Note, in the case of 4, the test might fail if it tries
            # to use env variables
            project_envs = environment_manager.get_envs(self.project)
            self.execution.envs = self._select_environments(project_envs)
            invalid_envs = [e for e in self.execution.envs if e not in project_envs]
            if invalid_envs:
                print('ERROR: the following environments do not exist for project {}: {}'
                      .format(self.project, ', '.join(invalid_envs)))
                self.execution.has_failed_tests.value = True
                self._finalize()
                return

            # Generate the execution list
            # Each test must be executed for each:
            # * data set
            # * environment
            # * browser
            # The result is a list that contains all the requested combinations
            self.execution.tests = self._define_execution_list()

            self._print_number_of_tests_found()

            # for each test, create the test report directory
            # for example, in a suite 'suite1' with a 'test1':
            # reports/suite1/2017.07.02.19.22.20.001/test1/set_00001/
            for test in self.execution.tests:
                test.reportdir = report.create_report_directory(self.execution.reportdir,
                                                                test.name, self.is_suite)
            try:
                self._execute()
            except KeyboardInterrupt:
                self.execution.has_failed_tests.value = True
                self._finalize()
Example #9
def run_test_or_suite(workspace,
                      project,
                      test=None,
                      suite=None,
                      directory=None):
    """Run a suite or test or directory containing tests."""
    execution = {
        'tests': [],
        'workers': 1,
        'drivers': [],
        'environments': [],
        'suite_before': None,
        'suite_after': None
    }

    suite_amount_workers = None
    suite_drivers = None
    suite_envs = []
    suite_name = None
    is_suite = False

    if test:
        execution['tests'] = [test]
        suite_name = 'single_tests'
    elif suite:
        execution['tests'] = suite_module.get_suite_test_cases(
            workspace, project, suite)
        suite_amount_workers = suite_module.get_suite_amount_of_workers(
            workspace, project, suite)
        suite_drivers = suite_module.get_suite_browsers(
            workspace, project, suite)
        suite_envs = suite_module.get_suite_environments(
            workspace, project, suite)
        suite_imported_module = suite_module.get_suite_module(
            workspace, project, suite)
        execution['suite_before'] = getattr(suite_imported_module, 'before',
                                            None)
        execution['suite_after'] = getattr(suite_imported_module, 'after',
                                           None)
        suite_name = suite
        is_suite = True
    elif directory:
        execution['tests'] = utils.get_directory_test_cases(
            workspace, project, directory)
        suite_name = directory
        is_suite = True
    else:
        sys.exit("ERROR: invalid arguments for run_test_or_suite()")

    # warn if no tests were found
    if len(execution['tests']) == 0:
        print('Warning: no tests were found')

    # get amount of workers (parallel executions), default is 1
    if test_execution.thread_amount:
        # the thread count passed through cli has higher priority
        execution['workers'] = test_execution.thread_amount
    elif suite_amount_workers:
        execution['workers'] = suite_amount_workers

    # select the drivers to use in this execution
    # the order of precedence is:
    # 1. drivers defined by CLI
    # 2. drivers defined inside a suite
    # 3. 'default_driver' setting
    # 4. the overall default is 'chrome'
    settings_default_driver = test_execution.settings['default_browser']
    selected_drivers = utils.choose_driver_by_precedence(
        cli_drivers=test_execution.cli_drivers,
        suite_drivers=suite_drivers,
        settings_default_driver=settings_default_driver)

    # Define the attributes for each driver
    #
    # A driver can be predefined ('chrome', 'chrome-headless', 'firefox', etc.)
    # or it can be defined by the user with the 'remote_browsers' setting.
    # Remote browsers have extra details such as capabilities
    #
    # Each driver must have the following attributes:
    # 'name': real name,
    # 'full_name': the remote_browser name defined by the user,
    # 'remote': is this a remote_browser or not
    # 'capabilities': full capabilities defined in the remote_browsers setting
    remote_browsers = settings_manager.get_remote_browsers(
        test_execution.settings)
    default_browsers = gui_utils.get_supported_browsers_suggestions()
    execution['drivers'] = _define_drivers(selected_drivers, remote_browsers,
                                           default_browsers)

    # Generate timestamp if needed
    # A timestamp is passed when the test is executed from the GUI.
    # The gui uses this timestamp to fetch the test execution status later on.
    # Otherwise, a new timestamp should be generated at this point
    if not test_execution.timestamp:
        test_execution.timestamp = utils.get_timestamp()

    # Select which envs to use
    # The user can define environments in the environments.json file.
    # The suite/test can be executed in one or more of these environments.
    # Which environments to use is defined by this order of preference:
    # 1. envs passed by CLI
    # 2. envs defined inside the suite
    # 3. The first env defined
    # 4. no envs at all
    #
    # Note, in the case of 4, the test might fail if it tries
    # to use env variables
    cli_envs = test_execution.cli_environments
    project_envs = environment_manager.get_envs(workspace, project)
    execution['environments'] = _select_environments(cli_envs, suite_envs,
                                                     project_envs)

    # Generate the execution list
    #
    # Each test must be executed for each:
    # * data set
    # * environment
    # * driver
    #
    # The result is a list that contains all the requested combinations
    execution_list = _define_execution_list(workspace, project, execution)

    # create the execution directory
    #
    # if this is a suite, the directory takes this structure
    #   reports/<suite_name>/<timestamp>/
    #
    # if this is a single test, the directory takes this structure:
    #   reports/single_tests/<test_name>/<timestamp>/
    execution_directory = _create_execution_directory(workspace,
                                                      project,
                                                      test_execution.timestamp,
                                                      test_name=test,
                                                      suite_name=suite_name,
                                                      is_suite=is_suite)
    # for each test, create the test directory
    # for example, in a suite 'suite1' with a 'test1':
    # reports/suite1/2017.07.02.19.22.20.001/test1/set_00001/
    for test in execution_list:
        report_directory = report.create_report_directory(
            execution_directory, test['test_name'], is_suite)
        test['report_directory'] = report_directory

    # EXECUTION

    start_time = time.time()
    suite_error = False

    # run suite `before` function
    if execution['suite_before']:
        try:
            execution['suite_before']()
        except:
            print('ERROR: suite before function failed')
            print(traceback.format_exc())

    if not suite_error:
        if test_execution.interactive and execution['workers'] != 1:
            print('WARNING: to run in debug mode, threads must equal one')

        if execution['workers'] == 1:
            # run tests serially
            for test in execution_list:
                run_test(workspace, project, test['test_name'],
                         test['data_set'], test['driver'],
                         test_execution.settings, test['report_directory'])
        else:
            # run tests using multiprocessing
            multiprocess_executor(execution_list, execution['workers'])

    # run suite `after` function
    if execution['suite_after']:
        try:
            execution['suite_after']()
        except:
            print('ERROR: suite after function failed')
            print(traceback.format_exc())

    # generate execution_result.json
    elapsed_time = round(time.time() - start_time, 2)
    report_parser.generate_execution_report(execution_directory, elapsed_time)
Example #10
def test_runner(workspace, project, test_case_name, test_data, suite_name,
                suite_data, suite_timestamp, settings):
    ''' runs a single test case by name'''
    result = {
        'result': 'pass',
        'error': None,
        'description': None,
        'steps': None,
        'test_elapsed_time': None,
        'test_timestamp': None}

    import execution_logger
    instance = None
    test_timestamp = utils.get_timestamp()
    test_start_time = time.time()

    golem.core.set_settings(settings)

    # create a directory to store report.json and screenshots
    report_directory = report.create_report_directory(workspace,
                                                      project,
                                                      test_case_name,
                                                      suite_name,
                                                      suite_timestamp)
    try:
        test_class = utils.get_test_case_class(
                        project,
                        test_case_name)
        instance = test_class()

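        # the test class must define setup and test methods; otherwise the test is marked as failed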
        if hasattr(instance, 'setup'):
            instance.setup()
        else:
            raise Exception

        if hasattr(instance, 'test'):
            instance.test(test_data)
        else:
            raise Exception

    except:
        result['result'] = 'fail'
        result['error'] = traceback.format_exc()
        if settings['screenshot_on_error']:
            actions.capture('error')
        traceback.print_exc()

    try:
        if hasattr(instance, 'teardown'):
            instance.teardown()
        else:
            raise Exception
    except:
        result['result'] = 'fail'
        result['error'] = 'teardown failed'

    test_end_time = time.time()
    test_elapsed_time = round(test_end_time - test_start_time, 2)

    result['description'] = execution_logger.description
    result['steps'] = execution_logger.steps
    result['test_elapsed_time'] = test_elapsed_time
    result['test_timestamp'] = test_timestamp
    result['screenshots'] = execution_logger.screenshots

    report.generate_report(report_directory,
                           test_case_name,
                           test_data,
                           result)
    return result
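For reference, a minimal sketch of how this runner might be invoked; the workspace path, project and test names, test data, and settings dict below are illustrative placeholders, not values from the original example:
# hypothetical invocation sketch: run one test case and inspect the result dict
result = test_runner(workspace='/path/to/workspace',        # hypothetical workspace path
                     project='my_project',                  # hypothetical project name
                     test_case_name='login_test',
                     test_data={'username': 'user01'},
                     suite_name='single_tests',
                     suite_data=None,
                     suite_timestamp=utils.get_timestamp(),
                     settings={'screenshot_on_error': False})  # minimal settings read by the runner
if result['result'] == 'pass':
    print('test passed')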
Example #11
def run_test_or_suite(workspace,
                      project,
                      test=None,
                      suite=None,
                      directory_suite=None):
    '''run a test, a suite or a "directory suite"'''

    tests = []
    threads = 1
    suite_amount_workers = None
    suite_drivers = None
    drivers = []
    suite_module = None
    report_suite_name = None
    is_suite = False

    # get test list
    if test:
        tests = [test]
        report_suite_name = 'single_tests'
    elif suite:
        tests = utils.get_suite_test_cases(workspace, project, suite)
        suite_amount_workers = utils.get_suite_amount_of_workers(
            workspace, project, suite)
        suite_drivers = utils.get_suite_browsers(workspace, project, suite)
        suite_module = utils.get_suite_module(test_execution.root_path,
                                              test_execution.project, suite)
        report_suite_name = suite
        is_suite = True
    elif directory_suite:
        tests = utils.get_directory_suite_test_cases(workspace, project,
                                                     directory_suite)
        report_suite_name = directory_suite
        is_suite = True
    else:
        sys.exit("ERROR: invalid arguments for run_test_or_suite()")

    # get threads
    if test_execution.thread_amount:
        # the thread count passed through cli has higher priority
        threads = test_execution.thread_amount
    elif suite_amount_workers:
        threads = suite_amount_workers

    drivers = utils.choose_driver_by_precedence(
        cli_drivers=test_execution.cli_drivers,
        suite_drivers=suite_drivers,
        settings_default_driver=test_execution.settings['default_driver'])

    # timestamp is passed when the test is executed from the GUI,
    # otherwise, a timestamp should be generated at this point
    # the timestamp is used to identify this unique execution of the test or suite
    if not test_execution.timestamp:
        test_execution.timestamp = utils.get_timestamp()

    # get test data for each test present in the list of tests
    # for each test in the list, for each data set and driver combination
    # append an entry to the execution_list
    execution_list = []
    for test_case in tests:
        data_sets = utils.get_test_data(workspace, project, test_case)
        for data_set in data_sets:
            for driver in drivers:
                execution_list.append({
                    'test_name': test_case,
                    'data_set': vars(data_set),
                    'driver': driver,
                    'report_directory': None
                })
    if is_suite:
        execution_directory = report.create_suite_execution_directory(
            test_execution.root_path, test_execution.project,
            report_suite_name, test_execution.timestamp)
    else:
        execution_directory = report.create_test_execution_directory(
            test_execution.root_path, test_execution.project, test,
            test_execution.timestamp)
    for test in execution_list:
        # generate a report directory for this test
        report_directory = report.create_report_directory(
            execution_directory, test['test_name'], is_suite)
        test['report_directory'] = report_directory

    if suite:
        if hasattr(suite_module, 'before'):
            suite_module.before()

    if test_execution.interactive:
        if threads == 1:
            # run tests serially
            for test in execution_list:
                run_test(test_execution.root_path, test_execution.project,
                         test['test_name'], test['data_set'], test['driver'],
                         test_execution.settings, test['report_directory'])
        else:
            print('Error: to run in debug mode, threads must equal one')
    else:
        # run list of tests using multiprocessing
        multiprocess_executor(execution_list, is_suite, execution_directory,
                              threads)

    if suite:
        if hasattr(suite_module, 'after'):
            suite_module.after()
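A minimal invocation sketch for this version of run_test_or_suite; the workspace path, project, suite, and test names below are illustrative placeholders:
# run every test case in a suite (hypothetical workspace and names)
run_test_or_suite(workspace='/path/to/workspace',
                  project='my_project',
                  suite='smoke_suite')
# or run a single test case
run_test_or_suite(workspace='/path/to/workspace',
                  project='my_project',
                  test='login_test')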