Code example #1
File: test_runner.py  Project: lucianopuccio/golem
def run_suite(workspace, project, full_suite_name):
    '''run a suite'''

    # TODO: implement directory suites

    if not utils.test_suite_exists(workspace, project, full_suite_name):
        sys.exit(
            "ERROR: no test suite named {} exists".format(full_suite_name))
    else:
        # get test case list
        test_case_list = utils.get_suite_test_cases(project,
                                                    full_suite_name)

        # get test data for each test case present in the suite 
        # and append tc/data pairs for each test case and for each data
        # set to execution list.
        # if there is no data for a test case, it is appended with an
        # empty dict
        execution_list = []
        for test_case in test_case_list:
            data_sets = utils.get_test_data(workspace,
                                            project,
                                            test_case)
            if data_sets:
                for data_set in data_sets:
                    execution_list.append((test_case, data_set))
            else:
                execution_list.append((test_case, {}))

    multiprocess_executor(execution_list, 1, suite_name=full_suite_name)
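From this example, utils.get_test_data(workspace, project, test_case) appears to return a list of data-set dicts, or a falsy value when the test has no data. A minimal sketch of the resulting execution_list, using made-up test names and data:

# Hypothetical values for illustration only; real data comes from the
# project's data files via utils.get_test_data
data_sets = [
    {'username': 'john', 'password': 'secret1'},
    {'username': 'jane', 'password': 'secret2'},
]

# each test case is paired with every one of its data sets;
# a test without data is paired with an empty dict
execution_list = [
    ('login_test', {'username': 'john', 'password': 'secret1'}),
    ('login_test', {'username': 'jane', 'password': 'secret2'}),
    ('logout_test', {}),
]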
Code example #2
File: data.py  Project: lucianopuccio/golem
def is_data_variable(root_path, project, parents, test_case_name, parameter_name):
    '''check whether parameter_name is defined in the first data set of the test'''
    parents += [test_case_name]
    test_data = utils.get_test_data(root_path, project, '.'.join(parents))
    print(test_data)
    if test_data:
        if parameter_name in test_data[0].keys():
            return True
    return False
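A hedged usage sketch of is_data_variable, with a made-up workspace path, project and test name: the function joins the parent directories and the test name with dots, reads the test data, and checks whether the first data set defines the given parameter.

# All argument values below are hypothetical
has_username = is_data_variable(root_path='/path/to/workspace',
                                project='demo_project',
                                parents=['login_suite'],
                                test_case_name='valid_login',
                                parameter_name='username')
# True if the first data set of login_suite.valid_login defines 'username'

Note that parents += [test_case_name] mutates the caller's list in place; passing a copy (for example list(parents)) avoids that side effect.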
Code example #3
File: test_runner.py  Project: lucianopuccio/golem
def run_single_test_case(workspace, project, full_test_case_name):
    '''run a single test case, once for each data set'''
    # check if test case exists
    if not utils.test_case_exists(workspace, project, full_test_case_name):
        sys.exit(
            "ERROR: no test case named {} exists".format(full_test_case_name))
    else:
        # get test data
        data_sets = utils.get_test_data(workspace,
                                        project,
                                        full_test_case_name)
        execution_list = []
        if data_sets:
            for data_set in data_sets:
                execution_list.append((full_test_case_name, data_set))
        else:
            execution_list.append((full_test_case_name, {}))
        # run the single test, once for each data set
        multiprocess_executor(execution_list, 2)
Code example #4
File: __init__.py  Project: lucianopuccio/golem
def test_case_view(project, test_case_name):
    '''render the test case page with its parsed content and test data'''
    # check if user has permissions for this project
    if not user.has_permissions_to_project(g.user.id, project, root_path):
        return render_template('not_permission.html')

    tc_name, parents = utils.separate_file_from_parents(test_case_name)

    test_case_data = test_case.parse_test_case(
                                        root_path,
                                        project,
                                        parents,
                                        tc_name)

    test_data = utils.get_test_data(root_path,
                                    project,
                                    test_case_name)

    return render_template(
                    'test_case.html',
                    project=project,
                    test_case_data=test_case_data,
                    test_case_name=tc_name,
                    full_test_case_name=test_case_name,
                    test_data=test_data)
Code example #5
File: start_execution.py  Project: IbnuFajar/golem
def run_test_or_suite(workspace, project, test=None, suite=None, directory_suite=None):
    '''run a test, a suite or a "directory suite"'''
    # suitex = {
    #     'tests': []
    # }
    tests = []
    threads = 1
    suite_amount_workers = None
    suite_drivers = None
    suite_envs = []
    drivers = []
    suite_module = None
    report_suite_name = None
    is_suite = False
    # get test list
    if test:
        tests = [test]
        report_suite_name = 'single_tests'
    elif suite:
        tests = utils.get_suite_test_cases(workspace, project, suite)
        suite_amount_workers = utils.get_suite_amount_of_workers(workspace, project, suite)
        suite_drivers = utils.get_suite_browsers(workspace, project, suite)
        suite_envs = utils.get_suite_environments(workspace, project, suite)
        suite_module = utils.get_suite_module(test_execution.root_path,
                                              test_execution.project,
                                              suite)
        report_suite_name = suite
        is_suite = True
    elif directory_suite:
        tests = utils.get_directory_suite_test_cases(workspace, project, directory_suite)
        report_suite_name = directory_suite
        is_suite = True
    else:
        sys.exit("ERROR: invalid arguments for run_test_or_suite()")

    # get threads
    if test_execution.thread_amount:
        # the thread count passed through cli has higher priority
        threads = test_execution.thread_amount
    elif suite_amount_workers:
        threads = suite_amount_workers

    settings_default_driver = test_execution.settings['default_browser']
    drivers = utils.choose_driver_by_precedence(cli_drivers=test_execution.cli_drivers,
                                                suite_drivers=suite_drivers,
                                                settings_default_driver=settings_default_driver)
    
    # check if drivers are remote
    remote_browsers = settings_manager.get_remote_browsers(test_execution.settings)
    default_browsers = gui_utils.get_supported_browsers_suggestions()
    drivers_temp = []
    for driver in drivers:
        if driver in remote_browsers:
            remote_browser = test_execution.settings['remote_browsers'][driver]
            _ = {
                'name': remote_browser['browserName'],
                'full_name': driver,
                'remote': True,
                'capabilities': remote_browser
            }
            drivers_temp.append(_)
        elif driver in default_browsers:
            _ = {
                'name': driver,
                'full_name': '',
                'remote': False,
                'capabilities': None
            }
            drivers_temp.append(_)
        else:
            msg = ['Error: the browser {} is not defined\n'.format(driver),
                   'available options are:\n',
                   '\n'.join(default_browsers),
                   '\n'.join(remote_browsers)]
            #sys.exit('Error: the browser {} is not defined'.format(driver))
            sys.exit(''.join(msg))

    drivers = drivers_temp

    # timestamp is passed when the test is executed from the GUI,
    # otherwise, a timestamp should be generated at this point
    # the timestamp is used to identify this unique execution of the test or suite
    if not test_execution.timestamp:
        test_execution.timestamp = utils.get_timestamp()

    #######
    project_envs = environment_manager.get_envs(project)
    envs = []
    if test_execution.cli_environments:
        # use the environments passed through command line if available
        envs = test_execution.cli_environments
    elif suite_envs:
        # use the environments defined in the suite
        envs = suite_envs
    elif project_envs:
        # if there are available envs, try to use the first by default
        envs = [project_envs[0]]
    else:
        # execute using a blank environment
        envs = ['']

    envs_data = environment_manager.get_environment_data(project)
    # get test data for each test present in the list of tests
    # for each test in the list, for each data set and driver combination
    # append an entry to the execution_list
    execution_list = []
    for test_case in tests:
        data_sets = utils.get_test_data(workspace, project, test_case)
        for data_set in data_sets:
            for env in envs:
                data_set_env = dict(data_set)
                if env in envs_data:
                    env_data = envs_data[env]
                    ## adding env_data to data_set
                    data_set_env['env'] = env_data
                    data_set_env['env']['name'] = env
                for driver in drivers:
                    execution_list.append(
                        {
                            'test_name': test_case,
                            'data_set': data_set_env,
                            'driver': driver,
                            'report_directory': None
                        }
                    )

    if is_suite:
        execution_directory = report.create_suite_execution_directory(test_execution.root_path,
                                                                      test_execution.project,
                                                                      report_suite_name,
                                                                      test_execution.timestamp)
    else:
        execution_directory = report.create_test_execution_directory(test_execution.root_path,
                                                                     test_execution.project,
                                                                     test,
                                                                     test_execution.timestamp)
    #
    for test in execution_list:
        # generate a report directory for this test
        report_directory = report.create_report_directory(execution_directory,
                                                          test['test_name'],
                                                          is_suite)
        test['report_directory'] = report_directory


    if suite:
        if hasattr(suite_module, 'before'):
            suite_module.before()

    if test_execution.interactive:
        if threads == 1:
            # run tests serially
            for test in execution_list:
                run_test(test_execution.root_path, test_execution.project,
                         test['test_name'], test['data_set'],
                         test['driver'], test_execution.settings,
                         test['report_directory'])
        else:
            print('Error: to run in debug mode, threads must equal one')
    else:
        # run list of tests using multiprocessing
        multiprocess_executor(execution_list, is_suite, execution_directory, threads)

    if suite:
        if hasattr(suite_module, 'after'):
            suite_module.after()
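In this version each entry of execution_list is a dict rather than a (test, data) tuple, combining one test, one data set (optionally enriched with environment data), and one driver. A sketch of a single entry, with hypothetical values:

# Hypothetical entry; names and values are illustrative only
entry = {
    'test_name': 'login_test',
    'data_set': {
        'username': 'john',
        'env': {'url': 'http://staging.example.com', 'name': 'staging'},
    },
    'driver': {
        'name': 'chrome',
        'full_name': '',
        'remote': False,
        'capabilities': None,
    },
    'report_directory': None,  # filled in later by report.create_report_directory
}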
Code example #6
def run_test_or_suite(workspace,
                      project,
                      test=None,
                      suite=None,
                      directory_suite=None):
    '''run a test, a suite or a "directory suite"'''

    tests = []
    threads = 1
    suite_amount_workers = None
    suite_drivers = None
    drivers = []
    suite_module = None
    report_suite_name = None
    is_suite = False

    # get test list
    if test:
        tests = [test]
        report_suite_name = 'single_tests'
    elif suite:
        tests = utils.get_suite_test_cases(workspace, project, suite)
        suite_amount_workers = utils.get_suite_amount_of_workers(
            workspace, project, suite)
        suite_drivers = utils.get_suite_browsers(workspace, project, suite)
        suite_module = utils.get_suite_module(test_execution.root_path,
                                              test_execution.project, suite)
        report_suite_name = suite
        is_suite = True
    elif directory_suite:
        tests = utils.get_directory_suite_test_cases(workspace, project,
                                                     directory_suite)
        report_suite_name = directory_suite
        is_suite = True
    else:
        sys.exit("ERROR: invalid arguments for run_test_or_suite()")

    # get threads
    if test_execution.thread_amount:
        # the thread count passed through cli has higher priority
        threads = test_execution.thread_amount
    elif suite_amount_workers:
        threads = suite_amount_workers

    drivers = utils.choose_driver_by_precedence(
        cli_drivers=test_execution.cli_drivers,
        suite_drivers=suite_drivers,
        settings_default_driver=test_execution.settings['default_driver'])

    # timestamp is passed when the test is executed from the GUI,
    # otherwise, a timestamp should be generated at this point
    # the timestamp is used to identify this unique execution of the test or suite
    if not test_execution.timestamp:
        test_execution.timestamp = utils.get_timestamp()

    # get test data for each test present in the list of tests
    # for each test in the list, for each data set and driver combination
    # append an entry to the execution_list
    execution_list = []
    for test_case in tests:
        data_sets = utils.get_test_data(workspace, project, test_case)
        for data_set in data_sets:
            for driver in drivers:
                execution_list.append({
                    'test_name': test_case,
                    'data_set': vars(data_set),
                    'driver': driver,
                    'report_directory': None
                })
    if is_suite:
        execution_directory = report.create_suite_execution_directory(
            test_execution.root_path, test_execution.project,
            report_suite_name, test_execution.timestamp)
    else:
        execution_directory = report.create_test_execution_directory(
            test_execution.root_path, test_execution.project, test,
            test_execution.timestamp)
    #
    for test in execution_list:
        # generate a report directory for this test
        report_directory = report.create_report_directory(
            execution_directory, test['test_name'], is_suite)
        test['report_directory'] = report_directory

    if suite:
        if hasattr(suite_module, 'before'):
            suite_module.before()

    if test_execution.interactive:
        if threads == 1:
            # run tests serially
            for test in execution_list:
                run_test(test_execution.root_path, test_execution.project,
                         test['test_name'], test['data_set'], test['driver'],
                         test_execution.settings, test['report_directory'])
        else:
            print('Error: to run in debug mode, threads must equal one')
    else:
        # run list of tests using threading
        multiprocess_executor(execution_list, is_suite, execution_directory,
                              threads)

    if suite:
        if hasattr(suite_module, 'after'):
            suite_module.after()
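The main difference from example #5, besides dropping environments and remote browsers, is that the data set is converted with vars(data_set), which suggests data sets are objects exposing their values as attributes in this version. A minimal sketch of that conversion, assuming a hypothetical data-set object:

# Hypothetical data-set object, used only to illustrate vars()
class DataSet(object):
    def __init__(self, **values):
        self.__dict__.update(values)

data_set = DataSet(username='john', password='secret1')
vars(data_set)   # {'username': 'john', 'password': 'secret1'}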