Example #1
def multiprocess_executor(execution_list, is_suite, execution_directory, threads=1):
    print('Executing:')
    for test in execution_list:
        print('{} in {} with the following data: {}'.format(test['test_name'],
                                                            test['driver'],
                                                            test['data_set']))

    # start timing the execution
    start_time = time.time()

    pool = Pool(processes=threads)

    results = []

    for test in execution_list:

        args = (test_execution.root_path,
                test_execution.project,
                test['test_name'],
                test['data_set'],
                test['driver'],
                test_execution.settings,
                test['report_directory'])
        apply_async = pool.apply_async(run_test, args=args)
        results.append(apply_async)

    # wait for every worker to finish; map() is lazy in Python 3, so iterate explicitly
    for async_result in results:
        async_result.wait()

    lst_results = [r.get() for r in results]

    # for res in lst_results:
    #     print('\none result\n', res)

    pool.close()
    pool.join()

    elapsed_time = round(time.time() - start_time, 2)
    
    # generate execution_result.json
    report_parser.generate_execution_report(execution_directory, elapsed_time)
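
The worker-pool mechanics above reduce to a small, self-contained pattern. The sketch below illustrates Pool.apply_async plus result collection in isolation; run_test here is a hypothetical stand-in for the real test runner, not the framework's implementation.

import time
from multiprocessing import Pool


def run_test(test_name, data_set):
    # hypothetical stand-in: pretend to run a test and return its outcome
    time.sleep(0.1)
    return '{} [{}] passed'.format(test_name, data_set)


if __name__ == '__main__':
    pool = Pool(processes=2)
    async_results = [pool.apply_async(run_test, args=(name, data))
                     for name, data in [('test_login', 'set_01'),
                                        ('test_search', 'set_02')]]
    # get() blocks until each worker finishes, so no separate wait step is needed
    results = [r.get() for r in async_results]
    pool.close()
    pool.join()
    print(results)
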
Example #2
    def execute(self):
        start_time = time.time()
        suite_error = False

        # run suite `before` function
        if self.suite.before:
            try:
                self.suite.before.__call__()
            except:
                suite_error = True
                print('ERROR: suite before function failed')
                print(traceback.format_exc())

        if not suite_error:
            if self.interactive and self.execution.processes != 1:
                print('WARNING: to run in debug mode, threads must equal one')

            if self.execution.processes == 1:
                # run tests serially
                for test in self.execution.tests:
                    run_test(test_execution.root_path, self.project, test.name,
                             test.data_set, test.browser,
                             test_execution.settings, test.reportdir)
            else:
                # run tests using multiprocessing
                multiprocess_executor(self.project, self.execution.tests,
                                      self.execution.processes)

        # run suite `after` function
        if self.suite.after:
            try:
                self.suite.after.__call__()
            except:
                print('ERROR: suite after function failed')
                print(traceback.format_exc())

        # generate execution_result.json
        elapsed_time = round(time.time() - start_time, 2)
        report_parser.generate_execution_report(self.execution.reportdir,
                                                elapsed_time)
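
The suite before/after hooks in this method are optional callables wrapped in try/except so a failing hook is reported without aborting the rest of the run. A minimal sketch of that pattern, with hypothetical suite and hook objects:

import traceback


class Suite:
    # hypothetical suite holder exposing optional before/after callables
    def __init__(self, before=None, after=None):
        self.before = before
        self.after = after


def run_hook(hook, label):
    # call the hook if defined; report (but swallow) any exception it raises
    if hook:
        try:
            hook()
        except Exception:
            print('ERROR: suite {} function failed'.format(label))
            print(traceback.format_exc())


suite = Suite(before=lambda: print('setting up'),
              after=lambda: 1 / 0)  # a deliberately failing after hook
run_hook(suite.before, 'before')
print('... tests would run here ...')
run_hook(suite.after, 'after')
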
Example #3
    def _finalize(self):
        elapsed_time = self._get_elapsed_time(self.start_time)

        # generate report.json
        self.report = report_parser.generate_execution_report(
            self.execution.reportdir, elapsed_time, self.execution.browsers,
            self.execution.processes, self.execution.envs, self.execution.tags,
            session.settings['remote_url'])
        if self.is_suite or len(self.execution.tests) > 1:
            self._print_results()
        # generate requested reports
        if self.is_suite:
            report_name = self.report_name or 'report'
            report_folder = self.report_folder or self.execution.reportdir
            if 'junit' in self.reports:
                report_parser.generate_junit_report(self.execution.reportdir,
                                                    self.suite_name,
                                                    self.timestamp,
                                                    self.report_folder,
                                                    report_name)
            if 'json' in self.reports and (self.report_folder
                                           or self.report_name):
                report_parser.save_execution_json_report(
                    self.report, report_folder, report_name)
            if 'html' in self.reports:
                gui_utils.generate_html_report(self.project, self.suite_name,
                                               self.timestamp,
                                               self.report_folder, report_name)
            if 'html-no-images' in self.reports:
                if 'html' in self.reports:
                    report_name = report_name + '-no-images'
                gui_utils.generate_html_report(self.project,
                                               self.suite_name,
                                               self.timestamp,
                                               self.report_folder,
                                               report_name,
                                               no_images=True)

        # exit to the console with exit status code 1 in case a test fails
        if self.execution.has_failed_tests.value:
            sys.exit(1)
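
The has_failed_tests flag checked at the end is a value shared across worker processes; the parent inspects it once all workers finish and exits with status code 1 if any test failed. A rough, self-contained sketch of that idea using a Manager value (names here are illustrative, not the framework's own):

import sys
from multiprocessing import Manager, Pool


def worker(test_name, has_failed_tests):
    # hypothetical worker: mark the shared flag when a test fails
    if test_name == 'test_broken':
        has_failed_tests.value = True


if __name__ == '__main__':
    manager = Manager()
    has_failed_tests = manager.Value('b', False)
    pool = Pool(processes=2)
    for name in ('test_ok', 'test_broken'):
        pool.apply_async(worker, args=(name, has_failed_tests))
    pool.close()
    pool.join()
    if has_failed_tests.value:
        sys.exit(1)
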
Example #4
    def _execute(self):
        start_time = time.time()
        suite_error = False

        # run suite `before` function
        if self.suite.before:
            try:
                self.suite.before.__call__()
            except:
                suite_error = True
                print('ERROR: suite before function failed')
                print(traceback.format_exc())

        if not suite_error:
            if self.interactive and self.execution.processes != 1:
                print('WARNING: to run in debug mode, threads must equal one')

            if self.execution.processes == 1:
                # run tests serially
                for test in self.execution.tests:
                    run_test(test_execution.root_path, self.project, test.name,
                             test.data_set, test.secrets, test.browser,
                             test_execution.settings, test.reportdir,
                             self.execution.has_failed_tests)
            else:
                # run tests using multiprocessing
                multiprocess_executor(self.project, self.execution.tests,
                                      self.execution.has_failed_tests,
                                      self.execution.processes)

        # run suite `after` function
        if self.suite.after:
            try:
                self.suite.after.__call__()
            except:
                print('ERROR: suite after function failed')
                print(traceback.format_exc())

        # generate report.json
        elapsed_time = round(time.time() - start_time, 2)
        self.report = report_parser.generate_execution_report(self.execution.reportdir,
                                                              elapsed_time)

        if self.is_suite:
            self._print_results()

        # generate requested reports
        if self.is_suite:
            report_name = self.report_name or 'report'
            report_folder = self.report_folder or self.execution.reportdir
            if 'junit' in self.reports:
                report_parser.generate_junit_report(self.execution.reportdir,
                                                    self.suite_name, self.timestamp,
                                                    self.report_folder, report_name)
            if 'json' in self.reports and (self.report_folder or self.report_name):
                report_parser.save_execution_json_report(self.report, report_folder, report_name)
            if 'html' in self.reports:
                gui_utils.generate_html_report(self.project, self.suite_name,
                                               self.timestamp, self.report_folder,
                                               report_name)
            if 'html-no-images' in self.reports:
                if 'html' in self.reports:
                    report_name = report_name + '-no-images'
                gui_utils.generate_html_report(self.project, self.suite_name, self.timestamp,
                                               self.report_folder, report_name,
                                               no_images=True)

        # exit to the console with exit status code 1 in case a test fails
        if self.execution.has_failed_tests.value:
            sys.exit(1)
Example #5
def run_test_or_suite(workspace,
                      project,
                      test=None,
                      suite=None,
                      directory=None):
    """Run a suite or test or directory containing tests."""
    execution = {
        'tests': [],
        'workers': 1,
        'drivers': [],
        'environments': [],
        'suite_before': None,
        'suite_after': None
    }

    suite_amount_workers = None
    suite_drivers = None
    suite_envs = []
    suite_name = None
    is_suite = False

    if test:
        execution['tests'] = [test]
        suite_name = 'single_tests'
    elif suite:
        execution['tests'] = suite_module.get_suite_test_cases(
            workspace, project, suite)
        suite_amount_workers = suite_module.get_suite_amount_of_workers(
            workspace, project, suite)
        suite_drivers = suite_module.get_suite_browsers(
            workspace, project, suite)
        suite_envs = suite_module.get_suite_environments(
            workspace, project, suite)
        suite_imported_module = suite_module.get_suite_module(
            workspace, project, suite)
        execution['suite_before'] = getattr(suite_imported_module, 'before',
                                            None)
        execution['suite_after'] = getattr(suite_imported_module, 'after',
                                           None)
        suite_name = suite
        is_suite = True
    elif directory:
        execution['tests'] = utils.get_directory_test_cases(
            workspace, project, directory)
        suite_name = directory
        is_suite = True
    else:
        sys.exit("ERROR: invalid arguments for run_test_or_suite()")

    # warn if no tests were found
    if len(execution['tests']) == 0:
        print('Warning: no tests were found')

    # get amount of workers (parallel executions), default is 1
    if test_execution.thread_amount:
        # the thread count passed through cli has higher priority
        execution['workers'] = test_execution.thread_amount
    elif suite_amount_workers:
        execution['workers'] = suite_amount_workers

    # select the drivers to use in this execution
    # the order of precedence is:
    # 1. drivers defined by CLI
    # 2. drivers defined inside a suite
    # 3. 'default_driver' setting
    # 4. the hard-coded default, 'chrome'
    # (a sketch of this precedence appears after this example)
    settings_default_driver = test_execution.settings['default_browser']
    selected_drivers = utils.choose_driver_by_precedence(
        cli_drivers=test_execution.cli_drivers,
        suite_drivers=suite_drivers,
        settings_default_driver=settings_default_driver)

    # Define the attributes for each driver
    #
    # A driver can be predefined ('chrome', 'chrome-headless', 'firefox', etc.)
    # or it can be defined by the user with the 'remote_browsers' setting.
    # Remote browsers have extra details such as capabilities
    #
    # Each driver must have the following attributes:
    # 'name': real name,
    # 'full_name': the remote_browser name defined by the user,
    # 'remote': is this a remote_browser or not
    # 'capabilities': full capabilities defined in the remote_browsers setting
    remote_browsers = settings_manager.get_remote_browsers(
        test_execution.settings)
    default_browsers = gui_utils.get_supported_browsers_suggestions()
    execution['drivers'] = _define_drivers(selected_drivers, remote_browsers,
                                           default_browsers)

    # Generate timestamp if needed
    # A timestamp is passed when the test is executed from the GUI.
    # The GUI uses this timestamp to fetch the test execution status later on.
    # Otherwise, a new timestamp should be generated at this point
    if not test_execution.timestamp:
        test_execution.timestamp = utils.get_timestamp()

    # Select which envs to use
    # The user can define environments in the environments.json file.
    # The suite/test can be executed in one or more of these environments.
    # Which environments to use is defined by this order of preference:
    # 1. envs passed by CLI
    # 2. envs defined inside the suite
    # 3. The first env defined
    # 4. no envs at all
    #
    # Note: in case 4, the test might fail if it tries
    # to use environment variables
    cli_envs = test_execution.cli_environments
    project_envs = environment_manager.get_envs(workspace, project)
    execution['environments'] = _select_environments(cli_envs, suite_envs,
                                                     project_envs)

    # Generate the execution list
    #
    # Each test must be executed for each:
    # * data set
    # * environment
    # * driver
    #
    # The result is a list that contains all the requested combinations
    execution_list = _define_execution_list(workspace, project, execution)

    # create the execution directory
    #
    # if this is a suite, the directory takes this structure
    #   reports/<suite_name>/<timestamp>/
    #
    # if this is a single test, the directory takes this structure:
    #   reports/single_tests/<test_name>/<timestamp>/
    execution_directory = _create_execution_directory(workspace,
                                                      project,
                                                      test_execution.timestamp,
                                                      test_name=test,
                                                      suite_name=suite_name,
                                                      is_suite=is_suite)
    # for each test, create the test directory
    # for example, in a suite 'suite1' with a 'test1':
    # reports/suite1/2017.07.02.19.22.20.001/test1/set_00001/
    for test in execution_list:
        report_directory = report.create_report_directory(
            execution_directory, test['test_name'], is_suite)
        test['report_directory'] = report_directory

    # EXECUTION

    start_time = time.time()
    suite_error = False

    # run suite `before` function
    if execution['suite_before']:
        try:
            execution['suite_before'].__call__()
        except:
            suite_error = True
            print('ERROR: suite before function failed')
            print(traceback.format_exc())

    if not suite_error:
        if test_execution.interactive and execution['workers'] != 1:
            print('WARNING: to run in debug mode, threads must equal one')

        if execution['workers'] == 1:
            # run tests serially
            for test in execution_list:
                run_test(workspace, project, test['test_name'],
                         test['data_set'], test['driver'],
                         test_execution.settings, test['report_directory'])
        else:
            # run tests using multiprocessing
            multiprocess_executor(execution_list, execution['workers'])

    # run suite `after` function
    if execution['suite_after']:
        try:
            execution['suite_after'].__call__()
        except:
            print('ERROR: suite after function failed')
            print(traceback.format_exc())

    # generate execution_result.json
    elapsed_time = round(time.time() - start_time, 2)
    report_parser.generate_execution_report(execution_directory, elapsed_time)
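
The driver precedence described in the comments above (and the analogous environment selection) comes down to "first non-empty source wins". The sketch below shows what a helper like utils.choose_driver_by_precedence might do; it is an illustration of the ordering, not the actual implementation, and wrapping the settings default in a list is an assumption.

def choose_driver_by_precedence(cli_drivers=None, suite_drivers=None,
                                settings_default_driver=None):
    # first non-empty source wins: CLI > suite > settings > hard-coded 'chrome'
    if cli_drivers:
        return cli_drivers
    if suite_drivers:
        return suite_drivers
    if settings_default_driver:
        # assumption: normalize the single settings value to a list
        return [settings_default_driver]
    return ['chrome']


print(choose_driver_by_precedence(cli_drivers=['firefox']))            # ['firefox']
print(choose_driver_by_precedence(suite_drivers=['chrome-headless']))  # ['chrome-headless']
print(choose_driver_by_precedence())                                   # ['chrome']
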