Example #1
    def run(self, config):
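        # an optional .avocado.hint file, if present, gives the resolver
        # extra hints about how the test references should be resolved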
        hint_filepath = '.avocado.hint'
        hint = None
        if os.path.exists(hint_filepath):
            hint = HintParser(hint_filepath)
        resolutions = resolver.resolve(config.get('nrun.references'), hint)
        tasks = job.resolutions_to_tasks(resolutions, config)
        # pylint: disable=W0201
        self.pending_tasks, missing_requirements = nrunner.check_tasks_requirements(
            tasks)
        if missing_requirements:
            missing_tasks_msg = "\n".join(
                [str(t) for t in missing_requirements])
            LOG_UI.warning(
                'Tasks will not be run due to missing requirements: %s',
                missing_tasks_msg)

        if not self.pending_tasks:
            LOG_UI.error('No tests to be executed, exiting...')
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)

        if not config.get('nrun.disable_task_randomization'):
            random.shuffle(self.pending_tasks)

        self.spawned_tasks = []  # pylint: disable=W0201

        try:
            if config.get('nrun.spawners.podman.enabled'):
                if not os.path.exists(PodmanSpawner.PODMAN_BIN):
                    msg = ('Podman Spawner selected, but podman binary "%s" '
                           'is not available on the system.  Please install '
                           'podman before attempting to use this feature.')
                    msg %= PodmanSpawner.PODMAN_BIN
                    LOG_UI.error(msg)
                    sys.exit(exit_codes.AVOCADO_JOB_FAIL)
                self.spawner = PodmanSpawner()  # pylint: disable=W0201
            else:
                self.spawner = ProcessSpawner()  # pylint: disable=W0201
            listen = config.get('nrun.status_server.listen')
            verbose = config.get('core.verbose')
            # pylint: disable=W0201
            self.status_server = nrunner.StatusServer(
                listen,
                [t.identifier for t in self.pending_tasks],
                verbose)
            self.status_server.start()
            parallel_tasks = config.get('nrun.parallel_tasks')
            loop = asyncio.get_event_loop()
            loop.run_until_complete(self.spawn_tasks(parallel_tasks))
            loop.run_until_complete(self.status_server.wait())
            self.report_results()
            exit_code = exit_codes.AVOCADO_ALL_OK
            result = self.status_server.result
            if (result.get('fail') is not None or
                    result.get('error') is not None):
                exit_code |= exit_codes.AVOCADO_TESTS_FAIL
            return exit_code
        except Exception as e:  # pylint: disable=W0703
            LOG_UI.error(e)
            return exit_codes.AVOCADO_FAIL
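The spawn_tasks() coroutine awaited above is not shown in this example. A minimal sketch of the bounded-parallelism pattern it presumably implements, limiting in-flight tasks with an asyncio.Semaphore (all names here are illustrative, not Avocado's actual API):

    import asyncio

    async def spawn_one(task):
        # Stand-in for handing a task to a spawner; the sleep simulates
        # the time the spawned process would take.
        await asyncio.sleep(0.01)
        print('spawned', task)

    async def spawn_tasks(pending_tasks, parallel_tasks):
        # Keep at most parallel_tasks tasks in flight at any time.
        semaphore = asyncio.Semaphore(parallel_tasks)

        async def bounded(task):
            async with semaphore:
                await spawn_one(task)

        await asyncio.gather(*(bounded(t) for t in pending_tasks))

    asyncio.run(spawn_tasks(['task-%d' % i for i in range(10)], 3))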
Example #2
    def run_suite(self, job, result, test_suite, variants, timeout=0,
                  replay_map=None, execution_order=None):
        summary = set()
        test_suite, _ = nrunner.check_tasks_requirements(test_suite)
        result.tests_total = len(test_suite)  # no support for variants yet
        result_dispatcher = job.result_events_dispatcher

        for index, task in enumerate(test_suite, start=1):
            task.known_runners = nrunner.RUNNERS_REGISTRY_PYTHON_CLASS
            # this is all rubbish data
            early_state = {
                'name': test.TestID(index, task.identifier),
                'job_logdir': job.logdir,
                'job_unique_id': job.unique_id,
            }
            result.start_test(early_state)
            job.result_events_dispatcher.map_method('start_test',
                                                    result,
                                                    early_state)

            statuses = []
            task.status_services = []
            for status in task.run():
                result_dispatcher.map_method('test_progress', False)
                statuses.append(status)
                if status['status'] not in ["started", "running"]:
                    break

            # test execution time is currently missing
            # since 358e800e81 all runners produce the result in a key called
            # 'result', instead of 'status'.  But the Avocado result plugins rely
            # on the current runner approach
            test_state = {'status': statuses[-1]['result'].upper()}
            test_state.update(early_state)

            time_start = statuses[0]['time']
            time_end = statuses[-1]['time']
            time_elapsed = time_end - time_start
            test_state['time_start'] = time_start
            test_state['time_end'] = time_end
            test_state['time_elapsed'] = time_elapsed

            # fake log dir, needed by some result plugins such as HTML
            test_state['logdir'] = ''

            # Populate task dir
            base_path = os.path.join(job.logdir, 'test-results')
            self._populate_task_logdir(base_path,
                                       task,
                                       statuses,
                                       job.config.get('core.debug'))

            result.check_test(test_state)
            result_dispatcher.map_method('end_test', result, test_state)
        return summary
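The loop over task.run() treats the task as a stream of status dictionaries and stops at the first terminal one. A self-contained sketch of that consumption pattern, with a hypothetical generator standing in for task.run() (the key names follow the 'result' convention noted in the comment above):

    import time

    def fake_task_run():
        # Hypothetical stand-in for task.run(): yields status dictionaries,
        # with the final one carrying the outcome under the 'result' key.
        yield {'status': 'started', 'time': time.monotonic()}
        yield {'status': 'running', 'time': time.monotonic()}
        yield {'status': 'finished', 'result': 'pass',
               'time': time.monotonic()}

    statuses = []
    for status in fake_task_run():
        statuses.append(status)
        if status['status'] not in ["started", "running"]:
            break

    test_state = {'status': statuses[-1]['result'].upper()}
    test_state['time_elapsed'] = statuses[-1]['time'] - statuses[0]['time']
    print(test_state)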
Example #3
    def run_suite(self,
                  job,
                  result,
                  test_suite,
                  variants,
                  timeout=0,
                  replay_map=None,
                  execution_order=None):
        summary = set()
        test_suite, _ = check_tasks_requirements(test_suite,
                                                 self.KNOWN_EXTERNAL_RUNNERS)
        result.tests_total = len(test_suite)  # no support for variants yet
        result_dispatcher = job.result_events_dispatcher

        for index, task in enumerate(test_suite, start=1):
            # this is all rubbish data
            early_state = {
                'name': test.TestID(index, task.identifier),
                'job_logdir': job.logdir,
                'job_unique_id': job.unique_id,
            }
            result.start_test(early_state)
            job.result_events_dispatcher.map_method('start_test', result,
                                                    early_state)

            statuses = []
            task.status_services = []
            for status in task.run():
                result_dispatcher.map_method('test_progress', False)
                statuses.append(status)
                if status['status'] not in ["init", "running"]:
                    break

            # test execution time is currently missing
            test_state = {'status': statuses[-1]['status'].upper()}
            test_state.update(early_state)

            time_start = statuses[0]['time_start']
            time_end = statuses[-1]['time_end']
            time_elapsed = time_end - time_start
            test_state['time_start'] = time_start
            test_state['time_end'] = time_end
            test_state['time_elapsed'] = time_elapsed

            # fake log dir, needed by some result plugins such as HTML
            test_state['logdir'] = ''

            result.check_test(test_state)
            result_dispatcher.map_method('end_test', result, test_state)
        return summary
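check_tasks_requirements() is what filters the suite before anything runs, both here and in Example #1. A rough sketch of what such a check might look like, splitting tasks by whether a runner is known for their kind (hypothetical data layout, not the real nrunner implementation):

    def check_tasks_requirements(tasks, known_runners):
        # Split tasks into those whose kind has a registered runner and
        # those that would be reported as missing requirements.
        ok, missing = [], []
        for task in tasks:
            (ok if task['kind'] in known_runners else missing).append(task)
        return ok, missing

    tasks = [{'kind': 'python-unittest'}, {'kind': 'exec-test'},
             {'kind': 'tap'}]
    runnable, missing = check_tasks_requirements(
        tasks, {'python-unittest', 'tap'})
    print(len(runnable), 'runnable,', len(missing), 'missing a runner')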
Example #4
    def run_suite(self, job, test_suite):
        # pylint: disable=W0201
        self.summary = set()

        test_suite.tests, _ = nrunner.check_tasks_requirements(
            test_suite.tests)
        job.result.tests_total = test_suite.size  # no support for variants yet

        listen = test_suite.config.get('nrunner.status_server_listen')
        self._start_status_server(listen)

        # pylint: disable=W0201
        self.tasks = self._get_all_runtime_tasks(test_suite)
        if test_suite.config.get('nrunner.shuffle'):
            random.shuffle(self.tasks)
        tsm = TaskStateMachine(self.tasks)
        spawner_name = test_suite.config.get('nrunner.spawner')
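        # look up the spawner implementation registered under that name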
        spawner = SpawnerDispatcher(test_suite.config)[spawner_name].obj
        max_running = min(test_suite.config.get('nrunner.max_parallel_tasks'),
                          len(self.tasks))
        timeout = test_suite.config.get('task.timeout.running')
        workers = [
            Worker(state_machine=tsm,
                   spawner=spawner,
                   max_running=max_running,
                   task_timeout=timeout).run() for _ in range(max_running)
        ]
        asyncio.ensure_future(self._update_status(job))
        loop = asyncio.get_event_loop()
        try:
            loop.run_until_complete(
                asyncio.wait_for(asyncio.gather(*workers),
                                 job.timeout or None))
        except (KeyboardInterrupt, asyncio.TimeoutError):
            self.summary.add("INTERRUPTED")

        # Give the status_updater a moment to process any remaining
        # messages. This should be replaced by a mechanism that waits
        # only when status messages are still pending, and only for a
        # bounded amount of time. Tests whose status was never received
        # will always show as SKIP because of result reconciliation.
        loop.run_until_complete(asyncio.sleep(0.05))

        job.result.end_tests()
        self.status_server.close()
        return self.summary
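Here the suite is driven by a pool of Worker coroutines sharing one TaskStateMachine. A compact sketch of that pattern, with a queue-like stand-in for the state machine (simplified and hypothetical, not the actual Avocado classes):

    import asyncio

    class FakeStateMachine:
        # Minimal stand-in for TaskStateMachine: a shared list of pending
        # tasks plus a record of finished ones.
        def __init__(self, tasks):
            self.pending = list(tasks)
            self.finished = []

    async def worker(tsm):
        # Each worker pulls tasks until none remain, like the Worker.run()
        # coroutines gathered above.
        while tsm.pending:
            task = tsm.pending.pop(0)
            await asyncio.sleep(0.01)  # simulate spawning and waiting
            tsm.finished.append(task)

    async def main():
        tsm = FakeStateMachine(range(8))
        max_running = 3
        await asyncio.wait_for(
            asyncio.gather(*[worker(tsm) for _ in range(max_running)]),
            timeout=None)
        print('finished:', tsm.finished)

    asyncio.run(main())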