Code Example #1
    def _create_status_server(self, test_suite, job):
        listen = self._determine_status_server_uri(test_suite)
        # pylint: disable=W0201
        self.status_repo = StatusRepo(job.unique_id)
        # pylint: disable=W0201
        self.status_server = StatusServer(listen,
                                          self.status_repo)
Code Example #2
    def _start_status_server(self, status_server_listen):
        # pylint: disable=W0201
        self.status_repo = StatusRepo()
        # pylint: disable=W0201
        self.status_server = StatusServer(status_server_listen,
                                          self.status_repo)
        asyncio.ensure_future(self.status_server.serve_forever())
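
These excerpts share one startup idiom: build a status repository, wrap it in a status server, and schedule serve_forever() as a background task with asyncio.ensure_future() so the caller can continue. Below is a minimal sketch of that fire-and-forget pattern using only the standard library; EchoRepo, start_echo_server and handle are hypothetical stand-ins, not avocado's StatusRepo/StatusServer API.

import asyncio

class EchoRepo:
    """Hypothetical stand-in that just collects received lines."""
    def __init__(self):
        self.messages = []

async def start_echo_server(repo, host='127.0.0.1', port=8888):
    async def handle(reader, writer):
        data = await reader.readline()
        repo.messages.append(data.decode().strip())
        writer.close()
        await writer.wait_closed()
    server = await asyncio.start_server(handle, host, port)
    # like ensure_future(self.status_server.serve_forever()) above:
    # schedule serving in the background and return immediately
    asyncio.ensure_future(server.serve_forever())
    return server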
Code Example #3
    def run_suite(self, job, test_suite):
        """
        Run one or more tests and report with test result.

        :param job: job that includes the test suite
        :type job: :py:class:`avocado.core.job.Job`
        :param test_suite: test suite with some tests to run
        :type test_suite: :py:class:`avocado.core.suite.TestSuite`
        :returns: a set with types of test failures
        :rtype: :py:class:`set`
        """
        self.job = job

        self.status_repo = StatusRepo(job.unique_id)
        self.status_server = StatusServer(
            job.config.get('nrunner.status_server_listen'), self.status_repo)
        asyncio.ensure_future(self.status_server.serve_forever())
        # TODO: this needs more customization
        asyncio.ensure_future(self._update_status(job))

        graph = self._graph_from_suite(test_suite)
        summary = set()
        params = self.job.config["param_dict"]

        self.tasks = []
        self.slots = params.get("slots", "").split(" ")

        # TODO: fix other run_traversal calls
        try:
            graph.visualize(self.job.logdir)
            self.run_traversal(graph, params)
            if not self.all_tests_ok:
                # the summary is a set so only a single failed test is enough
                summary.add('FAIL')
        except KeyboardInterrupt:
            summary.add('INTERRUPTED')

        # TODO: the avocado implementation needs a workaround here:
        # Wait until all messages may have been processed by the
        # status_updater. This should be replaced by a mechanism
        # that only waits if there are missing status messages to
        # be processed, and, only for a given amount of time.
        # Tests with non received status will always show as SKIP
        # because of result reconciliation.
        time.sleep(0.05)

        self.job.result.end_tests()
        self.job.funcatexit.run()
        self.status_server.close()
        signal.signal(signal.SIGTSTP, signal.SIG_IGN)
        return summary
Code Example #4
    async def test(self):
        number_of_tasks = 80
        number_of_workers = 8

        runnable = Runnable("noop", "noop")
        runtime_tasks = [RuntimeTask(Task(runnable, "%03i" % _))
                         for _ in range(1, number_of_tasks + 1)]
        spawner = Spawner()
        status_repo = StatusRepo()

        state_machine = statemachine.TaskStateMachine(runtime_tasks, status_repo)
        workers = [statemachine.Worker(state_machine, spawner).run()
                   for _ in range(number_of_workers)]

        await asyncio.gather(*workers)
        self.assertEqual(number_of_tasks, len(state_machine.finished))
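
The test above drives eight Worker coroutines over one shared TaskStateMachine and awaits them together with asyncio.gather(). A minimal plain-asyncio sketch of the same worker-pool pattern, with all names (worker, drain, finished) hypothetical:

import asyncio

async def worker(queue, finished):
    while True:
        try:
            item = queue.get_nowait()
        except asyncio.QueueEmpty:
            return                      # queue drained, worker exits
        await asyncio.sleep(0)          # yield, as a real spawner would
        finished.append(item)

async def drain(number_of_tasks=80, number_of_workers=8):
    queue = asyncio.Queue()
    for i in range(number_of_tasks):
        queue.put_nowait(i)
    finished = []
    await asyncio.gather(*(worker(queue, finished)
                           for _ in range(number_of_workers)))
    assert len(finished) == number_of_tasks

asyncio.run(drain())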
Code Example #5
    async def run_test(self, job, node):
        """
        Run a test instance inside a subprocess.

        :param job: job that includes the test suite
        :type job: :py:class:`avocado.core.job.Job`
        :param node: test node to run
        :type node: :py:class:`TestNode`
        """
        if node.spawner is None:
            default_slot = self.slots[0] if len(self.slots) > 0 else ""
            node.set_environment(job, default_slot)
        # once the slot is set (here or earlier), the hostname reflects it
        hostname = node.params["hostname"]
        hostname = "localhost" if not hostname else hostname
        logging.debug(f"Running {node.id} on {hostname}")

        if not self.status_repo:
            self.status_repo = StatusRepo(job.unique_id)
            self.status_server = StatusServer(
                job.config.get('nrunner.status_server_listen'),
                self.status_repo)
            asyncio.ensure_future(self.status_server.serve_forever())
            # TODO: this needs more customization
            asyncio.ensure_future(self._update_status(job))

        raw_task = nrunner.Task(node.get_runnable(),
                                node.id_test,
                                [job.config.get('nrunner.status_server_uri')],
                                nrunner.RUNNERS_REGISTRY_PYTHON_CLASS,
                                job_id=self.job.unique_id)
        task = RuntimeTask(raw_task)
        self.tasks += [task]

        # TODO: use a single state machine for all test nodes when we are able
        # to at least add requested tasks to it safely (using its locks)
        await Worker(
            state_machine=TaskStateMachine([task], self.status_repo),
            spawner=node.spawner,
            max_running=1,
            task_timeout=job.config.get('task.timeout.running')).run()
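
As the TODO notes, run_test() builds a throwaway TaskStateMachine holding a single task and one Worker with max_running=1, so concurrent test nodes never share (and never need to lock) a state machine. A minimal sketch of that one-task-per-machine workaround; run_one() and execute() are hypothetical names:

import asyncio

async def run_one(task, execute):
    queue = asyncio.Queue()
    queue.put_nowait(task)          # a "state machine" of exactly one task
    while not queue.empty():        # max_running=1: one worker, run to end
        await execute(queue.get_nowait())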
Code Example #6
class Runner(RunnerInterface):

    name = 'nrunner'
    description = 'nrunner based implementation of job compliant runner'

    def _save_to_file(self, filename, buff, mode='wb'):
        with open(filename, mode) as fp:
            fp.write(buff)

    def _populate_task_logdir(self, base_path, task, statuses, debug=False):
        # We are copying here to avoid printing duplicated information
        local_statuses = copy(statuses)
        last = local_statuses[-1]
        try:
            stdout = last.pop('stdout')
        except KeyError:
            stdout = None
        try:
            stderr = last.pop('stderr')
        except KeyError:
            stderr = None

        # Create task dir
        task_path = os.path.join(base_path, task.identifier.str_filesystem)
        os.makedirs(task_path, exist_ok=True)

        # Save stdout and stderr
        if stdout is not None:
            stdout_file = os.path.join(task_path, 'stdout')
            self._save_to_file(stdout_file, stdout)
        if stderr is not None:
            stderr_file = os.path.join(task_path, 'stderr')
            self._save_to_file(stderr_file, stderr)

        # Save debug
        if debug:
            debug = os.path.join(task_path, 'debug')
            with open(debug, 'w') as fp:
                json.dump(local_statuses, fp)

        data_file = os.path.join(task_path, 'data')
        with open(data_file, 'w') as fp:
            fp.write("{}\n".format(task.output_dir))

    def _get_all_runtime_tasks(self, test_suite):
        result = []
        no_digits = len(str(len(test_suite)))
        for index, task in enumerate(test_suite.tests, start=1):
            task.known_runners = nrunner.RUNNERS_REGISTRY_PYTHON_CLASS
            # this is all rubbish data
            if test_suite.name:
                prefix = "{}-{}".format(test_suite.name, index)
            else:
                prefix = index
            test_id = TestID(prefix, task.runnable.uri, None, no_digits)
            task.identifier = test_id
            result.append(RuntimeTask(task))
        return result

    def _start_status_server(self, status_server_listen):
        # pylint: disable=W0201
        self.status_repo = StatusRepo()
        # pylint: disable=W0201
        self.status_server = StatusServer(status_server_listen,
                                          self.status_repo)
        asyncio.ensure_future(self.status_server.serve_forever())

    async def _update_status(self, job):
        tasks_by_id = {
            str(runtime_task.task.identifier): runtime_task.task
            for runtime_task in self.tasks
        }
        while True:
            try:
                (task_id, status,
                 _) = self.status_repo.status_journal_summary.pop(0)

            except IndexError:
                await asyncio.sleep(0.05)
                continue

            task = tasks_by_id.get(task_id)
            early_state = {
                'name': task.identifier,
                'job_logdir': job.logdir,
                'job_unique_id': job.unique_id
            }
            if status == 'started':
                job.result.start_test(early_state)
                job.result_events_dispatcher.map_method(
                    'start_test', job.result, early_state)
            elif status == 'finished':
                this_task_data = self.status_repo.get_task_data(task_id)
                last_task_status = this_task_data[-1]
                test_state = {'status': last_task_status.get('result').upper()}
                test_state.update(early_state)

                time_start = this_task_data[0]['time']
                time_end = last_task_status['time']
                time_elapsed = time_end - time_start
                test_state['time_start'] = time_start
                test_state['time_end'] = time_end
                test_state['time_elapsed'] = time_elapsed

                # fake log dir, needed by some result plugins such as HTML
                test_state['logdir'] = ''

                base_path = os.path.join(job.logdir, 'test-results')
                self._populate_task_logdir(base_path, task, this_task_data,
                                           job.config.get('core.debug'))

                job.result.check_test(test_state)
                job.result_events_dispatcher.map_method(
                    'end_test', job.result, test_state)

                if not mapping[test_state['status']]:
                    self.summary.add("FAIL")

    def run_suite(self, job, test_suite):
        # pylint: disable=W0201
        self.summary = set()

        test_suite.tests, _ = nrunner.check_tasks_requirements(
            test_suite.tests)
        job.result.tests_total = test_suite.size  # no support for variants yet

        listen = test_suite.config.get('nrunner.status_server_listen')
        self._start_status_server(listen)

        # pylint: disable=W0201
        self.tasks = self._get_all_runtime_tasks(test_suite)
        if test_suite.config.get('nrunner.shuffle'):
            random.shuffle(self.tasks)
        tsm = TaskStateMachine(self.tasks)
        spawner_name = test_suite.config.get('nrunner.spawner')
        spawner = SpawnerDispatcher(test_suite.config)[spawner_name].obj
        max_running = min(test_suite.config.get('nrunner.max_parallel_tasks'),
                          len(self.tasks))
        workers = [
            Worker(tsm, spawner, max_running=max_running).run()
            for _ in range(max_running)
        ]
        asyncio.ensure_future(self._update_status(job))
        loop = asyncio.get_event_loop()
        try:
            loop.run_until_complete(
                asyncio.wait_for(asyncio.gather(*workers), job.timeout
                                 or None))
        except (KeyboardInterrupt, asyncio.TimeoutError):
            self.summary.add("INTERRUPTED")

        # Wait until all messages may have been processed by the
        # status_updater. This should be replaced by a mechanism
        # that only waits if there are missing status messages to
        # be processed, and, only for a given amount of time.
        # Tests with non received status will always show as SKIP
        # because of result reconciliation.
        loop.run_until_complete(asyncio.sleep(0.05))

        job.result.end_tests()
        self.status_server.close()
        return self.summary
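
_update_status() above is a polling consumer: it pops the oldest entry from the shared status journal and, whenever the journal is empty, sleeps 50 ms before checking again. A minimal sketch of that loop, where journal, handle and stop are hypothetical stand-ins (the original loops forever with no stop condition):

import asyncio

async def consume(journal, handle, stop):
    while not stop.is_set():
        try:
            entry = journal.pop(0)
        except IndexError:
            await asyncio.sleep(0.05)   # nothing queued yet; poll again
            continue
        handle(entry)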
Code Example #7
File: runner_nrunner.py  Project: cforno12/avocado
class Runner(RunnerInterface):

    name = 'nrunner'
    description = 'nrunner based implementation of job compliant runner'

    @staticmethod
    def _get_requirements_runtime_tasks(runnable, prefix):
        if runnable.requirements is None:
            return

        # creates the runnables for the requirements
        requirements_runnables = RequirementsResolver.resolve(runnable)
        requirements_runtime_tasks = []
        # creates the tasks and runtime tasks for the requirements
        for requirement_runnable in requirements_runnables:
            name = '%s-%s' % (requirement_runnable.kind,
                              requirement_runnable.kwargs.get('name'))
            # the human UI works with TestID objects, so we need to
            # use it to name other tasks
            task_id = TestID(prefix, name, None)
            # with --dry-run we don't want to run requirement
            if runnable.kind == 'dry-run':
                requirement_runnable.kind = 'noop'
            # creates the requirement task
            requirement_task = nrunner.Task(requirement_runnable,
                                            identifier=task_id,
                                            category='requirement')
            # make sure we track the dependencies of a task
            # runtime_task.task.dependencies.add(requirement_task)
            # created the requirement runtime task
            requirements_runtime_tasks.append(RuntimeTask(requirement_task))

        return requirements_runtime_tasks

    @staticmethod
    def _create_runtime_tasks_for_test(test_suite, runnable, no_digits, index,
                                       variant):
        """Creates runtime tasks for both tests, and for its requirements."""
        result = []

        # test related operations
        # create test ID
        if test_suite.name:
            prefix = "{}-{}".format(test_suite.name, index)
        else:
            prefix = index
        test_id = TestID(prefix, runnable.uri, variant, no_digits)
        # inject variant on runnable
        runnable.variant = dump_variant(variant)

        # handles the test task
        task = nrunner.Task(
            runnable,
            identifier=test_id,
            known_runners=nrunner.RUNNERS_REGISTRY_PYTHON_CLASS)
        runtime_task = RuntimeTask(task)
        result.append(runtime_task)

        # handles the requirements
        requirements_runtime_tasks = (Runner._get_requirements_runtime_tasks(
            runnable, prefix))
        # extend the list of tasks with the requirements runtime tasks
        if requirements_runtime_tasks is not None:
            for requirement_runtime_task in requirements_runtime_tasks:
                # make sure we track the dependencies of a task
                runtime_task.task.dependencies.add(
                    requirement_runtime_task.task)
            result.extend(requirements_runtime_tasks)

        return result

    @staticmethod
    def _get_all_runtime_tasks(test_suite):
        runtime_tasks = []
        test_result_total = test_suite.variants.get_number_of_tests(
            test_suite.tests)
        no_digits = len(str(test_result_total))
        # define execution order
        execution_order = test_suite.config.get('run.execution_order')
        if execution_order == "variants-per-test":
            test_variant = [(test, variant) for test in test_suite.tests
                            for variant in test_suite.variants.itertests()]
        elif execution_order == "tests-per-variant":
            test_variant = [(test, variant)
                            for variant in test_suite.variants.itertests()
                            for test in test_suite.tests]

        # decide if a copy of the runnable is needed, in case of more
        # variants than tests
        copy_runnable = len(test_variant) > len(test_suite.tests)
        # create runtime tasks
        for index, (runnable, variant) in enumerate(test_variant, start=1):
            if copy_runnable:
                runnable = deepcopy(runnable)
            runtime_tasks.extend(
                Runner._create_runtime_tasks_for_test(test_suite, runnable,
                                                      no_digits, index,
                                                      variant))
        return runtime_tasks

    def _start_status_server(self, status_server_listen):
        # pylint: disable=W0201
        self.status_repo = StatusRepo()
        # pylint: disable=W0201
        self.status_server = StatusServer(status_server_listen,
                                          self.status_repo)
        asyncio.ensure_future(self.status_server.serve_forever())

    async def _update_status(self, job):
        tasks_by_id = {
            str(runtime_task.task.identifier): runtime_task.task
            for runtime_task in self.runtime_tasks
        }
        message_handler = MessageHandler()
        while True:
            try:
                (task_id, _, _, index) = \
                    self.status_repo.status_journal_summary.pop(0)

            except IndexError:
                await asyncio.sleep(0.05)
                continue

            message = self.status_repo.get_task_data(task_id, index)
            task = tasks_by_id.get(task_id)
            message_handler.process_message(message, task, job)

    def run_suite(self, job, test_suite):
        summary = set()

        test_suite.tests, _ = nrunner.check_runnables_runner_requirements(
            test_suite.tests)
        job.result.tests_total = test_suite.variants.get_number_of_tests(
            test_suite.tests)

        listen = test_suite.config.get('nrunner.status_server_listen')
        self._start_status_server(listen)

        # pylint: disable=W0201
        self.runtime_tasks = self._get_all_runtime_tasks(test_suite)
        if test_suite.config.get('nrunner.shuffle'):
            random.shuffle(self.runtime_tasks)
        test_ids = [
            rt.task.identifier for rt in self.runtime_tasks
            if rt.task.category == 'test'
        ]
        tsm = TaskStateMachine(self.runtime_tasks, self.status_repo)
        spawner_name = test_suite.config.get('nrunner.spawner')
        spawner = SpawnerDispatcher(test_suite.config)[spawner_name].obj
        max_running = min(test_suite.config.get('nrunner.max_parallel_tasks'),
                          len(self.runtime_tasks))
        timeout = test_suite.config.get('task.timeout.running')
        workers = [
            Worker(state_machine=tsm,
                   spawner=spawner,
                   max_running=max_running,
                   task_timeout=timeout).run() for _ in range(max_running)
        ]
        asyncio.ensure_future(self._update_status(job))
        loop = asyncio.get_event_loop()
        try:
            loop.run_until_complete(
                asyncio.wait_for(asyncio.gather(*workers), job.timeout
                                 or None))
        except (KeyboardInterrupt, asyncio.TimeoutError, TestFailFast) as ex:
            LOG_JOB.info(str(ex))
            job.interrupted_reason = str(ex)
            summary.add("INTERRUPTED")

        # Wait until all messages may have been processed by the
        # status_updater. This should be replaced by a mechanism
        # that only waits if there are missing status messages to
        # be processed, and, only for a given amount of time.
        # Tests with non received status will always show as SKIP
        # because of result reconciliation.
        loop.run_until_complete(asyncio.sleep(0.05))

        job.result.end_tests()
        self.status_server.close()

        # Update the overall summary with found test statuses, which will
        # determine the Avocado command line exit status
        summary.update([
            status.upper()
            for status in self.status_repo.get_result_set_for_tasks(test_ids)
        ])
        return summary
Code Example #8
class Runner(RunnerInterface):

    name = 'nrunner'
    description = 'nrunner based implementation of job compliant runner'

    def _determine_status_server_uri(self, test_suite):
        # pylint: disable=W0201
        self.status_server_dir = None
        if test_suite.config.get('nrunner.status_server_auto'):
            # no UNIX domain sockets on Windows
            if platform.system() != 'Windows':
                self.status_server_dir = tempfile.TemporaryDirectory(
                    prefix='avocado_')
                return os.path.join(self.status_server_dir.name,
                                    '.status_server.sock')
        return test_suite.config.get('nrunner.status_server_listen')

    def _create_status_server(self, test_suite, job):
        listen = self._determine_status_server_uri(test_suite)
        # pylint: disable=W0201
        self.status_repo = StatusRepo(job.unique_id)
        # pylint: disable=W0201
        self.status_server = StatusServer(listen, self.status_repo)

    async def _update_status(self, job):
        tasks_by_id = {
            str(runtime_task.task.identifier): runtime_task.task
            for runtime_task in self.runtime_tasks
        }
        message_handler = MessageHandler()
        while True:
            try:
                (task_id, _, _, index) = \
                    self.status_repo.status_journal_summary.pop(0)

            except IndexError:
                await asyncio.sleep(0.05)
                continue

            message = self.status_repo.get_task_data(task_id, index)
            task = tasks_by_id.get(task_id)
            message_handler.process_message(message, task, job)

    @staticmethod
    def _abort_if_missing_runners(runnables):
        if runnables:
            missing_kinds = set([runnable.kind for runnable in runnables])
            msg = ("Could not find runners for runnable(s) of kind(s): %s" %
                   ", ".join(missing_kinds))
            raise JobError(msg)

    def run_suite(self, job, test_suite):
        summary = set()

        if not test_suite.enabled:
            job.interrupted_reason = f"Suite {test_suite.name} is disabled."
            return summary

        test_suite.tests, missing_requirements = nrunner.check_runnables_runner_requirements(
            test_suite.tests)
        self._abort_if_missing_runners(missing_requirements)

        job.result.tests_total = test_suite.variants.get_number_of_tests(
            test_suite.tests)

        self._create_status_server(test_suite, job)

        graph = RuntimeTaskGraph(test_suite.get_test_variants(),
                                 test_suite.name, self.status_server.uri,
                                 job.unique_id)
        # pylint: disable=W0201
        self.runtime_tasks = graph.get_tasks_in_topological_order()

        # Start the status server
        asyncio.ensure_future(self.status_server.serve_forever())

        if test_suite.config.get('nrunner.shuffle'):
            random.shuffle(self.runtime_tasks)
        test_ids = [
            rt.task.identifier for rt in self.runtime_tasks
            if rt.task.category == 'test'
        ]
        tsm = TaskStateMachine(self.runtime_tasks, self.status_repo)
        spawner_name = test_suite.config.get('nrunner.spawner')
        spawner = SpawnerDispatcher(test_suite.config, job)[spawner_name].obj
        max_running = min(test_suite.config.get('nrunner.max_parallel_tasks'),
                          len(self.runtime_tasks))
        timeout = test_suite.config.get('task.timeout.running')
        failfast = test_suite.config.get('run.failfast')
        workers = [
            Worker(state_machine=tsm,
                   spawner=spawner,
                   max_running=max_running,
                   task_timeout=timeout,
                   failfast=failfast).run() for _ in range(max_running)
        ]
        asyncio.ensure_future(self._update_status(job))
        loop = asyncio.get_event_loop()
        try:
            loop.run_until_complete(
                asyncio.wait_for(asyncio.gather(*workers), job.timeout
                                 or None))
        except (KeyboardInterrupt, asyncio.TimeoutError, TestFailFast) as ex:
            LOG_JOB.info(str(ex))
            job.interrupted_reason = str(ex)
            summary.add("INTERRUPTED")

        # Wait until all messages may have been processed by the
        # status_updater. This should be replaced by a mechanism
        # that only waits if there are missing status messages to
        # be processed, and, only for a given amount of time.
        # Tests with non received status will always show as SKIP
        # because of result reconciliation.
        loop.run_until_complete(asyncio.sleep(0.05))

        job.result.end_tests()
        self.status_server.close()
        if self.status_server_dir is not None:
            self.status_server_dir.cleanup()

        # Update the overall summary with found test statuses, which will
        # determine the Avocado command line exit status
        summary.update([
            status.upper()
            for status in self.status_repo.get_result_set_for_tasks(test_ids)
        ])
        return summary
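
_determine_status_server_uri() above prefers an automatically generated UNIX domain socket under a fresh temporary directory (except on Windows, which lacks them), and run_suite() removes that directory after closing the server. A minimal sketch of the same decision; the fallback TCP address is a made-up example value:

import os
import platform
import tempfile

def pick_listen_uri(fallback='127.0.0.1:8888'):
    if platform.system() != 'Windows':
        server_dir = tempfile.TemporaryDirectory(prefix='avocado_')
        return server_dir, os.path.join(server_dir.name,
                                        '.status_server.sock')
    return None, fallback

server_dir, uri = pick_listen_uri()
# ... run the suite against `uri` ...
if server_dir is not None:
    server_dir.cleanup()                # as run_suite() does at the end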
Code Example #9
class Runner(RunnerInterface):

    name = 'nrunner'
    description = 'nrunner based implementation of job compliant runner'

    def _get_requirements_runtime_tasks(self, runnable, prefix, job_id):
        if runnable.requirements is None:
            return

        # creates the runnables for the requirements
        requirements_runnables = RequirementsResolver.resolve(runnable)
        requirements_runtime_tasks = []
        # creates the tasks and runtime tasks for the requirements
        for requirement_runnable in requirements_runnables:
            name = '%s-%s' % (requirement_runnable.kind,
                              requirement_runnable.kwargs.get('name'))
            # the human UI works with TestID objects, so we need to
            # use it to name other tasks
            task_id = TestID(prefix,
                             name,
                             None)
            # with --dry-run we don't want to run requirement
            if runnable.kind == 'dry-run':
                requirement_runnable.kind = 'noop'
            # creates the requirement task
            requirement_task = nrunner.Task(requirement_runnable,
                                            identifier=task_id,
                                            status_uris=[self.status_server.uri],
                                            category='requirement',
                                            job_id=job_id)
            # make sure we track the dependencies of a task
            # runtime_task.task.dependencies.add(requirement_task)
            # created the requirement runtime task
            requirements_runtime_tasks.append(RuntimeTask(requirement_task))

        return requirements_runtime_tasks

    def _create_runtime_tasks_for_test(self, test_suite, runnable, no_digits,
                                       index, variant, job_id):
        """Creates runtime tasks for both tests, and for its requirements."""
        result = []

        # test related operations
        # create test ID
        if test_suite.name:
            prefix = "{}-{}".format(test_suite.name, index)
        else:
            prefix = index
        test_id = TestID(prefix,
                         runnable.identifier,
                         variant,
                         no_digits)
        # inject variant on runnable
        runnable.variant = dump_variant(variant)

        # handles the test task
        task = nrunner.Task(runnable,
                            identifier=test_id,
                            known_runners=nrunner.RUNNERS_REGISTRY_PYTHON_CLASS,
                            status_uris=[self.status_server.uri],
                            job_id=job_id)
        runtime_task = RuntimeTask(task)
        result.append(runtime_task)

        # handles the requirements
        requirements_runtime_tasks = (
            self._get_requirements_runtime_tasks(runnable,
                                                 prefix,
                                                 job_id))
        # extend the list of tasks with the requirements runtime tasks
        if requirements_runtime_tasks is not None:
            for requirement_runtime_task in requirements_runtime_tasks:
                # make sure we track the dependencies of a task
                runtime_task.task.dependencies.add(
                    requirement_runtime_task.task)
            result.extend(requirements_runtime_tasks)

        return result

    def _get_all_runtime_tasks(self, test_suite, job_id):
        runtime_tasks = []
        test_result_total = test_suite.variants.get_number_of_tests(test_suite.tests)
        no_digits = len(str(test_result_total))
        if test_suite.test_parameters:
            paths = ['/']
            tree_nodes = TreeNode().get_node(paths[0], True)
            tree_nodes.value = test_suite.test_parameters
            variant = {"variant": tree_nodes, "variant_id": None, "paths": paths}
            test_variant = [(test, variant) for test in test_suite.tests]

        else:
            # let's use variants when parameters are not available
            # define execution order
            execution_order = test_suite.config.get('run.execution_order')
            if execution_order == "variants-per-test":
                test_variant = [(test, variant) for test in test_suite.tests
                                for variant in test_suite.variants.itertests()]
            elif execution_order == "tests-per-variant":
                test_variant = [(test, variant)
                                for variant in test_suite.variants.itertests()
                                for test in test_suite.tests]

        # decide if a copy of the runnable is needed, in case of more
        # variants than tests
        copy_runnable = len(test_variant) > len(test_suite.tests)
        # create runtime tasks
        for index, (runnable, variant) in enumerate(test_variant, start=1):
            if copy_runnable:
                runnable = deepcopy(runnable)
            runtime_tasks.extend(self._create_runtime_tasks_for_test(
                test_suite,
                runnable,
                no_digits,
                index,
                variant,
                job_id))
        return runtime_tasks

    def _determine_status_server_uri(self, test_suite):
        # pylint: disable=W0201
        self.status_server_dir = None
        if test_suite.config.get('nrunner.status_server_auto'):
            # no UNIX domain sockets on Windows
            if platform.system() != 'Windows':
                self.status_server_dir = tempfile.TemporaryDirectory(
                    prefix='avocado_')
                return os.path.join(self.status_server_dir.name,
                                    '.status_server.sock')
        return test_suite.config.get('nrunner.status_server_listen')

    def _create_status_server(self, test_suite, job):
        listen = self._determine_status_server_uri(test_suite)
        # pylint: disable=W0201
        self.status_repo = StatusRepo(job.unique_id)
        # pylint: disable=W0201
        self.status_server = StatusServer(listen,
                                          self.status_repo)

    async def _update_status(self, job):
        tasks_by_id = {str(runtime_task.task.identifier): runtime_task.task
                       for runtime_task in self.runtime_tasks}
        message_handler = MessageHandler()
        while True:
            try:
                (task_id, _, _, index) = \
                    self.status_repo.status_journal_summary.pop(0)

            except IndexError:
                await asyncio.sleep(0.05)
                continue

            message = self.status_repo.get_task_data(task_id, index)
            task = tasks_by_id.get(task_id)
            message_handler.process_message(message, task, job)

    @staticmethod
    def _abort_if_missing_runners(runnables):
        if runnables:
            missing_kinds = set([runnable.kind for runnable in runnables])
            msg = ("Could not find runners for runnable(s) of kind(s): %s"
                   % ", ".join(missing_kinds))
            raise JobError(msg)

    def run_suite(self, job, test_suite):
        summary = set()

        if not test_suite.enabled:
            job.interrupted_reason = f"Suite {test_suite.name} is disabled."
            return summary

        test_suite.tests, missing_requirements = nrunner.check_runnables_runner_requirements(
            test_suite.tests)
        self._abort_if_missing_runners(missing_requirements)

        job.result.tests_total = test_suite.variants.get_number_of_tests(test_suite.tests)

        self._create_status_server(test_suite, job)

        # pylint: disable=W0201
        self.runtime_tasks = self._get_all_runtime_tasks(test_suite, job.unique_id)

        # Start the status server
        asyncio.ensure_future(self.status_server.serve_forever())

        if test_suite.config.get('nrunner.shuffle'):
            random.shuffle(self.runtime_tasks)
        test_ids = [rt.task.identifier for rt in self.runtime_tasks
                    if rt.task.category == 'test']
        tsm = TaskStateMachine(self.runtime_tasks, self.status_repo)
        spawner_name = test_suite.config.get('nrunner.spawner')
        spawner = SpawnerDispatcher(test_suite.config)[spawner_name].obj
        spawner.job_output_dir = job.test_results_path
        max_running = min(test_suite.config.get('nrunner.max_parallel_tasks'),
                          len(self.runtime_tasks))
        timeout = test_suite.config.get('task.timeout.running')
        failfast = test_suite.config.get('run.failfast')
        workers = [Worker(state_machine=tsm,
                          spawner=spawner,
                          max_running=max_running,
                          task_timeout=timeout,
                          failfast=failfast).run()
                   for _ in range(max_running)]
        asyncio.ensure_future(self._update_status(job))
        loop = asyncio.get_event_loop()
        try:
            loop.run_until_complete(asyncio.wait_for(asyncio.gather(*workers),
                                                     job.timeout or None))
        except (KeyboardInterrupt, asyncio.TimeoutError, TestFailFast) as ex:
            LOG_JOB.info(str(ex))
            job.interrupted_reason = str(ex)
            summary.add("INTERRUPTED")

        # Wait until all messages may have been processed by the
        # status_updater. This should be replaced by a mechanism
        # that only waits if there are missing status messages to
        # be processed, and, only for a given amount of time.
        # Tests with non received status will always show as SKIP
        # because of result reconciliation.
        loop.run_until_complete(asyncio.sleep(0.05))

        job.result.end_tests()
        self.status_server.close()
        if self.status_server_dir is not None:
            self.status_server_dir.cleanup()

        # Update the overall summary with found test statuses, which will
        # determine the Avocado command line exit status
        summary.update([status.upper() for status in
                        self.status_repo.get_result_set_for_tasks(test_ids)])
        return summary
Code Example #10
class Runner(RunnerInterface):

    name = 'nrunner'
    description = 'nrunner based implementation of job compliant runner'

    @staticmethod
    def _get_all_runtime_tasks(test_suite):
        runtime_tasks = []
        no_digits = len(str(len(test_suite)))
        status_uris = [test_suite.config.get('nrunner.status_server_uri')]
        for index, runnable in enumerate(test_suite.tests, start=1):
            # this is all rubbish data
            if test_suite.name:
                prefix = "{}-{}".format(test_suite.name, index)
            else:
                prefix = index
            test_id = TestID(prefix, runnable.uri, None, no_digits)
            task = nrunner.Task(runnable, test_id, status_uris,
                                nrunner.RUNNERS_REGISTRY_PYTHON_CLASS)
            runtime_tasks.append(RuntimeTask(task))
        return runtime_tasks

    def _start_status_server(self, status_server_listen):
        # pylint: disable=W0201
        self.status_repo = StatusRepo()
        # pylint: disable=W0201
        self.status_server = StatusServer(status_server_listen,
                                          self.status_repo)
        asyncio.ensure_future(self.status_server.serve_forever())

    async def _update_status(self, job):
        tasks_by_id = {
            str(runtime_task.task.identifier): runtime_task.task
            for runtime_task in self.tasks
        }
        message_handler = MessageHandler()
        while True:
            try:
                (task_id, _, _, index) = \
                    self.status_repo.status_journal_summary.pop(0)

            except IndexError:
                await asyncio.sleep(0.05)
                continue

            message = self.status_repo.get_task_data(task_id, index)
            task = tasks_by_id.get(task_id)
            message_handler.process_message(message, task, job)

    def run_suite(self, job, test_suite):
        # pylint: disable=W0201
        self.summary = set()

        test_suite.tests, _ = nrunner.check_runnables_runner_requirements(
            test_suite.tests)
        job.result.tests_total = test_suite.size  # no support for variants yet

        listen = test_suite.config.get('nrunner.status_server_listen')
        self._start_status_server(listen)

        # pylint: disable=W0201
        self.tasks = self._get_all_runtime_tasks(test_suite)
        if test_suite.config.get('nrunner.shuffle'):
            random.shuffle(self.tasks)
        tsm = TaskStateMachine(self.tasks)
        spawner_name = test_suite.config.get('nrunner.spawner')
        spawner = SpawnerDispatcher(test_suite.config)[spawner_name].obj
        max_running = min(test_suite.config.get('nrunner.max_parallel_tasks'),
                          len(self.tasks))
        timeout = test_suite.config.get('task.timeout.running')
        workers = [
            Worker(state_machine=tsm,
                   spawner=spawner,
                   max_running=max_running,
                   task_timeout=timeout).run() for _ in range(max_running)
        ]
        asyncio.ensure_future(self._update_status(job))
        loop = asyncio.get_event_loop()
        try:
            loop.run_until_complete(
                asyncio.wait_for(asyncio.gather(*workers), job.timeout
                                 or None))
        except (KeyboardInterrupt, asyncio.TimeoutError):
            self.summary.add("INTERRUPTED")

        # Wait until all messages may have been processed by the
        # status_updater. This should be replaced by a mechanism
        # that only waits if there are missing status messages to
        # be processed, and, only for a given amount of time.
        # Tests with non received status will always show as SKIP
        # because of result reconciliation.
        loop.run_until_complete(asyncio.sleep(0.05))

        job.result.end_tests()
        self.status_server.close()
        return self.summary
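
Every run_suite() variant shown here finishes the same way: the gathered workers are bounded by asyncio.wait_for() using job.timeout (or None for no limit), interruptions and timeouts collapse into an "INTERRUPTED" summary entry, and a short sleep lets late status messages drain. A minimal self-contained sketch of that shutdown sequence; worker() and the 5-second timeout are made up:

import asyncio

async def worker(i):
    await asyncio.sleep(0.01 * i)

async def run_workers(job_timeout):
    await asyncio.wait_for(asyncio.gather(*(worker(i) for i in range(8))),
                           job_timeout or None)

summary = set()
loop = asyncio.new_event_loop()
try:
    loop.run_until_complete(run_workers(job_timeout=5))
except (KeyboardInterrupt, asyncio.TimeoutError):
    summary.add("INTERRUPTED")
loop.run_until_complete(asyncio.sleep(0.05))  # grace period for messages
loop.close()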