def _start_status_server(self, status_server_listen):
    """
    Create a fresh status repo/server pair and schedule the server.

    :param status_server_listen: address the status server should listen on
    """
    # Both attributes are created outside __init__ on purpose.
    # pylint: disable=W0201
    self.status_repo = StatusRepo()
    # pylint: disable=W0201
    self.status_server = StatusServer(status_server_listen, self.status_repo)
    # Schedule the server on the running event loop; we do not await it here.
    serve_coro = self.status_server.serve_forever()
    asyncio.ensure_future(serve_coro)
def _create_status_server(self, test_suite, job):
    """
    Instantiate the status repo and server for a suite (serving not started).

    :param test_suite: suite used to determine the server's listen URI
    :param job: job whose unique id tags the status repo
    """
    listen = self._determine_status_server_uri(test_suite)
    # Attributes created outside __init__ on purpose.
    # pylint: disable=W0201
    self.status_repo = StatusRepo(job.unique_id)
    # pylint: disable=W0201
    self.status_server = StatusServer(listen, self.status_repo)
def run_suite(self, job, test_suite):
    """
    Run one or more tests and report with test result.

    :param job: job that includes the test suite
    :type job: :py:class:`avocado.core.job.Job`
    :param test_suite: test suite with some tests to run
    :type test_suite: :py:class:`avocado.core.suite.TestSuite`
    :returns: a set with types of test failures
    :rtype: :py:class:`set`
    """
    self.job = job

    # Bring up the status repo/server and schedule it (and the status
    # updater) on the event loop without awaiting either.
    self.status_repo = StatusRepo(job.unique_id)
    self.status_server = StatusServer(
        job.config.get('nrunner.status_server_listen'),
        self.status_repo)
    asyncio.ensure_future(self.status_server.serve_forever())
    # TODO: this needs more customization
    asyncio.ensure_future(self._update_status(job))

    graph = self._graph_from_suite(test_suite)
    summary = set()
    params = self.job.config["param_dict"]

    self.tasks = []
    # NOTE(review): an empty "slots" param yields [""] (one empty slot),
    # not [] — presumably meaning "local default slot"; confirm upstream.
    self.slots = params.get("slots", "").split(" ")

    # TODO: fix other run_traversal calls
    try:
        graph.visualize(self.job.logdir)
        self.run_traversal(graph, params)
        if not self.all_tests_ok:
            # the summary is a set so only a single failed test is enough
            summary.add('FAIL')
    except KeyboardInterrupt:
        summary.add('INTERRUPTED')

    # TODO: the avocado implementation needs a workaround here:
    # Wait until all messages may have been processed by the
    # status_updater. This should be replaced by a mechanism
    # that only waits if there are missing status messages to
    # be processed, and, only for a given amount of time.
    # Tests with non received status will always show as SKIP
    # because of result reconciliation.
    time.sleep(0.05)

    self.job.result.end_tests()
    self.job.funcatexit.run()
    self.status_server.close()
    # Restore default handling of terminal suspend after the run.
    signal.signal(signal.SIGTSTP, signal.SIG_IGN)
    return summary
async def test(self):
    """Drain 80 noop tasks through the state machine using 8 workers."""
    total_tasks = 80
    worker_count = 8
    runnable = Runnable("noop", "noop")
    tasks = [
        RuntimeTask(Task(runnable, "%03i" % index))
        for index in range(1, total_tasks + 1)
    ]
    spawner = Spawner()
    status_repo = StatusRepo()
    machine = statemachine.TaskStateMachine(tasks, status_repo)
    # Each worker pulls from the shared machine until it is drained.
    worker_coros = [
        statemachine.Worker(machine, spawner).run()
        for _ in range(worker_count)
    ]
    await asyncio.gather(*worker_coros)
    self.assertEqual(total_tasks, len(machine.finished))
async def run_test(self, job, node):
    """
    Run a test instance inside a subprocess.

    :param job: job that includes the test suite
    :type job: :py:class:`avocado.core.job.Job`
    :param node: test node to run
    :type node: :py:class:`TestNode`
    """
    if node.spawner is None:
        default_slot = self.slots[0] if len(self.slots) > 0 else ""
        node.set_environment(job, default_slot)
    # once the slot is set (here or earlier), the hostname reflects it
    hostname = node.params["hostname"]
    hostname = "localhost" if not hostname else hostname
    logging.debug(f"Running {node.id} on {hostname}")

    # Lazily bring up the status repo/server the first time a test runs.
    if not self.status_repo:
        self.status_repo = StatusRepo(job.unique_id)
        self.status_server = StatusServer(
            job.config.get('nrunner.status_server_listen'),
            self.status_repo)
        asyncio.ensure_future(self.status_server.serve_forever())
        # TODO: this needs more customization
        asyncio.ensure_future(self._update_status(job))

    # FIX: use the job argument (consistent with the rest of this method)
    # rather than self.job, which may not be set yet if run_suite has not
    # been called before this coroutine.
    raw_task = nrunner.Task(node.get_runnable(), node.id_test,
                            [job.config.get('nrunner.status_server_uri')],
                            nrunner.RUNNERS_REGISTRY_PYTHON_CLASS,
                            job_id=job.unique_id)
    task = RuntimeTask(raw_task)
    self.tasks += [task]

    # TODO: use a single state machine for all test nodes when we are able
    # to at least add requested tasks to it safely (using its locks)
    await Worker(
        state_machine=TaskStateMachine([task], self.status_repo),
        spawner=node.spawner,
        max_running=1,
        task_timeout=job.config.get('task.timeout.running')).run()