def _get_requirements_runtime_tasks(runnable, prefix):
    """Build one RuntimeTask per requirement of *runnable*.

    :param runnable: the test runnable whose requirements are resolved
    :param prefix: prefix used to build the TestID of each requirement task
    :returns: a list of RuntimeTask objects, or None when the runnable
              declares no requirements
    """
    if runnable.requirements is None:
        return
    result = []
    # resolve the requirements into their own runnables and wrap each
    # of them in a task / runtime task pair
    for req in RequirementsResolver.resolve(runnable):
        label = '%s-%s' % (req.kind, req.kwargs.get('name'))
        # the human UI works with TestID objects, so requirement tasks
        # are named through one as well
        identifier = TestID(prefix, label, None)
        # a dry-run test must not execute its requirements either
        if runnable.kind == 'dry-run':
            req.kind = 'noop'
        req_task = nrunner.Task(req,
                                identifier=identifier,
                                category='requirement')
        result.append(RuntimeTask(req_task))
    return result
def _create_runtime_tasks_for_test(test_suite, runnable, no_digits, index, variant):
    """Creates runtime tasks for both tests, and for its requirements."""
    # build the test's own identifier first
    prefix = ("{}-{}".format(test_suite.name, index)
              if test_suite.name else index)
    test_id = TestID(prefix, runnable.uri, variant, no_digits)
    # the variant is carried on the runnable itself
    runnable.variant = dump_variant(variant)
    # the runtime task for the test proper
    test_task = nrunner.Task(
        runnable,
        identifier=test_id,
        known_runners=nrunner.RUNNERS_REGISTRY_PYTHON_CLASS)
    test_runtime_task = RuntimeTask(test_task)
    result = [test_runtime_task]
    # requirement tasks, if any, are dependencies of the test task and
    # are appended after it
    requirement_tasks = Runner._get_requirements_runtime_tasks(runnable,
                                                               prefix)
    if requirement_tasks is not None:
        for req_runtime_task in requirement_tasks:
            # record the dependency so the state machine can order them
            test_runtime_task.task.dependencies.add(req_runtime_task.task)
        result.extend(requirement_tasks)
    return result
def suite_to_tasks(suite, status_uris):
    """Convert a legacy test suite (factory list) into nrunner tasks.

    :param suite: sequence of (class, args) test factories
    :param status_uris: status server URIs handed to every task
    :returns: list of nrunner.Task objects
    """
    no_digits = len(str(len(suite)))
    tasks = []
    for index, (klass, args) in enumerate(suite, start=1):
        identifier = str(test.TestID(index, args.get("name"), None,
                                     no_digits))
        if klass == test.PythonUnittest:
            # derive a dotted module path relative to the current
            # working directory
            rel_dir = args.get("test_dir").split(os.getcwd())[1][1:]
            unittest_path = "%s.%s" % (rel_dir.replace("/", "."),
                                       args.get("name"))
            runnable = nrunner.Runnable('python-unittest', unittest_path)
        elif klass == test.SimpleTest:
            runnable = nrunner.Runnable('exec-test', args.get('executable'))
        else:
            # FIXME: This should instead raise an error
            print('WARNING: unknown test type "%s", using "noop"' % klass)
            runnable = nrunner.Runnable('noop')
        tasks.append(nrunner.Task(identifier, runnable, status_uris))
    return tasks
def resolutions_to_tasks(resolutions, config):
    """Turn successful reference resolutions into nrunner tasks.

    :param resolutions: reference resolutions from the resolver
    :param config: job configuration dict (filtering and status server)
    :returns: list of nrunner.Task objects
    """
    successful = [res for res in resolutions
                  if res.result == resolver.ReferenceResolutionResult.SUCCESS]
    no_digits = len(str(len(successful)))
    # invariant lookups hoisted out of the loops
    filter_by_tags = config.get('filter_by_tags')
    status_uris = [config.get('status_server')]
    tasks = []
    index = 0
    for resolution in successful:
        name = resolution.reference
        for runnable in resolution.resolutions:
            if filter_by_tags:
                matches = filter_test_tags_runnable(
                    runnable,
                    filter_by_tags,
                    config.get('filter_by_tags_include_empty'),
                    config.get('filter_by_tags_include_empty_key'))
                if not matches:
                    continue
            # prefer the runnable's own URI as the display name
            if runnable.uri:
                name = runnable.uri
            identifier = str(test.TestID(index + 1, name, None, no_digits))
            tasks.append(nrunner.Task(identifier, runnable, status_uris))
            index += 1
    return tasks
def _get_all_runtime_tasks(test_suite):
    """Wrap every test in *test_suite* into a RuntimeTask.

    :param test_suite: the suite whose tests are converted
    :returns: list of RuntimeTask objects, one per test
    """
    no_digits = len(str(len(test_suite)))
    status_uris = [test_suite.config.get('nrunner.status_server_uri')]
    result = []
    for index, runnable in enumerate(test_suite.tests, start=1):
        # this is all rubbish data
        prefix = (f"{test_suite.name}-{index}"
                  if test_suite.name else index)
        test_id = TestID(prefix, runnable.uri, None, no_digits)
        task = nrunner.Task(runnable, test_id, status_uris,
                            nrunner.RUNNERS_REGISTRY_PYTHON_CLASS)
        result.append(RuntimeTask(task))
    return result
async def run_test(self, job, node):
    """
    Run a test instance inside a subprocess.

    :param job: job that includes the test suite
    :type job: :py:class:`avocado.core.job.Job`
    :param node: test node to run
    :type node: :py:class:`TestNode`
    """
    if node.spawner is None:
        # fall back to the first available slot, or no slot at all
        default_slot = self.slots[0] if self.slots else ""
        node.set_environment(job, default_slot)
    # once the slot is set (here or earlier), the hostname reflects it
    hostname = node.params["hostname"]
    hostname = "localhost" if not hostname else hostname
    logging.debug(f"Running {node.id} on {hostname}")

    if not self.status_repo:
        # lazily start a single status repo/server pair for this job
        self.status_repo = StatusRepo(job.unique_id)
        self.status_server = StatusServer(
            job.config.get('nrunner.status_server_listen'),
            self.status_repo)
        asyncio.ensure_future(self.status_server.serve_forever())
        # TODO: this needs more customization
        asyncio.ensure_future(self._update_status(job))

    # fix: use the `job` argument consistently (the rest of this method
    # operates on it), rather than reaching for `self.job`
    raw_task = nrunner.Task(node.get_runnable(), node.id_test,
                            [job.config.get('nrunner.status_server_uri')],
                            nrunner.RUNNERS_REGISTRY_PYTHON_CLASS,
                            job_id=job.unique_id)
    task = RuntimeTask(raw_task)
    self.tasks += [task]

    # TODO: use a single state machine for all test nodes when we are able
    # to at least add requested tasks to it safely (using its locks)
    await Worker(
        state_machine=TaskStateMachine([task], self.status_repo),
        spawner=node.spawner,
        max_running=1,
        task_timeout=job.config.get('task.timeout.running')).run()
def setUp(self):
    # a task whose runnable kind has no matching runner registered
    self.task = nrunner.Task(
        '1-test_pick_runner_command',
        nrunner.Runnable('lets-image-a-kind', 'test_pick_runner_command'))
def setUp(self):
    # a task for a custom runnable kind, used to exercise runner selection
    self.task = nrunner.Task(
        '1-test_runner_command_selection',
        nrunner.Runnable('mykind', 'test_runner_command_selection'))
def test_set_category(self):
    # an explicitly passed category must be stored on the task as-is
    task = nrunner.Task(nrunner.Runnable('noop', 'noop_uri'),
                        'task_id',
                        category='new_category')
    self.assertEqual(task.category, 'new_category')
def test_default_category(self):
    # when no category is given, a task defaults to 'test'
    task = nrunner.Task(nrunner.Runnable('noop', 'noop_uri'), 'task_id')
    self.assertEqual(task.category, 'test')
def setUp(self):
    # a minimal noop task paired with a spawner whose liveness answers
    # are randomized
    self.task = nrunner.Task('1', nrunner.Runnable('noop', 'uri'))
    self.spawner = MockRandomAliveSpawner()
def setUp(self):
    # a minimal noop task paired with the real process spawner
    self.task = nrunner.Task('1', nrunner.Runnable('noop', 'uri'))
    self.spawner = ProcessSpawner()
def setUp(self):
    # a noop runtime task paired with a spawner whose liveness answers
    # are randomized
    self.runtime_task = RuntimeTask(
        nrunner.Task(nrunner.Runnable('noop', 'uri'), '1'))
    self.spawner = MockRandomAliveSpawner()
def setUp(self):
    # a noop runtime task paired with the real process spawner
    self.runtime_task = RuntimeTask(
        nrunner.Task(nrunner.Runnable('noop', 'uri'), '1'))
    self.spawner = ProcessSpawner()
def setUp(self):
    # a noop runtime task paired with the basic mock spawner
    self.runtime_task = RuntimeTask(
        nrunner.Task('1', nrunner.Runnable('noop', 'uri')))
    self.spawner = MockSpawner()