def test_positional_kwargs(self):
    """
    Tests both positional and kwargs (theoretically impossible in
    python, but valid for nasty tests) keyword args are used as they
    explicitly represent what they mean
    """
    positional = ("not used", "who cares", {}, None, "0", None, None,
                  "extra_param1", "extra_param2")
    keyword = {"methodName": "test",
               "name": test.TestID(1, "my_name3"),
               "params": {},
               "base_logdir": self.tmpdir.name,
               "tag": "3",
               "job": None,
               "runner_queue": None,
               "extra1": "extra_param3",
               "extra2": "extra_param4"}
    tst = test.MockingTest(*positional, **keyword)
    self.assertEqual(tst.name, "1-my_name3")
def resolutions_to_tasks(resolutions, config):
    """
    Transforms successful reference resolutions into runnable tasks.

    :param resolutions: reference resolutions, only the ones with a
                        SUCCESS result are turned into tasks
    :param config: configuration dict; reads 'filter_by_tags',
                   'filter_by_tags_include_empty',
                   'filter_by_tags_include_empty_key' and 'status_server'
    :return: list of :class:`nrunner.Task`
    """
    tasks = []
    resolutions = [res for res in resolutions
                   if res.result == resolver.ReferenceResolutionResult.SUCCESS]
    # number of digits for uniform, sortable test IDs
    no_digits = len(str(len(resolutions)))
    # hoisted: the tag filter configuration does not change per runnable
    filter_by_tags = config.get('filter_by_tags')
    index = 0
    for resolution in resolutions:
        for runnable in resolution.resolutions:
            if filter_by_tags:
                if not filter_test_tags_runnable(
                        runnable,
                        filter_by_tags,
                        config.get('filter_by_tags_include_empty'),
                        config.get('filter_by_tags_include_empty_key')):
                    continue
            # Fix: compute the name per runnable.  Previously "name" was
            # assigned once per resolution and mutated inside this loop,
            # so a runnable without a URI that followed one with a URI
            # would inherit the previous runnable's URI instead of
            # falling back to the resolution reference.
            name = runnable.uri or resolution.reference
            identifier = str(test.TestID(index + 1, name, None, no_digits))
            tasks.append(nrunner.Task(identifier, runnable,
                                      [config.get('status_server')]))
            index += 1
    return tasks
def suite_to_tasks(suite, status_uris):
    """
    Converts a legacy test suite (list of factories) into nrunner tasks.

    :param suite: list of (test class, constructor args) factories
    :param status_uris: status server URIs handed to every task
    :return: list of :class:`nrunner.Task`
    """
    tasks = []
    no_digits = len(str(len(suite)))
    for index, (klass, args) in enumerate(suite):
        name = args.get("name")
        identifier = str(test.TestID(index + 1, name, None, no_digits))
        if klass == test.PythonUnittest:
            # derive a dotted module path relative to the current dir
            test_dir = args.get("test_dir")
            module_prefix = test_dir.split(os.getcwd())[1][1:]
            module_prefix = module_prefix.replace("/", ".")
            unittest_path = "%s.%s" % (module_prefix, args.get("name"))
            runnable = nrunner.Runnable('python-unittest', unittest_path)
        elif klass == test.SimpleTest:
            runnable = nrunner.Runnable('exec-test', args.get('executable'))
        else:
            # FIXME: This should instead raise an error
            print('WARNING: unknown test type "%s", using "noop"' % klass)
            runnable = nrunner.Runnable('noop')
        tasks.append(nrunner.Task(identifier, runnable, status_uris))
    return tasks
def test_all_dirs_exists_no_hang(self):
    """Setup must fail (not hang) when every candidate dir "exists"."""
    patcher = mock.patch('os.path.exists', return_value=True)
    patcher.start()
    try:
        self.assertRaises(exceptions.TestSetupFail, self.DummyTest,
                          "test", test.TestID(1, "name"),
                          base_logdir=self.tmpdir)
    finally:
        patcher.stop()
def test_combination(self):
    """Positional and keyword arguments used together."""
    options = {"tag": "321",
               "other_param": "Whatever",
               "base_logdir": self.tmpdir.name}
    tst = test.MockingTest("test", test.TestID(1, "my_name4"), **options)
    self.assertEqual(tst.name, "1-my_name4")
def test_init_kwargs(self):
    """Initialization via keyword arguments only."""
    kwargs = {"methodName": "test",
              "name": test.TestID(1, "my_name2"),
              "params": {},
              "base_logdir": self.tmpdir,
              "tag": "a",
              "job": None,
              "runner_queue": None,
              "extra1": "extra_param1",
              "extra2": "extra_param2"}
    tst = test.MockingTest(**kwargs)
    self.assertEqual(tst.name, "1-my_name2")
def test_all_dirs_exists_no_hang(self):
    """Setup must fail (not hang) when every candidate dir "exists"."""
    # chained flexmock call: equivalent to mocking the object first and
    # then declaring the expectation on it
    flexmock(os.path).should_receive('exists').and_return(True)
    self.assertRaises(exceptions.TestSetupFail, self.DummyTest,
                      "test", test.TestID(1, "name"),
                      base_logdir=self.tmpdir)
def test_uid_name_uid_too_large_digits(self):
    """
    Tests that when the filesystem can not cope with the size of the
    Test ID, not even the test uid, an exception will be raised.
    """
    test_id = test.TestID(1, 'test', no_digits=256)
    with self.assertRaises(RuntimeError):
        test_id.str_filesystem  # pylint: disable=W0104
def run(name, path_name):
    """
    Initialize test and check the dirs were created
    """
    tst = self.DummyTest("test", test.TestID(1, name),
                         base_logdir=self.tmpdir)
    logdir = tst.logdir
    self.assertEqual(os.path.basename(logdir), path_name)
    self.assertTrue(os.path.exists(logdir))
    self.assertEqual(os.path.dirname(os.path.dirname(logdir)),
                     self.tmpdir)
def test_simple_test_fail_status(self):
    """A failing shell script must finish with the FAIL status."""
    self.script = script.TemporaryScript('avocado_fail.sh',
                                         FAIL_SCRIPT_CONTENTS,
                                         'avocado_simpletest_unittest')
    self.script.save()
    tst = test.SimpleTest(name=test.TestID(1, self.script.path),
                          base_logdir=self.tmpdir)
    tst.run_avocado()
    self.assertEqual(tst.status, 'FAIL')
def test_uid_name_no_digits(self):
    """With no_digits the uid is zero-padded in the filesystem name."""
    test_id = test.TestID(1, 'file.py:klass.test_method', no_digits=2)
    self.assertEqual(test_id.uid, 1)
    self.assertEqual(test_id.str_uid, '01')
    self.assertEqual(test_id.str_filesystem,
                     '01-file.py:klass.test_method')
    self.assertIs(test_id.variant, None)
    self.assertIs(test_id.str_variant, '')
def test_init(self):
    """MockingTest accepts no args, positional, kwargs and mixtures."""
    # No params
    self.tests.append(test.MockingTest())
    # Positional
    self.tests.append(test.MockingTest("test", test.TestID(1, "my_name"),
                                       {}, None, "1", None, None,
                                       "extra_param1", "extra_param2"))
    self.assertEqual(self.tests[-1].name, "1-my_name")
    # Kwargs
    self.tests.append(test.MockingTest(methodName="test",
                                       name=test.TestID(1, "my_name2"),
                                       params={}, base_logdir=None,
                                       tag="a", job=None,
                                       runner_queue=None,
                                       extra1="extra_param1",
                                       extra2="extra_param2"))
    self.assertEqual(self.tests[-1].name, "1-my_name2")
    # both (theoretically impossible in python, but valid for nasty tests)
    # keyword args are used as they explicitly represent what they mean
    positional = ("not used", "who cares", {}, None, "0", None, None,
                  "extra_param1", "extra_param2")
    self.tests.append(test.MockingTest(*positional,
                                       methodName="test",
                                       name=test.TestID(1, "my_name3"),
                                       params={}, base_logdir=None,
                                       tag="3", job=None,
                                       runner_queue=None,
                                       extra1="extra_param3",
                                       extra2="extra_param4"))
    self.assertEqual(self.tests[-1].name, "1-my_name3")
    # combination
    self.tests.append(test.MockingTest("test", test.TestID(1, "my_name4"),
                                       tag="321", other_param="Whatever"))
    self.assertEqual(self.tests[-1].name, "1-my_name4")
    # ugly combination (positional argument overrides kwargs; this only
    # happens when the substituted class reorders the positional
    # arguments; keyword args are matched first, then positional ones)
    name = "positional_method_name_becomes_test_name"
    tag = "positional_base_logdir_becomes_tag"
    self.tests.append(test.MockingTest(test.TestID(1, name), None, None,
                                       tag, methodName="test",
                                       other_param="Whatever"))
    self.assertEqual(self.tests[-1].name, "1-" + name)
def run_suite(self, job, result, test_suite, variants, timeout=0,
              replay_map=None, execution_order=None):
    """
    Runs a list of tasks sequentially, reporting through the job result.

    :param job: job providing logdir, unique_id and event dispatcher
    :param result: result object notified of start/end of each test
    :param test_suite: list of tasks to run (filtered by requirements)
    :param variants: unused here (no variant support yet)
    :param timeout: unused by this runner
    :param replay_map: unused by this runner
    :param execution_order: unused by this runner
    :return: set of summary flags (always empty in this implementation)
    """
    summary = set()
    test_suite, _ = nrunner.check_tasks_requirements(test_suite)
    result.tests_total = len(test_suite)  # no support for variants yet
    result_dispatcher = job.result_events_dispatcher
    for index, task in enumerate(test_suite):
        task.known_runners = nrunner.RUNNERS_REGISTRY_PYTHON_CLASS
        index += 1  # test IDs are 1-based
        # this is all rubbish data
        early_state = {
            'name': test.TestID(index, task.identifier),
            'job_logdir': job.logdir,
            'job_unique_id': job.unique_id,
        }
        result.start_test(early_state)
        job.result_events_dispatcher.map_method('start_test', result,
                                                early_state)
        statuses = []
        task.status_services = []
        # drain task status messages until a final (non-progress) one
        for status in task.run():
            result_dispatcher.map_method('test_progress', False)
            statuses.append(status)
            if status['status'] not in ["started", "running"]:
                break
        # test execution time is currently missing
        # since 358e800e81 all runners all produce the result in a key called
        # 'result', instead of 'status'. But the Avocado result plugins rely
        # on the current runner approach
        test_state = {'status': statuses[-1]['result'].upper()}
        test_state.update(early_state)
        time_start = statuses[0]['time']
        time_end = statuses[-1]['time']
        time_elapsed = time_end - time_start
        test_state['time_start'] = time_start
        test_state['time_end'] = time_end
        test_state['time_elapsed'] = time_elapsed
        # fake log dir, needed by some result plugins such as HTML
        test_state['logdir'] = ''
        # Populate task dir
        base_path = os.path.join(job.logdir, 'test-results')
        self._populate_task_logdir(base_path, task, statuses,
                                   job.config.get('core.debug'))
        result.check_test(test_state)
        result_dispatcher.map_method('end_test', result, test_state)
    return summary
def test_check_reference_does_not_exist(self):
    '''
    Tests that a check is not made for a file that does not exist
    '''
    tst = self.DummyTest("test", test.TestID(1, "test"),
                         base_logdir=self.tmpdir)
    checked = tst._check_reference('does_not_exist', 'stdout.expected',
                                   'stdout.diff', 'stdout_diff', 'Stdout')
    self.assertFalse(checked)
def test_uid_name(self):
    """Basic TestID attributes for a plain uid/name pair."""
    uid = 1
    name = 'file.py:klass.test_method'
    test_id = test.TestID(uid, name)
    expected_fs = astring.string_to_safe_path('%s-%s' % (uid, name))
    self.assertEqual(test_id.uid, 1)
    self.assertEqual(test_id.str_uid, '1')
    self.assertEqual(test_id.str_filesystem, expected_fs)
    self.assertIs(test_id.variant, None)
    self.assertIs(test_id.str_variant, '')
def _get_fake_filename_test(self, name):
    """
    Returns a Test instance whose `filename` property reports *name*.

    :param name: the value the fake `filename` property will return
    :return: a `FakeFilename` test instance logged under self.tmpdir
    """
    class FakeFilename(test.Test):
        # overrides the filename property so data-dir lookups use `name`
        @property
        def filename(self):
            return name

        def test(self):
            pass
    # NOTE(review): "test" is passed as the TestID uid here (not an int)
    tst_id = test.TestID("test", name=name)
    return FakeFilename("test", tst_id, base_logdir=self.tmpdir)
def test_py_simple_test(self):
    """A Python script without avocado classes loads as SimpleTest."""
    with script.TemporaryScript('simpletest.py', PY_SIMPLE_TEST,
                                'avocado_loader_unittest') as simple:
        klass, params = self.loader.discover(simple.path,
                                             loader.DiscoverMode.ALL)[0]
        self.assertTrue(klass == test.SimpleTest)
        params['name'] = test.TestID(0, params['name'])
        params['base_logdir'] = self.tmpdir.name
        tst = klass(**params)
        tst.run_avocado()
def test_init_positional(self):
    """Initialization via positional arguments."""
    args = ("test", test.TestID(1, "my_name"), {}, None, "1",
            None, None, "extra_param1", "extra_param2")
    tst = test.MockingTest(*args, base_logdir=self.tmpdir.name)
    self.assertEqual(tst.name, "1-my_name")
def setUp(self):
    """Creates and runs one passing and one failing SimpleTest."""
    self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
    # build each temporary script, save it, then run a SimpleTest on it;
    # results end up in self.{pass,fail}_script / self.tst_instance_{pass,fail}
    for kind, contents in (('pass', PASS_SCRIPT_CONTENTS),
                           ('fail', FAIL_SCRIPT_CONTENTS)):
        scr = script.TemporaryScript('avocado_%s.sh' % kind,
                                     contents,
                                     'avocado_simpletest_unittest')
        scr.save()
        setattr(self, '%s_script' % kind, scr)
        instance = test.SimpleTest(name=test.TestID(1, scr.path),
                                   base_logdir=self.tmpdir)
        instance.run_avocado()
        setattr(self, 'tst_instance_%s' % kind, instance)
def test_uid_large_name(self):
    """
    Tests that when the filesystem can not cope with the size of the
    Test ID, the name will be shortened.
    """
    name = 'test_' * 51  # 255 characters
    test_id = test.TestID(1, name)
    self.assertEqual(test_id.uid, 1)
    # only 253 can fit for the test name
    self.assertEqual(test_id.str_filesystem, '1-%s' % name[:253])
    self.assertIs(test_id.variant, None)
    self.assertIs(test_id.str_variant, "")
def test_uid_name_large_digits(self):
    """
    Tests that when the filesystem can only cope with the size of the
    Test ID, that's the only thing that will be kept.
    """
    test_id = test.TestID(1, 'test', no_digits=255)
    padded_uid = '%0255i' % 1
    self.assertEqual(test_id.uid, 1)
    self.assertEqual(test_id.str_uid, padded_uid)
    self.assertEqual(test_id.str_filesystem, padded_uid)
    self.assertIs(test_id.variant, None)
    self.assertIs(test_id.str_variant, '')
def run_suite(self, job, result, test_suite, variants, timeout=0,
              replay_map=None, execution_order=None):
    """
    Runs a list of tasks via external runners, reporting through the job
    result.

    :param job: job providing logdir, unique_id and event dispatcher
    :param result: result object notified of start/end of each test
    :param test_suite: list of tasks (filtered against known runners)
    :param variants: unused here (no variant support yet)
    :param timeout: unused by this runner
    :param replay_map: unused by this runner
    :param execution_order: unused by this runner
    :return: set of summary flags (always empty in this implementation)
    """
    summary = set()
    test_suite, _ = check_tasks_requirements(test_suite, self.KNOWN_EXTERNAL_RUNNERS)  # pylint: disable=W0201
    result.tests_total = len(test_suite)  # no support for variants yet
    result_dispatcher = job.result_events_dispatcher
    for index, task in enumerate(test_suite):
        index += 1  # test IDs are 1-based
        # this is all rubbish data
        early_state = {
            'name': test.TestID(index, task.identifier),
            'job_logdir': job.logdir,
            'job_unique_id': job.unique_id,
        }
        result.start_test(early_state)
        job.result_events_dispatcher.map_method('start_test', result,
                                                early_state)
        statuses = []
        task.status_services = []
        # drain task status messages until a final (non-progress) one
        for status in task.run():
            result_dispatcher.map_method('test_progress', False)
            statuses.append(status)
            if status['status'] not in ["init", "running"]:
                break
        # test execution time is currently missing
        test_state = {'status': statuses[-1]['status'].upper()}
        test_state.update(early_state)
        time_start = statuses[0]['time_start']
        time_end = statuses[-1]['time_end']
        time_elapsed = time_end - time_start
        test_state['time_start'] = time_start
        test_state['time_end'] = time_end
        test_state['time_elapsed'] = time_elapsed
        # fake log dir, needed by some result plugins such as HTML
        test_state['logdir'] = ''
        result.check_test(test_state)
        result_dispatcher.map_method('end_test', result, test_state)
    return summary
def test_load_not_a_test_exec(self):
    """A non-executable, shebang-less file loads but fails to exec."""
    not_a_test = script.TemporaryScript('notatest.py', NOT_A_TEST,
                                        'avocado_loader_unittest')
    not_a_test.save()
    klass, params = self.loader.discover(not_a_test.path,
                                         loader.DiscoverMode.ALL)[0]
    self.assertTrue(klass == test.SimpleTest, klass)
    params['name'] = test.TestID(0, params['name'])
    params['base_logdir'] = self.tmpdir
    tst = klass(**params)
    # The test can't be executed (no shebang), raising an OSError
    # (OSError: [Errno 8] Exec format error)
    self.assertRaises(OSError, tst.test)
    not_a_test.remove()
def test_combination_2(self):
    """
    Tests an ugly combination (positional argument overrides kwargs,
    this only happens when the substituted class reorders the positional
    arguments. We try to first match keyword args and then fall-back to
    positional ones.
    """
    name = "positional_method_name_becomes_test_name"
    tag = "positional_base_logdir_becomes_tag"
    positional = (test.TestID(1, name), None, None, tag)
    tst = test.MockingTest(*positional, methodName="test",
                           other_param="Whatever",
                           base_logdir=self.tmpdir)
    self.assertEqual(tst.name, "1-" + name)
def test_load_simple(self):
    """A shell script is discovered and runs as a SimpleTest."""
    simple_test = script.TemporaryScript('simpletest.sh', SIMPLE_TEST,
                                         'avocado_loader_unittest')
    simple_test.save()
    klass, params = self.loader.discover(simple_test.path,
                                         loader.DiscoverMode.ALL)[0]
    self.assertTrue(klass == test.SimpleTest, klass)
    params['name'] = test.TestID(0, params['name'])
    params['base_logdir'] = self.tmpdir
    tst = klass(**params)
    tst.run_avocado()
    # rediscovering yields exactly one entry named after the script path
    suite = self.loader.discover(simple_test.path, loader.DiscoverMode.ALL)
    self.assertEqual(len(suite), 1)
    self.assertEqual(suite[0][1]["name"], simple_test.path)
    simple_test.remove()
def test_uid_name_large_variant(self):
    """
    Tests that when the filesystem can not cope with the size of the
    Test ID, and a variant name is present, the name will be removed.
    """
    variant_id = 'fast_' * 51  # 255 characters
    test_id = test.TestID(1, 'test', variant={'variant_id': variant_id})
    self.assertEqual(test_id.uid, 1)
    self.assertEqual(test_id.str_filesystem,
                     '1_%s' % variant_id[:253])
    self.assertIs(test_id.variant, variant_id)
    self.assertEqual(test_id.str_variant, ";%s" % variant_id)
def get_test_factory(self, job=None):
    """
    Get test factory from which the test loader will get a runnable test
    instance.

    :param job: avocado job object to for running or None for reporting
                only
    :type job: :py:class:`avocado.core.job.Job`
    :return: test class and constructor parameters
    :rtype: (type, {str, obj})
    """
    params = {'name': test.TestID(self.id, self.params["shortname"]),
              'vt_params': self.params}
    if job is not None:
        # the job is only attached when actually running the test
        params['job'] = job
        params['base_logdir'] = job.logdir
    return VirtTest, params
def test_check_reference_success(self):
    '''
    Tests that a check is made, and is successful
    '''
    class GetDataTest(test.Test):
        def test(self):
            pass

        def get_data(self, filename, source=None, must_exist=True):
            # return the filename (path, really) unchanged
            return filename

    tst = GetDataTest("test", test.TestID(1, "test"),
                      base_logdir=self.tmpdir)
    expected = 'expected content\n'
    reference = os.path.join(tst.logdir, 'content')
    with open(reference, 'w') as produced:
        produced.write(expected)
    # reference file compared against itself must always match
    self.assertTrue(tst._check_reference(reference, reference,
                                         'content.diff',
                                         'content_diff', 'Content'))
def check(uid, name, variant, exp_logdir):
    """Instantiate a DummyTest and verify its log dir basename."""
    instance = self.DummyTest("test", test.TestID(uid, name, variant),
                              base_logdir=self.tmpdir)
    self.assertEqual(os.path.basename(instance.logdir), exp_logdir)
    return instance
def run_suite(self, job, result, test_suite, variants, timeout=0,
              replay_map=None, execution_order=None):
    """
    Run one or more tests and report with test result.

    :param job: an instance of :class:`avocado.core.job.Job`.
    :param result: an instance of :class:`avocado.core.result.Result`
    :param test_suite: a list of tests to run.
    :param variants: A varianter iterator to produce test params.
    :param timeout: maximum amount of time (in seconds) to execute.
    :param replay_map: optional list to override test class based on test
                       index.
    :param execution_order: Mode in which we should iterate through tests
                            and variants.  If not provided, will default
                            to :attr:`DEFAULT_EXECUTION_ORDER`.
    :return: a set with types of test failures.
    """
    summary = set()
    if job.sysinfo is not None:
        job.sysinfo.start_job_hook()
    # queue used by run_test to communicate with the forked test process
    queue = multiprocessing.SimpleQueue()
    if timeout > 0:
        deadline = time.time() + timeout
    else:
        deadline = None  # no job-wide deadline
    test_result_total = variants.get_number_of_tests(test_suite)
    no_digits = len(str(test_result_total))
    result.tests_total = test_result_total
    index = -1
    try:
        # attach job-wide data to every factory before iterating variants
        for test_factory in test_suite:
            test_factory[1]["base_logdir"] = job.logdir
            test_factory[1]["job"] = job
        if execution_order is None:
            execution_order = self.DEFAULT_EXECUTION_ORDER
        for test_factory, variant in self._iter_suite(job, test_suite,
                                                      variants,
                                                      execution_order):
            index += 1
            test_parameters = test_factory[1]
            name = test_parameters.get("name")
            test_parameters["name"] = test.TestID(index + 1, name,
                                                  variant,
                                                  no_digits)
            if deadline is not None and time.time() > deadline:
                # job timeout hit: replace the test with a timeout-skip
                summary.add('INTERRUPTED')
                if 'methodName' in test_parameters:
                    del test_parameters['methodName']
                test_factory = (test.TimeOutSkipTest, test_parameters)
                if not self.run_test(job, result, test_factory, queue,
                                     summary):
                    break
            else:
                # replay mode may substitute the test class at this index
                if (replay_map is not None and
                        replay_map[index] is not None):
                    test_parameters["methodName"] = "test"
                    test_factory = (replay_map[index], test_parameters)
                if not self.run_test(job, result, test_factory, queue,
                                     summary, deadline):
                    break
    except KeyboardInterrupt:
        TEST_LOG.error('Job interrupted by ctrl+c.')
        summary.add('INTERRUPTED')
    if job.sysinfo is not None:
        job.sysinfo.end_job_hook()
    result.end_tests()
    job.funcatexit.run()
    # ignore SIGTSTP from here on so the job teardown can't be suspended
    signal.signal(signal.SIGTSTP, signal.SIG_IGN)
    return summary