def test_job_run_result_json_enabled(self):
    """Enabling the JSON result plugin must create results.json under
    the 'latest' results directory after a successful job run."""
    self.base_config['job.run.result.json.enabled'] = True
    with Job.from_config(self.base_config) as json_job:
        exit_code = json_job.run()
        self.assertEqual(exit_code, exit_codes.AVOCADO_ALL_OK)
        # The default output location is <results>/latest/results.json.
        expected_path = os.path.join(self.tmpdir.name, 'latest',
                                     'results.json')
        self.assertTrue(os.path.exists(expected_path))
def test_custom_exit_codes(self):
    """Exit code 1 (from /bin/false) is configured as a SKIP code, so
    the job as a whole must finish with a success (0) exit status."""
    config = {
        'resolver.references': ['/bin/false'],
        'runner.exectest.exitcodes.skip': [1],
    }
    with Job.from_config(job_config=config) as skip_job:
        result = skip_job.run()
        self.assertEqual(result, 0)
def test_job_run_result_json_output(self):
    """The JSON result plugin must honor an explicit output path set
    through 'job.run.result.json.output'."""
    target_path = os.path.join(self.tmpdir.name, 'myresults.json')
    self.base_config['job.run.result.json.output'] = target_path
    with Job.from_config(self.base_config) as json_job:
        self.assertEqual(json_job.run(), exit_codes.AVOCADO_ALL_OK)
        self.assertTrue(os.path.exists(target_path))
def test_custom_exit_codes(self):
    """A /bin/false exec-test whose exit code (1) is listed in
    'runner.exectest.exitcodes.skip' is treated as a skip, so
    job.run() must return 0."""
    job_config = {
        "resolver.references": ["/bin/false"],
        "runner.exectest.exitcodes.skip": [1],
    }
    with Job.from_config(job_config=job_config) as job:
        self.assertEqual(job.run(), 0)
def test_sleep_longer_timeout(self):
    """A test sleeping past 'task.timeout.running' (2 seconds) must be
    accounted as skipped rather than passed under the nrunner runner."""
    config = {
        'resolver.references': [self.script.path],
        'run.results_dir': self.tmpdir.name,
        'task.timeout.running': 2,
        'run.test_runner': 'nrunner',
    }
    with Job.from_config(job_config=config) as timed_job:
        timed_job.run()
        self.assertEqual(1, timed_job.result.skipped)
        self.assertEqual(0, timed_job.result.passed)
def test_custom_exit_codes(self):
    """With an explicit nrunner status server configured, an exec-test
    exit code listed as a skip code still yields an overall job exit
    status of 0."""
    # The same address is used both for listening and for reporting.
    server_address = "127.0.0.1:%u" % find_free_port()
    config = {
        'run.references': ['/bin/false'],
        'run.test_runner': 'nrunner',
        'runner.exectest.exitcodes.skip': [1],
        'nrunner.status_server_listen': server_address,
        'nrunner.status_server_uri': server_address,
        'run.keep_tmp': True,
    }
    with Job.from_config(job_config=config) as job:
        self.assertEqual(job.run(), 0)
def test_failfast(self):
    """With failfast enabled and one task at a time (no shuffling), the
    first failure (/bin/false) must cancel the remaining two tests."""
    test_refs = ['/bin/true', '/bin/false', '/bin/true', '/bin/true']
    config = {
        'resolver.references': test_refs,
        'run.failfast': True,
        'nrunner.shuffle': False,
        'nrunner.max_parallel_tasks': 1,
    }
    with Job.from_config(job_config=config) as failfast_job:
        # NOTE(review): 9 presumably combines the tests-failed and
        # job-interrupted exit-code bits — confirm against exit_codes.
        self.assertEqual(failfast_job.run(), 9)
        self.assertEqual(failfast_job.result.passed, 1)
        self.assertEqual(failfast_job.result.errors, 0)
        self.assertEqual(failfast_job.result.failed, 1)
        self.assertEqual(failfast_job.result.skipped, 2)
def test_sleep_longer_timeout(self):
    """Status-server variant: a sleep test exceeding the 2-second
    running timeout is accounted as skipped, not passed."""
    server_address = '127.0.0.1:%u' % find_free_port()
    config = {
        'run.references': [self.script.path],
        'nrunner.status_server_listen': server_address,
        'nrunner.status_server_uri': server_address,
        'run.results_dir': self.tmpdir.name,
        'run.keep_tmp': True,
        'task.timeout.running': 2,
        'run.test_runner': 'nrunner',
    }
    with Job.from_config(job_config=config) as timed_job:
        timed_job.run()
        self.assertEqual(1, timed_job.result.skipped)
        self.assertEqual(0, timed_job.result.passed)
def test_sleep_longer_timeout(self):
    """A test running past 'task.timeout.running' must be interrupted
    (neither passed nor skipped) and carry a timeout fail reason."""
    config = {
        "resolver.references": [self.script.path],
        "run.results_dir": self.tmpdir.name,
        "task.timeout.running": 2,
    }
    with Job.from_config(job_config=config) as job:
        job.run()
        job_result = job.result
        self.assertEqual(1, job_result.interrupted)
        self.assertEqual(0, job_result.passed)
        self.assertEqual(0, job_result.skipped)
        self.assertEqual(
            "Test interrupted: Timeout reached",
            job_result.tests[0]["fail_reason"],
        )
def test_failfast(self):
    """Status-server variant of the failfast check: the first failing
    test stops the job, skipping the two tests still queued."""
    server_address = "127.0.0.1:%u" % find_free_port()
    config = {
        'run.references': ['/bin/true', '/bin/false',
                           '/bin/true', '/bin/true'],
        'run.test_runner': 'nrunner',
        'run.failfast': True,
        'nrunner.shuffle': False,
        'nrunner.status_server_listen': server_address,
        'nrunner.status_server_uri': server_address,
        'nrunner.max_parallel_tasks': 1,
    }
    with Job.from_config(job_config=config) as job:
        # NOTE(review): 9 presumably combines the tests-failed and
        # job-interrupted exit-code bits — confirm against exit_codes.
        self.assertEqual(job.run(), 9)
        self.assertEqual(job.result.passed, 1)
        self.assertEqual(job.result.errors, 0)
        self.assertEqual(job.result.failed, 1)
        self.assertEqual(job.result.skipped, 2)
def test_sleep_longer_timeout_podman(self):
    """Timeout handling must also hold when the test is executed by the
    podman spawner: the task is interrupted with a timeout reason."""
    sleep_script = script.Script(
        os.path.join(self.tmpdir.name, "sleeptest.py"),
        TEST_INSTRUMENTED_SLEEP)
    with sleep_script as test:
        config = {
            'resolver.references': [test.path],
            'run.results_dir': self.tmpdir.name,
            'task.timeout.running': 2,
            'nrunner.spawner': 'podman',
            'spawner.podman.image': 'fedora:latest',
        }
        with Job.from_config(job_config=config) as job:
            job.run()
            self.assertEqual(1, job.result.interrupted)
            self.assertEqual(0, job.result.passed)
            self.assertEqual(0, job.result.skipped)
            self.assertEqual('Test interrupted: Timeout reached',
                             job.result.tests[0]['fail_reason'])
def test_failfast(self):
    """One failing test among four, run serially without shuffling and
    with failfast on: one pass, one fail, two skipped."""
    references = [
        "/bin/true",
        "/bin/false",
        "/bin/true",
        "/bin/true",
    ]
    config = {
        "resolver.references": references,
        "run.failfast": True,
        "nrunner.shuffle": False,
        "nrunner.max_parallel_tasks": 1,
    }
    with Job.from_config(job_config=config) as job:
        self.assertEqual(job.run(), 9)
        self.assertEqual(job.result.passed, 1)
        self.assertEqual(job.result.errors, 0)
        self.assertEqual(job.result.failed, 1)
        self.assertEqual(job.result.skipped, 2)
def test_sleep_longer_timeout_podman(self):
    """Running the instrumented sleep test inside a podman container,
    exceeding 'task.timeout.running' must interrupt the task and record
    the timeout fail reason."""
    test_path = os.path.join(self.tmpdir.name, "sleeptest.py")
    with script.Script(test_path, TEST_INSTRUMENTED_SLEEP) as test:
        config = {
            "resolver.references": [test.path],
            "run.results_dir": self.tmpdir.name,
            "task.timeout.running": 2,
            "nrunner.spawner": "podman",
            "spawner.podman.image": "fedora:latest",
        }
        with Job.from_config(job_config=config) as job:
            job.run()
            result = job.result
            self.assertEqual(1, result.interrupted)
            self.assertEqual(0, result.passed)
            self.assertEqual(0, result.skipped)
            self.assertEqual("Test interrupted: Timeout reached",
                             result.tests[0]["fail_reason"])
#!/usr/bin/env python3
"""Minimal example: build and run an Avocado job directly from a config
dict that only lists the test references."""

import sys

from avocado.core.job import Job

job_config = {
    "resolver.references": ["examples/tests/passtest.py:PassTest.test"]
}

# Job.from_config() resolves the references from the config dict itself,
# so no explicit suite objects need to be built here.  The original
# comment (mentioning suite names and suite indexes "1 and 2") was
# copied from a multi-suite example and did not apply to this single
# implicit suite.
if __name__ == "__main__":
    # Guard the entry point so importing this module has no side effects.
    with Job.from_config(job_config=job_config) as job:
        sys.exit(job.run())
from avocado.core.job import Job

# NOTE(review): this file appears to live three directory levels below
# the repository root — confirm against the actual tree layout.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(THIS_DIR)))
TESTS_DIR = os.path.join(os.path.dirname(THIS_DIR), 'tests')

# Suite: the Cirrus CI checks.
cirrus_ci = {
    'resolver.references': [os.path.join(TESTS_DIR, 'cirrusci.py')],
}

# Suite: unit + functional selftests tagged parallel:1, run serially.
parallel_1 = {
    'resolver.references': [
        os.path.join('selftests', 'unit'),
        os.path.join('selftests', 'functional'),
    ],
    'filter.by_tags.tags': ['parallel:1'],
    'nrunner.max_parallel_tasks': 1,
}

# Suite: vmimage checks, multiplexed over the variants file, serial.
vmimage = {
    'resolver.references': [os.path.join(TESTS_DIR, 'vmimage.py')],
    'yaml_to_mux.files': [
        os.path.join(TESTS_DIR, 'vmimage.py.data', 'variants.yml'),
    ],
    'nrunner.max_parallel_tasks': 1,
}

if __name__ == '__main__':
    os.chdir(ROOT_DIR)
    config = {
        'job.output.testlogs.statuses': ['FAIL', 'ERROR', 'INTERRUPT'],
    }
    with Job.from_config(config, [cirrus_ci, parallel_1, vmimage]) as j:
        os.environ['AVOCADO_CHECK_LEVEL'] = '3'
        sys.exit(j.run())