def setUp(self):
    """Create the temporary scripts needed by the skip-decorator tests."""
    os.chdir(BASEDIR)
    prefix = temp_dir_prefix(__name__, self, 'setUp')
    self.tmpdir = tempfile.mkdtemp(prefix=prefix)
    # (attribute name, file name, script content) for each helper script
    specs = (
        ('test_module', 'test_skip_decorators.py',
         AVOCADO_TEST_SKIP_DECORATORS),
        ('test_lib', 'lib_skip_decorators.py',
         AVOCADO_TEST_SKIP_LIB),
        ('skip_setup', 'test_skip_decorator_setup.py',
         AVOCADO_SKIP_DECORATOR_SETUP),
        ('bad_teardown', 'test_skip_decorator_teardown.py',
         AVOCADO_SKIP_DECORATOR_TEARDOWN),
    )
    for attr, fname, content in specs:
        helper = script.Script(os.path.join(self.tmpdir, fname), content)
        helper.save()
        setattr(self, attr, helper)
def test_pre_post(self):
    """
    Runs both pre and post scripts and makes sure both execute properly
    """
    pre_touch = script.Script(os.path.join(self.pre_dir, 'touch.sh'),
                              SCRIPT_PRE_TOUCH)
    pre_touch.save()
    checker = script.Script(
        os.path.join(self.tmpdir.name, 'check_touch.sh'), TEST_CHECK_TOUCH)
    checker.save()
    post_rm = script.Script(os.path.join(self.post_dir, 'rm.sh'),
                            SCRIPT_POST_RM)
    post_rm.save()
    config = script.TemporaryScript(
        "pre_post.conf",
        SCRIPT_PRE_POST_CFG % (self.pre_dir, self.post_dir))
    with config:
        result = process.run('%s --config %s run --job-results-dir %s '
                             '--disable-sysinfo %s'
                             % (AVOCADO, config, self.tmpdir.name, checker))
        # Pre/Post scripts failures do not (currently?) alter the exit status
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn(
            'Pre job script "%s" exited with status "1"' % pre_touch,
            result.stderr_text)
        self.assertNotIn(
            'Post job script "%s" exited with status "1"' % post_rm,
            result.stderr_text)
def test_pre_post(self):
    """
    Runs both pre and post scripts and makes sure both execute properly
    """
    pre_touch = script.Script(
        os.path.join(self.pre_dir, "touch.sh"), SCRIPT_PRE_TOUCH
    )
    pre_touch.save()
    checker = script.Script(
        os.path.join(self.tmpdir.name, "check_touch.sh"), TEST_CHECK_TOUCH
    )
    checker.save()
    post_rm = script.Script(os.path.join(self.post_dir, "rm.sh"), SCRIPT_POST_RM)
    post_rm.save()
    config = script.TemporaryScript(
        "pre_post.conf", SCRIPT_PRE_POST_CFG % (self.pre_dir, self.post_dir)
    )
    with config:
        result = process.run(
            f"{AVOCADO} --config {config} run "
            f"--job-results-dir {self.tmpdir.name} "
            f"--disable-sysinfo {checker}"
        )
        # Pre/Post scripts failures do not (currently?) alter the exit status
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn(
            f'Pre job script "{pre_touch}" exited with status "1"',
            result.stderr_text,
        )
        self.assertNotIn(
            f'Post job script "{post_rm}" exited with status "1"',
            result.stderr_text,
        )
def setUp(self):
    """Write out the cancel test scripts into a fresh temporary dir."""
    os.chdir(basedir)
    self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
    self._test_cancel = script.Script(
        os.path.join(self.tmpdir, 'test_cancel.py'), TEST_CANCEL)
    self._test_cancel.save()
    self._test_cancel_on_setup = script.Script(
        os.path.join(self.tmpdir, 'test_cancel_on_setup.py'),
        TEST_CANCEL_ON_SETUP)
    self._test_cancel_on_setup.save()
def setUp(self):
    """Write out the skip-decorator test module and its helper lib."""
    os.chdir(basedir)
    self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
    module = script.Script(
        os.path.join(self.tmpdir, 'test_skip_decorators.py'),
        AVOCADO_TEST_SKIP_DECORATORS)
    module.save()
    self.test_module = module
    lib = script.Script(
        os.path.join(self.tmpdir, 'lib_skip_decorators.py'),
        AVOCADO_TEST_SKIP_LIB)
    lib.save()
    self.test_lib = lib
def test_print_to_std(self):
    """Check that test output reaches job.log and the per-test record files."""

    def _check_output(path, exps, name):
        # Require every message in `exps` to appear in `path`, in order.
        # `i` counts how many messages have been matched so far.
        i = 0
        end = len(exps)
        # Read once under a context manager; the previous version leaked
        # open file handles (`for line in open(path)` and
        # `open(path).read()` in the failure message).
        with open(path) as log_file:
            content = log_file.read()
        for line in content.splitlines(True):
            if exps[i] in line:
                i += 1
                if i == end:
                    break
        self.assertEqual(i, end, "Failed to find %sth message from\n%s\n"
                         "\nin the %s. Either it's missing or in wrong "
                         "order.\n%s" % (i, "\n".join(exps), name, content))

    test = script.Script(os.path.join(self.tmpdir, "output_test.py"),
                         OUTPUT_TEST_CONTENT)
    test.save()
    result = process.run("%s run --job-results-dir %s --sysinfo=off "
                         "--json - -- %s" % (AVOCADO, self.tmpdir, test))
    res = json.loads(result.stdout)
    joblog = res["debuglog"]
    exps = ["[stdout] top_print", "[stdout] top_stdout",
            "[stderr] top_stderr", "[stdout] top_process",
            "[stdout] init_print", "[stdout] init_stdout",
            "[stderr] init_stderr", "[stdout] init_process",
            "[stdout] test_print", "[stdout] test_stdout",
            "[stderr] test_stderr", "[stdout] test_process"]
    _check_output(joblog, exps, "job.log")
    testdir = res["tests"][0]["logdir"]
    with open(os.path.join(testdir, "stdout")) as stdout_file:
        self.assertEqual("test_print\ntest_stdout\ntest_process",
                         stdout_file.read())
    with open(os.path.join(testdir, "stderr")) as stderr_file:
        self.assertEqual("test_stderr\n", stderr_file.read())
    with open(os.path.join(testdir, "output")) as output_file:
        self.assertEqual("test_print\ntest_stdout\ntest_stderr\n"
                         "test_process", output_file.read())
def test_exception_not_in_path(self):
    """An exception class defined in a lib dir outside the test's own
    path must still unpickle cleanly in the runner."""
    libdir = os.path.join(self.tmpdir.name, "shared_lib")
    os.mkdir(libdir)
    mylib = script.Script(
        os.path.join(libdir, "mylib.py"),
        "from avocado import TestCancel\n\n"
        "class CancelExc(TestCancel):\n"
        " pass")
    mylib.save()
    mytest = script.Script(os.path.join(self.tmpdir.name, "mytest.py"),
                           RAISE_CUSTOM_PATH_EXCEPTION_CONTENT)
    mytest.save()
    cmd = (f'{AVOCADO} --show test run --disable-sysinfo '
           f'--job-results-dir {self.tmpdir.name} {mytest}')
    result = process.run(cmd)
    self.assertIn(
        b"'fail_reason': 'This should not crash on "
        b"unpickling in runner'",
        result.stdout)
def test_runner_test_with_local_imports(self):
    """Check that a test can import a helper module that lives in its
    own directory."""
    prefix = temp_dir_prefix(self)
    with tempfile.TemporaryDirectory(prefix=prefix) as libdir:
        helper = script.Script(os.path.join(libdir, 'mylib.py'),
                               "def hello():\n return 'Hello world'")
        with helper:
            test_src = ('from avocado import Test\n'
                        'from mylib import hello\n'
                        'class LocalImportTest(Test):\n'
                        ' def test(self):\n'
                        ' self.log.info(hello())\n')
            with script.Script(os.path.join(libdir,
                                            'test_local_imports.py'),
                               test_src) as mytest:
                process.run(f'{AVOCADO} run --disable-sysinfo '
                            f'--job-results-dir {self.tmpdir.name} '
                            f'{mytest}')
def test_runner_simple_python_like_multiple_files(self):
    """
    :avocado: tags=parallel:1
    """
    # Helper lib first, then the test next to it in the same dir
    mylib = script.TemporaryScript(
        "test2.py",
        AVOCADO_SIMPLE_PYTHON_LIKE_MULTIPLE_FILES_LIB,
        "avocado_simpletest_functional",
        self.MODE_0664,
    )
    mylib.save()
    test_path = os.path.join(os.path.dirname(mylib.path), "test.py")
    mytest = script.Script(test_path,
                           AVOCADO_SIMPLE_PYTHON_LIKE_MULTIPLE_FILES)
    os.chdir(BASEDIR)
    mytest.save()
    list_result = process.run(f"{AVOCADO} -V list {mytest}")
    self.assertIn(b"exec-test: 1", list_result.stdout)
    # job should be able to finish under 5 seconds. If this fails, it's
    # possible that we hit the "simple test fork bomb" bug
    run_cmd = (f"{AVOCADO} run --disable-sysinfo --job-results-dir "
               f"'{self.tmpdir.name}' -- '{mytest}'")
    self._run_with_timeout(run_cmd, 5)
def test_log_to_debug(self):
    """Process output must land in the test log as DEBUG-level records."""
    test = script.Script(os.path.join(self.tmpdir.name, "output_test.py"),
                         OUTPUT_TEST_CONTENT)
    test.save()
    result = process.run(
        f"{AVOCADO} run "
        f"--job-results-dir {self.tmpdir.name} "
        f"--disable-sysinfo --json - -- {test}"
    )
    logfile = json.loads(result.stdout_text)["tests"][0]["logfile"]
    # Today, process.run() calls are not part of the test stdout or stderr.
    # Instead those are registered as part of avocado.utils.process logging
    # system. Let's just add a "DEBUG| " to make sure this will not get
    # confused with [stdout].
    expected = [
        b" DEBUG| [stdout] top_process",
        b" DEBUG| [stdout] init_process",
        b" DEBUG| [stdout] test_process",
        b" DEBUG| [stderr] __test_stderr__",
        b" DEBUG| [stdout] __test_stdout__",
    ]
    self._check_output(logfile, expected)
def test_check_record_no_module_default(self):
    """
    Checks that the `avocado.utils.process` module won't have a output
    check record mode (`OUTPUT_CHECK_RECORD_MODE`) set by default.

    The reason is that, if this is always set from the command line
    runner, we can't distinguish from a situation where the module
    level configuration should be applied as a fallback to the API
    parameter.  By leaving it unset by default, the command line option
    parameter value `none` will slightly change its behavior, meaning
    that it will explicitly disable output check record when asked to
    do so.
    """
    test_path = os.path.join(self.tmpdir.name, "output_mode_none.py")
    with script.Script(test_path, OUTPUT_MODE_NONE_CONTENT,
                       script.READ_ONLY_MODE) as test:
        command = ("%s run --job-results-dir %s --sysinfo=off "
                   "--json - --output-check-record none -- %s") % (
                       AVOCADO, self.tmpdir.name, test.path)
        result = process.run(command)
        res = json.loads(result.stdout_text)
        testdir = res["tests"][0]["logdir"]
        # All three record files must exist but be empty
        for record_name in ('stdout', 'stderr', 'output'):
            record_path = os.path.join(testdir, record_name)
            self.assertTrue(os.path.exists(record_path))
            with open(record_path, 'r') as record:
                self.assertEqual(record.read(), '')
def test_print_to_std(self):
    """Run OUTPUT_TEST_CONTENT and verify where each output stream ends up.

    First run: checks the job.log aggregates [stdout]/[stderr] records in
    order, then the per-test `stdout`/`stderr` record files.  Second run
    uses --output-check-record=combined and checks the single `output`
    record file.
    """

    def _check_output(path, exps, name):
        # Scan `path` for the byte strings in `exps`, requiring them to
        # appear in order; `i` counts how many have been matched.
        i = 0
        end = len(exps)
        with open(path, 'rb') as output_file:
            # keep the full content for the failure message, then re-read
            # line by line for the ordered scan
            output_file_content = output_file.read()
            output_file.seek(0)
            for line in output_file:
                if exps[i] in line:
                    i += 1
                    if i == end:
                        break
        exps_text = "\n".join([exp.decode() for exp in exps])
        error_msg = ("Failed to find message in position %s from\n%s\n"
                     "\nin the %s. Either it's missing or in wrong "
                     "order.\n%s" % (i, exps_text, name,
                                     output_file_content))
        self.assertEqual(i, end, error_msg)

    test = script.Script(os.path.join(self.tmpdir.name, "output_test.py"),
                         OUTPUT_TEST_CONTENT)
    test.save()
    result = process.run("%s run --job-results-dir %s --sysinfo=off "
                         "--json - -- %s" % (AVOCADO, self.tmpdir.name,
                                             test))
    res = json.loads(result.stdout_text)
    joblog = res["debuglog"]
    exps = [
        b"[stdout] top_print", b"[stdout] top_stdout",
        b"[stderr] top_stderr", b"[stdout] top_process",
        b"[stdout] init_print", b"[stdout] init_stdout",
        b"[stderr] init_stderr", b"[stdout] init_process",
        b"[stdout] test_print", b"[stdout] test_stdout",
        b"[stderr] test_stderr", b"[stdout] test_process"
    ]
    _check_output(joblog, exps, "job.log")
    testdir = res["tests"][0]["logdir"]
    with open(os.path.join(testdir, "stdout"), 'rb') as stdout_file:
        self.assertEqual(
            b"test_print\ntest_stdout\ntest_process__test_stdout__",
            stdout_file.read())
    with open(os.path.join(testdir, "stderr"), 'rb') as stderr_file:
        self.assertEqual(b"test_stderr\n__test_stderr__",
                         stderr_file.read())
    # Now run the same test, but with combined output
    # combined output can not keep track of sys.stdout and sys.stdout
    # writes, as they will eventually be out of sync. In fact,
    # the correct fix is to run the entire test process with redirected
    # stdout and stderr, and *not* play with sys.stdout and sys.stderr.
    # But this change will come later
    result = process.run("%s run --job-results-dir %s --sysinfo=off "
                         "--output-check-record=combined "
                         "--json - -- %s" % (AVOCADO, self.tmpdir.name,
                                             test))
    res = json.loads(result.stdout_text)
    testdir = res["tests"][0]["logdir"]
    with open(os.path.join(testdir, "output")) as output_file:
        self.assertEqual("test_process__test_stderr____test_stdout__",
                         output_file.read())
def test_status_non_zero(self):
    """
    Checks warning when script returns non-zero status
    """
    failing = script.Script(
        os.path.join(self.pre_dir, "non_zero.sh"), SCRIPT_NON_ZERO_STATUS
    )
    failing.save()
    with script.TemporaryScript(
        "non_zero.conf", SCRIPT_NON_ZERO_CFG % self.pre_dir
    ) as config:
        result = process.run(
            f"{AVOCADO} --config {config} run "
            f"--job-results-dir {self.tmpdir.name} "
            f"--disable-sysinfo examples/tests/passtest.py"
        )
        # Pre/Post scripts failures do not (currently?) alter the exit status
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn(
            f'Pre job script "{failing}" exited with status "1"\n',
            result.stderr_text,
        )
def test_verify_whiteboard_save(self):
    """Run the whiteboard test and verify a whiteboard file gets saved."""
    # tempfile.mktemp() is deprecated and race-prone (it only returns a
    # name); mkstemp() actually creates the file atomically.  The fd is
    # closed right away since only the path is needed here.
    tmpfd, tmpfile = tempfile.mkstemp()
    os.close(tmpfd)
    try:
        os.chdir(basedir)
        config = os.path.join(self.tmpdir, "conf.ini")
        content = ("[datadir.paths]\nlogs_dir = %s"
                   % os.path.relpath(self.tmpdir, "."))
        script.Script(config, content).save()
        cmd_line = ('./scripts/avocado --config %s --show all run '
                    '--sysinfo=off whiteboard.py --json %s'
                    % (config, tmpfile))
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s"
                         % (expected_rc, result))
        with open(tmpfile, 'r') as fp:
            json_results = json.load(fp)
        logfile = json_results['tests'][0]['logfile']
        debug_dir = os.path.dirname(logfile)
        whiteboard_path = os.path.join(debug_dir, 'whiteboard')
        self.assertTrue(os.path.exists(whiteboard_path),
                        'Missing whiteboard file %s' % whiteboard_path)
    finally:
        # Best-effort cleanup of the JSON results file
        try:
            os.remove(tmpfile)
        except OSError:
            pass
def test_check_on_off(self):
    """
    Checks that output will always be kept, but it will only make into
    the *test* stdout/stderr/output files when it's not explicitly
    disabled

    This control is defined as an API parameter, `allow_output_check`,
    so it should be possible to enable/disable it on each call.
    """
    test_path = os.path.join(self.tmpdir.name, "test_check_on_off.py")
    with script.Script(test_path, OUTPUT_CHECK_ON_OFF_CONTENT,
                       script.READ_ONLY_MODE) as test:
        command = ("%s run --job-results-dir %s --sysinfo=off "
                   "--json - -- %s") % (AVOCADO, self.tmpdir.name,
                                        test.path)
        result = process.run(command)
        res = json.loads(result.stdout_text)
        testdir = res["tests"][0]["logdir"]
        # Only the *_DO_RECORD_* chunks and plain content are expected;
        # the explicitly-disabled chunks must be absent
        expectations = (
            ('stdout', '__STDOUT_CONTENT____STDOUT_DO_RECORD_CONTENT__'),
            ('stderr', '__STDERR_CONTENT____STDERR_DO_RECORD_CONTENT__'),
        )
        for record_name, expected in expectations:
            record_path = os.path.join(testdir, record_name)
            self.assertTrue(os.path.exists(record_path))
            with open(record_path, 'r') as record:
                self.assertEqual(record.read(), expected)
def test_non_existing_dir(self):
    """
    Checks warning with non existing pre dir
    """
    failing = script.Script(
        os.path.join(self.pre_dir, "non_zero.sh"), SCRIPT_NON_ZERO_STATUS
    )
    failing.save()
    # Point the configuration at a directory that does not exist
    self.pre_dir = "/non/existing/dir"
    with script.TemporaryScript(
        "non_existing_dir.conf", SCRIPT_NON_EXISTING_DIR_CFG % self.pre_dir
    ) as config:
        result = process.run(
            f"{AVOCADO} --config {config} run "
            f"--job-results-dir {self.tmpdir.name} "
            f"--disable-sysinfo examples/tests/passtest.py"
        )
        # Pre/Post scripts failures do not (currently?) alter the exit status
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn(b"-job scripts has not been found", result.stderr)
        self.assertNotIn(
            f'Pre job script "{failing}" exited with status "1"',
            result.stderr_text,
        )
def test_verify_whiteboard_save(self):
    """Run the whiteboard example and verify a whiteboard file is saved."""
    # tempfile.mktemp() is deprecated and only returns a name, leaving a
    # window for races; mkstemp() actually creates the file.  It lives
    # inside self.tmpdir.name, so the usual tmpdir cleanup removes it.
    tmpfd, tmpfile = tempfile.mkstemp(dir=self.tmpdir.name)
    os.close(tmpfd)
    config = os.path.join(self.tmpdir.name, "conf.ini")
    content = (
        "[datadir.paths]\nlogs_dir = %s"  # pylint: disable=C0209
        % os.path.relpath(self.tmpdir.name, ".")
    )
    script.Script(config, content).save()
    cmd_line = (
        f"{AVOCADO} --config {config} --show all run "
        f"--job-results-dir {self.tmpdir.name} "
        f"--disable-sysinfo examples/tests/whiteboard.py "
        f"--json {tmpfile}"
    )
    result = process.run(cmd_line, ignore_status=True)
    expected_rc = exit_codes.AVOCADO_ALL_OK
    self.assertEqual(
        result.exit_status,
        expected_rc,
        (f"Avocado did not return rc {expected_rc}:" f"\n{result}"),
    )
    with open(tmpfile, "r", encoding="utf-8") as fp:
        json_results = json.load(fp)
    logfile = json_results["tests"][0]["logfile"]
    debug_dir = os.path.dirname(logfile)
    whiteboard_path = os.path.join(debug_dir, "whiteboard")
    self.assertTrue(
        os.path.exists(whiteboard_path),
        f"Missing whiteboard file {whiteboard_path}",
    )
def test_single_success(self):
    """A job with the single-success check must report PASS 1."""
    test_path = os.path.join(self.tmpdir.name, 'test_single_success.py')
    with script.Script(test_path, SINGLE_SUCCESS_CHECK) as test:
        result = process.run(self.command % (AVOCADO, test.path),
                             ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('PASS 1', result.stdout_text)
        self.assertNotIn('bash', result.stdout_text)
def test_multiple_success(self):
    """A job whose multiple-success tests all pass must report PASS 3."""
    test_path = os.path.join(self.tmpdir.name, 'test_multiple_success.py')
    with script.Script(test_path, MULTIPLE_SUCCESS) as test:
        result = process.run(self.command % (AVOCADO, test.path),
                             ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('PASS 3', result.stdout_text)
        self.assertNotIn('vim-common', result.stdout_text)
def test_multiple_fails(self):
    """The multiple-fail job must show one PASS and two SKIPs."""
    test_path = os.path.join(self.tmpdir.name, 'test_multiple_fail.py')
    with script.Script(test_path, MULTIPLE_FAIL) as test:
        result = process.run(self.command % (AVOCADO, test.path),
                             ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('PASS 1', result.stdout_text)
        self.assertIn('SKIP 2', result.stdout_text)
        self.assertNotIn('-foo-bar-', result.stdout_text)
def test_runner_test_with_local_imports(self):
    """Check that a test can import a helper module from its own dir."""
    prefix = temp_dir_prefix(self)
    with tempfile.TemporaryDirectory(prefix=prefix) as libdir:
        helper = script.Script(
            os.path.join(libdir, "mylib.py"),
            "def hello():\n return 'Hello world'",
        )
        test_src = ("from avocado import Test\n"
                    "from mylib import hello\n"
                    "class LocalImportTest(Test):\n"
                    " def test(self):\n"
                    " self.log.info(hello())\n")
        with helper:
            with script.Script(
                os.path.join(libdir, "test_local_imports.py"), test_src
            ) as mytest:
                process.run(f"{AVOCADO} run --disable-sysinfo "
                            f"--job-results-dir {self.tmpdir.name} "
                            f"{mytest}")
def test_runner_test_with_local_imports(self):
    """Check that a test importing a sibling module runs successfully."""
    mylib = script.TemporaryScript('mylib.py', HELLO_LIB_CONTENTS,
                                   'avocado_simpletest_functional')
    mylib.save()
    test_path = os.path.join(os.path.dirname(mylib.path),
                             'test_local_imports.py')
    mytest = script.Script(test_path, LOCAL_IMPORT_TEST_CONTENTS)
    os.chdir(basedir)
    mytest.save()
    process.run("./scripts/avocado run --sysinfo=off --job-results-dir %s "
                "%s" % (self.tmpdir, mytest))
def test_logdir_path(self):
    """The "logdir is:" line in the test log must match the reported logdir."""
    test = script.Script(os.path.join(self.tmpdir.name, "logdir_test.py"),
                         TEST_LOGDIR)
    test.save()
    result = process.run("%s run --job-results-dir %s --disable-sysinfo "
                         "--json - -- %s" % (AVOCADO, self.tmpdir.name,
                                             test))
    tests = json.loads(result.stdout_text)["tests"]
    logfile = tests[0]["logfile"]
    testdir = tests[0]["logdir"]
    with open(logfile, 'r') as debug_file:
        self.assertIn("logdir is: %s" % testdir, debug_file.read())
def test_logdir_path(self):
    """The "logdir is:" line in the test log must match the reported logdir."""
    test = script.Script(os.path.join(self.tmpdir.name, "logdir_test.py"),
                         TEST_LOGDIR)
    test.save()
    result = process.run(f"{AVOCADO} run "
                         f"--job-results-dir {self.tmpdir.name} "
                         f"--disable-sysinfo --json - -- {test}")
    tests = json.loads(result.stdout_text)["tests"]
    logfile = tests[0]["logfile"]
    testdir = tests[0]["logdir"]
    with open(logfile, "r", encoding="utf-8") as debug_file:
        self.assertIn(f"logdir is: {testdir}", debug_file.read())
def test_show(self):
    """
    Checks if `core.show` is respected in different configurations.
    """
    with script.Script(os.path.join(self.tmpdir.name, "test_show.py"),
                       OUTPUT_SHOW_TEST, script.READ_ONLY_MODE) as test:
        result = process.run("%s run --disable-sysinfo -- %s"
                             % (AVOCADO, test.path))
        # Exactly one job header, and no /bin/true leaking to stdout
        job_id_number = result.stdout_text.count('JOB ID')
        bin_true_number = result.stdout_text.count('/bin/true')
        self.assertEqual(1, job_id_number)
        self.assertEqual(0, bin_true_number)
def test_avocado_instrumented(self):
    """Run an instrumented passing test through the podman spawner."""
    with script.Script(os.path.join(self.tmpdir.name, "passtest.py"),
                       TEST_INSTRUMENTED_PASS) as test:
        result = process.run(
            f"{AVOCADO} run "
            f"--job-results-dir {self.tmpdir.name} "
            f"--disable-sysinfo --nrunner-spawner=podman "
            f"--spawner-podman-image=fedora:latest -- "
            f"{test}",
            ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        for message in ("passtest.py:PassTest.test: STARTED",
                        "passtest.py:PassTest.test: PASS"):
            self.assertIn(message, result.stdout_text)
def test_avoid_output_on_non_test_task(self):
    """
    Ensure that a `Task` that is not a `test` is skipped, and the output
    is not displayed while the job runs.
    """
    job_path = os.path.join(self.tmpdir.name,
                            'test_avoid_output_non_test_tasks.py')
    with script.Script(job_path, AVOID_NON_TEST_TASKS) as job:
        result = process.run(job.path, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn('/bin/true', result.stdout_text)
        self.assertIn('PASS 1', result.stdout_text)
        self.assertIn('SKIP 1', result.stdout_text)
        self.assertNotIn('/bin/false', result.stdout_text)
def test_print_to_std(self):
    """Verify test output in the test log and per-test record files."""
    test = script.Script(os.path.join(self.tmpdir.name, "output_test.py"),
                         OUTPUT_TEST_CONTENT)
    test.save()
    result = process.run("%s run --job-results-dir %s --disable-sysinfo "
                         "--json - -- %s" % (AVOCADO, self.tmpdir.name,
                                             test))
    res = json.loads(result.stdout_text)
    logfile = res["tests"][0]["logfile"]
    exps = [b"[stdout] top_print", b"[stdout] top_stdout",
            b"[stderr] top_stderr", b"[stdout] init_print",
            b"[stdout] init_stdout", b"[stderr] init_stderr",
            b"[stdout] test_print", b"[stdout] test_stdout",
            b"[stderr] test_stderr"]
    self._check_output(logfile, exps)
    testdir = res["tests"][0]["logdir"]
    with open(os.path.join(testdir, "stdout"), 'rb') as stdout_file:
        expected = (b"top_print\n\ntop_stdout\ninit_print\n\n"
                    b"init_stdout\ntest_print\n\ntest_stdout\n")
        self.assertEqual(expected, stdout_file.read())
    with open(os.path.join(testdir, "stderr"), 'rb') as stderr_file:
        expected = b"top_stderr\ninit_stderr\ntest_stderr\n"
        self.assertEqual(expected, stderr_file.read())
    # With the nrunner, output combined result are inside job.log
    result = process.run("%s run --job-results-dir %s --disable-sysinfo "
                         "--json - -- %s" % (AVOCADO, self.tmpdir.name,
                                             test))
    res = json.loads(result.stdout_text)
    # Fix: previously the logfile from the *first* run was re-read here
    # even though the second run's JSON was parsed; check the log of the
    # run just performed instead.
    logfile = res["tests"][0]["logfile"]
    with open(logfile, 'rb') as output_file:
        expected = [b'[stdout] top_print\n', b'[stdout] \n',
                    b'[stdout] top_stdout\n', b'[stderr] top_stderr\n',
                    b'[stdout] init_print\n', b'[stdout] \n',
                    b'[stdout] init_stdout\n', b'[stderr] init_stderr\n',
                    b'[stdout] test_print\n', b'[stdout] \n',
                    b'[stdout] test_stdout\n', b'[stderr] test_stderr\n']
        # Use a distinct name; the original clobbered `result` (the
        # process result) with this filtered list.
        found = [line for line in output_file.readlines()
                 if line.startswith((b'[stdout]', b'[stderr]'))]
        self.assertEqual(expected, found)
def test_multiple_success(self):
    """A job whose multiple-success tests all pass must report PASS 3."""
    test_path = os.path.join(self.workdir, "test_multiple_success.py")
    with script.Script(test_path, MULTIPLE_SUCCESS) as test:
        result = process.run(self.get_command(test.path),
                             ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn("PASS 3", result.stdout_text)
        self.assertNotIn("vim-common", result.stdout_text)
def test_single_success(self):
    """A job with the single-success check must report PASS 1."""
    test_path = os.path.join(self.workdir, "test_single_success.py")
    with script.Script(test_path, SINGLE_SUCCESS_CHECK) as test:
        result = process.run(self.get_command(test.path),
                             ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn("PASS 1", result.stdout_text)
        self.assertNotIn("bash", result.stdout_text)