def test_get_json(self):
    """The JSON serialization must expose kind, uri, config and args."""
    runnable = Runnable('noop', '_uri_', 'arg1', 'arg2')
    expected_json = ('{"kind": "noop", '
                     '"uri": "_uri_", '
                     '"config": {}, '
                     '"args": ["arg1", "arg2"]}')
    self.assertEqual(runnable.get_json(), expected_json)
def test_runnable_to_recipe_noop(self):
    """A noop runnable written to disk can be loaded back as a recipe."""
    recipe_path = os.path.join(self.tmpdir.name, 'recipe.json')
    Runnable('noop', None).write_json(recipe_path)
    self.assertTrue(os.path.exists(recipe_path))
    loaded = Runnable.from_recipe(recipe_path)
    self.assertEqual(loaded.kind, 'noop')
def test_runner_from_runnable_error(self):
    """Picking a runner class for an unsupported kind must raise ValueError.

    The original try/except form silently passed when no exception was
    raised at all; assertRaises makes raising the exception mandatory.
    """
    runnable = Runnable('unsupported_kind', '')
    with self.assertRaises(ValueError) as ctx:
        runnable.pick_runner_class()
    self.assertEqual(str(ctx.exception),
                     'Unsupported kind of runnable: unsupported_kind')
def test_runnable_to_recipe_uri(self):
    """An exec-test runnable keeps kind and uri across a recipe round trip."""
    recipe_path = os.path.join(self.tmpdir.name, 'recipe.json')
    Runnable('exec-test', '/bin/true').write_json(recipe_path)
    self.assertTrue(os.path.exists(recipe_path))
    loaded = Runnable.from_recipe(recipe_path)
    self.assertEqual(loaded.kind, 'exec-test')
    self.assertEqual(loaded.uri, '/bin/true')
def test_runner_noop(self):
    """The noop runner must finish and report a time in its last status."""
    runnable = Runnable('noop', None)
    runner = runnable.pick_runner_class()()
    statuses = list(runner.run(runnable))
    final = statuses[-1]
    self.assertEqual(final['status'], 'finished')
    self.assertIn('time', final)
def test_runner_noop(self):
    """Running a noop runnable yields a final 'finished' status with a time."""
    runnable = Runnable("noop", None)
    runner_klass = runnable.pick_runner_class()
    statuses = list(runner_klass().run(runnable))
    self.assertEqual(statuses[-1]["status"], "finished")
    self.assertIn("time", statuses[-1])
def test_runnable_to_recipe_uri(self):
    """Kind and uri must survive a write_json/from_recipe round trip."""
    recipe_path = os.path.join(self.tmpdir.name, "recipe.json")
    Runnable("exec-test", "/bin/true").write_json(recipe_path)
    self.assertTrue(os.path.exists(recipe_path))
    loaded = Runnable.from_recipe(recipe_path)
    self.assertEqual(loaded.kind, "exec-test")
    self.assertEqual(loaded.uri, "/bin/true")
def test_get_dict(self):
    """get_dict() must expose kind, uri, args (as a tuple) and config."""
    runnable = Runnable('noop', '_uri_', 'arg1', 'arg2')
    expected = {
        'kind': 'noop',
        'uri': '_uri_',
        'args': ('arg1', 'arg2'),
        'config': {}
    }
    self.assertEqual(runnable.get_dict(), expected)
def test_runner_python_unittest_empty_uri_error(self):
    """An empty URI makes the python-unittest runner finish with an error."""
    runnable = Runnable("python-unittest", "")
    runner = runnable.pick_runner_class()()
    statuses = list(runner.run(runnable))
    expected_output = "Invalid URI: could not be converted to an unittest dotted name."
    final = statuses[-1]
    self.assertEqual(final["status"], "finished")
    self.assertEqual(final["result"], "error")
    self.assertEqual(final["output"], expected_output)
def test_runner_python_unittest_empty_uri_error(self):
    """The python-unittest runner must error out on an empty (invalid) URI."""
    runnable = Runnable('python-unittest', '')
    runner = runnable.pick_runner_class()()
    statuses = list(runner.run(runnable))
    expected_output = ('Invalid URI: could not be converted to an unittest '
                       'dotted name.')
    final = statuses[-1]
    self.assertEqual(final['status'], 'finished')
    self.assertEqual(final['result'], 'error')
    self.assertEqual(final['output'], expected_output)
def python_resolver(name, reference, find_tests):
    """Resolve *reference* into runnables of kind *name*.

    :param name: the kind of runnable to create
    :param reference: a "module_path:filter" style test reference
    :param find_tests: callable returning class/method info for a module
    :return: a ReferenceResolution, or the (non-True) result of check_file
    """
    module_path, tests_filter = reference_split(reference)
    if tests_filter is not None:
        tests_filter = re.compile(tests_filter)

    criteria_check = check_file(module_path, reference)
    if criteria_check is not True:
        return criteria_check

    # disabled tests not needed here
    class_methods_info, _ = find_tests(module_path)
    runnables = []
    for klass, methods_tags_depens in class_methods_info.items():
        for method, tags, depens in methods_tags_depens:
            klass_method = f"{klass}.{method}"
            # honor the optional filter portion of the reference
            if tests_filter is not None and not tests_filter.search(klass_method):
                continue
            runnables.append(
                Runnable(name,
                         uri=f"{module_path}:{klass_method}",
                         tags=tags,
                         dependencies=depens))

    if runnables:
        return ReferenceResolution(reference,
                                   ReferenceResolutionResult.SUCCESS,
                                   runnables)
    return ReferenceResolution(reference,
                               ReferenceResolutionResult.NOTFOUND)
def test_is_task_kind_supported(self):
    """A runner command advertising this kind must be reported as capable."""
    # the fake runner answers the "capabilities" subcommand with a
    # payload listing "mykind" as a supported runnable
    cmd = [
        "sh",
        "-c",
        'test $0 = capabilities && echo -n {\\"runnables\\": [\\"mykind\\"]}',
    ]
    supported = Runnable.is_kind_supported_by_runner_command(self.kind, cmd)
    self.assertTrue(supported)
def test_runner_exec_test_fail(self):
    """A failing exec-test reports empty streams and a 'fail' result."""
    runnable = Runnable("exec-test", "/bin/false")
    runner = runnable.pick_runner_class()()
    statuses = list(runner.run(runnable))
    stdout_status, stderr_status, final_status = statuses[-3:]
    self.assertEqual(stdout_status["type"], "stdout")
    self.assertEqual(stdout_status["log"], b"")
    self.assertEqual(stderr_status["type"], "stderr")
    self.assertEqual(stderr_status["log"], b"")
    self.assertEqual(final_status["status"], "finished")
    self.assertEqual(final_status["result"], "fail")
    self.assertEqual(final_status["returncode"], 1)
    self.assertIn("time", final_status)
def resolve(reference):
    """Resolve a .robot file reference into 'robot' runnables.

    :param reference: path to a Robot Framework test file
    :return: a ReferenceResolution, or the (non-True) result of check_file
    """
    # It may be possible to have Robot Framework tests in other
    # types of files such as reStructuredText (.rst), but given
    # that we're not testing that, let's restrict to files ending
    # in .robot files
    criteria_check = check_file(reference, reference, suffix='.robot')
    if criteria_check is not True:
        return criteria_check

    robot_suite = find_tests(reference, test_suite={})
    runnables = [
        Runnable('robot',
                 uri=(f"{robot_test['test_source']}:"
                      f"{key}.{robot_test['test_name']}"))
        for key, tests in robot_suite.items()
        for robot_test in tests
    ]
    if runnables:
        return ReferenceResolution(reference,
                                   ReferenceResolutionResult.SUCCESS,
                                   runnables)
    return ReferenceResolution(reference,
                               ReferenceResolutionResult.NOTFOUND)
def _get_resolutions_by_kind(self, kind, paths):
    """Create one successful resolution per path for the given kind.

    :param kind: the runnable kind declared in the config section
    :param paths: test paths that may substitute the $testpath placeholder
    :return: list of ReferenceResolution, one per path
    """
    self.validate_kind_section(kind)

    resolutions = []
    success = ReferenceResolutionResult.SUCCESS
    config = {
        'uri': self._get_uri_from_section(kind),
        'args': self._get_args_from_section(kind),
        'kwargs': self._get_kwargs_from_section(kind)
    }
    for path in paths:
        uri = config.get('uri')
        args = config.get('args')
        kwargs = config.get('kwargs')
        # NOTE(review): the membership tests below match '$testpath'
        # exactly (whole uri / whole arg / whole kwarg value), while the
        # replacement itself is a substring replace — presumably
        # intentional; confirm against the section format spec.
        if uri == '$testpath':
            uri = path
        if '$testpath' in args:
            args = [item.replace('$testpath', path) for item in args]
        if '$testpath' in kwargs.values():
            kwargs = {k: v.replace('$testpath', path)
                      for k, v in kwargs.items()}
        resolutions.append(
            ReferenceResolution(reference=path,
                                result=success,
                                resolutions=[Runnable(kind, uri,
                                                      *args, **kwargs)],
                                origin=path))
    return resolutions
def _get_resolutions_by_kind(self, kind, paths):
    """Build a successful ReferenceResolution for every path in *paths*.

    :param kind: runnable kind taken from the config section
    :param paths: test references; '$testpath' placeholders get this value
    :return: list of ReferenceResolution objects
    """
    self.validate_kind_section(kind)

    # section values are invariant across paths, so read them once
    section_uri = self._get_uri_from_section(kind)
    section_args = self._get_args_from_section(kind)
    section_kwargs = self._get_kwargs_from_section(kind)

    resolutions = []
    for path in paths:
        # '$testpath' is matched exactly (whole uri / whole arg /
        # whole kwarg value) before any substring replacement happens
        uri = path if section_uri == "$testpath" else section_uri
        args = section_args
        if "$testpath" in args:
            args = [item.replace("$testpath", path) for item in args]
        kwargs = section_kwargs
        if "$testpath" in kwargs.values():
            kwargs = {k: v.replace("$testpath", path)
                      for k, v in kwargs.items()}
        resolutions.append(
            ReferenceResolution(reference=path,
                                result=ReferenceResolutionResult.SUCCESS,
                                resolutions=[Runnable(kind, uri,
                                                      *args, **kwargs)],
                                origin=path))
    return resolutions
def test_runner_exec(self):
    """A successful exec-test produces empty streams and returncode 0."""
    runnable = Runnable('exec-test', sys.executable,
                        '-c', 'import time; time.sleep(0.01)')
    runner = runnable.pick_runner_class()()
    statuses = list(runner.run(runnable))
    stdout_status, stderr_status, final_status = statuses[-3:]
    self.assertEqual(stdout_status['type'], 'stdout')
    self.assertEqual(stdout_status['log'], b'')
    self.assertEqual(stderr_status['type'], 'stderr')
    self.assertEqual(stderr_status['log'], b'')
    self.assertEqual(final_status['status'], 'finished')
    self.assertEqual(final_status['returncode'], 0)
    self.assertIn('time', final_status)
def test_is_task_kind_supported_other_kind(self):
    """A runner advertising a different kind must not be reported capable."""
    # the fake runner's capabilities payload only lists "otherkind"
    cmd = [
        'sh',
        '-c',
        'test $0 = capabilities && '
        'echo -n {\\"runnables\\": [\\"otherkind\\"]}'
    ]
    supported = Runnable.is_kind_supported_by_runner_command(self.kind, cmd)
    self.assertFalse(supported)
def test_runner_exec_test_fail(self):
    """Running /bin/false must finish with result 'fail' and returncode 1."""
    runnable = Runnable('exec-test', '/bin/false')
    runner = runnable.pick_runner_class()()
    statuses = list(runner.run(runnable))
    stdout_status, stderr_status, final_status = statuses[-3:]
    self.assertEqual(stdout_status['type'], 'stdout')
    self.assertEqual(stdout_status['log'], b'')
    self.assertEqual(stderr_status['type'], 'stderr')
    self.assertEqual(stderr_status['log'], b'')
    self.assertEqual(final_status['status'], 'finished')
    self.assertEqual(final_status['result'], 'fail')
    self.assertEqual(final_status['returncode'], 1)
    self.assertIn('time', final_status)
def test_runner_exec(self):
    """A short sleeping process must exit cleanly with empty output streams."""
    runnable = Runnable(
        "exec-test", sys.executable, "-c", "import time; time.sleep(0.01)"
    )
    runner = runnable.pick_runner_class()()
    statuses = list(runner.run(runnable))
    stdout_status, stderr_status, final_status = statuses[-3:]
    self.assertEqual(stdout_status["type"], "stdout")
    self.assertEqual(stdout_status["log"], b"")
    self.assertEqual(stderr_status["type"], "stderr")
    self.assertEqual(stderr_status["log"], b"")
    self.assertEqual(final_status["status"], "finished")
    self.assertEqual(final_status["returncode"], 0)
    self.assertIn("time", final_status)
def test_runner_python_unittest_skip(self):
    """A skipped unittest produces a 'skip' result and the expected output."""
    runnable = Runnable(
        "python-unittest", "selftests/.data/unittests.py:Second.test_skip"
    )
    runner = runnable.pick_runner_class()()
    statuses = list(runner.run(runnable))
    expected_head = (
        b"----------------------------------------------------------------------\n"
        b"Ran 1 test in "
    )
    expected_tail = b"s\n\nOK (skipped=1)\n"
    output_status = statuses[-2]
    final_status = statuses[-1]
    self.assertEqual(final_status["status"], "finished")
    self.assertEqual(final_status["result"], "skip")
    # the elapsed time in the middle of the output is not deterministic,
    # so only the start and end of the log are compared
    self.assertTrue(output_status["log"].startswith(expected_head),
                    "Start of output differs")
    self.assertTrue(output_status["log"].endswith(expected_tail),
                    "End of output differs")
def resolve(reference):
    """Resolve *reference* to a 'magic' runnable when it is a magic word."""
    if reference in VALID_MAGIC_WORDS:
        return ReferenceResolution(reference,
                                   ReferenceResolutionResult.SUCCESS,
                                   [Runnable('magic', reference)])
    return ReferenceResolution(
        reference,
        ReferenceResolutionResult.NOTFOUND,
        info=f'Word "{reference}" is not a valid magic word')
def test_kwargs_json_empty_dict(self):
    """A 'json:{}' kwarg value must be decoded into an empty dict."""
    parsed_args = {
        'kind': 'noop',
        'uri': None,
        'kwargs': [('empty', 'json:{}')]
    }
    runnable = Runnable.from_args(parsed_args)
    self.assertEqual(runnable.kind, 'noop')
    self.assertIsNone(runnable.uri)
    self.assertEqual(runnable.kwargs.get('empty'), {})
def test_exec_args(self):
    """Positional args from parsed arguments end up in runnable.args."""
    parsed_args = {
        'kind': 'exec-test',
        'uri': '/path/to/executable',
        'arg': ['-a', '-b', '-c']
    }
    runnable = Runnable.from_args(parsed_args)
    self.assertEqual(runnable.kind, 'exec-test')
    self.assertEqual(runnable.uri, '/path/to/executable')
    self.assertEqual(runnable.args, ('-a', '-b', '-c'))
    self.assertEqual(runnable.kwargs, {})
def test_recipe_exec(self):
    """A recipe with args and kwargs is fully loaded into the runnable."""
    recipe_content = ('{"kind": "exec-test", "uri": "/bin/sh", '
                      '"args": ["/etc/profile"], '
                      '"kwargs": {"TERM": "vt3270"}}')
    open_mocked = unittest.mock.mock_open(read_data=recipe_content)
    with unittest.mock.patch("builtins.open", open_mocked):
        runnable = Runnable.from_recipe("fake_path")
    self.assertEqual(runnable.kind, "exec-test")
    self.assertEqual(runnable.uri, "/bin/sh")
    self.assertEqual(runnable.args, ("/etc/profile", ))
    self.assertEqual(runnable.kwargs, {"TERM": "vt3270"})
def test_exec_args(self):
    """The 'arg' list from parsed args becomes the runnable's args tuple."""
    parsed_args = {
        "kind": "exec-test",
        "uri": "/path/to/executable",
        "arg": ["-a", "-b", "-c"],
    }
    runnable = Runnable.from_args(parsed_args)
    self.assertEqual(runnable.kind, "exec-test")
    self.assertEqual(runnable.uri, "/path/to/executable")
    self.assertEqual(runnable.args, ("-a", "-b", "-c"))
    self.assertEqual(runnable.kwargs, {})
def test_runnable_args_kwargs(self):
    """Both positional and keyword arguments must be kept by the runnable."""
    runnable = Runnable('noop', 'uri', 'arg1', 'arg2',
                        key1='val1', key2='val2')
    self.assertIn('arg1', runnable.args)
    self.assertIn('arg2', runnable.args)
    self.assertEqual(runnable.kwargs.get('key1'), 'val1')
    self.assertEqual(runnable.kwargs.get('key2'), 'val2')
def resolve(reference):  # pylint: disable=W0221
    """Return a 'magic' runnable for valid magic words, NOTFOUND otherwise."""
    if reference in VALID_MAGIC_WORDS:
        return ReferenceResolution(reference,
                                   ReferenceResolutionResult.SUCCESS,
                                   [Runnable("magic", reference)])
    return ReferenceResolution(
        reference,
        ReferenceResolutionResult.NOTFOUND,
        info=f'Word "{reference}" is not a valid magic word',
    )
def command_runnable_run_recipe(self, args):
    """
    Runs a runnable definition from a recipe

    :param args: parsed command line arguments turned into a dictionary
    :type args: dict
    """
    recipe_path = args.get("recipe")
    runnable = Runnable.from_recipe(recipe_path)
    # echo every status the runner yields as the run progresses
    for status in self.get_runner_from_runnable(runnable).run(runnable):
        self.echo(status)
def _update_avocado_configuration_used_on_runnables(runnables, config):
    """Updates the config used on runnables with this suite's config values

    :param runnables: the tasks whose runner requirements will be checked
    :type runnables: list of :class:`Runnable`
    :param config: A config dict to be used on the desired test suite.
    :type config: dict
    """
    for item in runnables:
        # keep only the config entries relevant to this runnable's kind
        item.config = Runnable.filter_runnable_config(item.kind, config)