def handles_invalid_kwargs_like_any_other_function(self):
    # Unknown keyword args to run() should surface as a plain TypeError.
    try:
        self._run(_, nope_noway_nohow='as if')
    except TypeError as e:
        message = str(e)
        ok_('got an unexpected keyword argument' in message)
    else:
        assert False, "Invalid run() kwarg didn't raise TypeError"
def exited_is_None(self):
    # A WatcherError-triggered Failure carries no exit status: .exited is
    # None rather than an int.
    try:
        self._watcher_error()
    except Failure as e:
        exited = e.result.exited
        err = "Expected None, got {0!r}".format(exited)
        ok_(exited is None, err)
    else:
        # Fix: without this clause the test silently passed when no
        # Failure was raised at all (siblings like exited_is_integer and
        # stringrep_notes_exit_status all guard against that).
        assert False, "Did not raise Failure!"
def stringrep_notes_exit_status(self):
    # str() of the result attached to a Failure must mention the exit code.
    try:
        self._regular_error()
    except Failure as e:
        rendered = str(e.result)
        ok_("exited with status 1" in rendered)
    else:
        assert False, "Did not raise Failure!"
def exited_is_integer(self):
    # A regular command failure exposes its exit status as an int.
    try:
        self._regular_error()
    except Failure as e:
        exited = e.result.exited
        ok_(isinstance(exited, int))
    else:
        assert False, "Did not raise Failure!"
# NOTE(review): asserts the status command renders a header/footer-delimited
# table with Changelog, Version and Tag rows (in that order), each showing
# its "needs work" enum value. The expected-output regex is built from
# re.escape()d enum values and matched against captured sys.stdout; the two
# `err` format strings give both plain and repr renditions on failure.
# NOTE(review): left byte-identical — the exact line layout of the r"""..."""
# regex template is significant to re.match and must not be reflowed.
def displays_expectations_and_component_statuses(self): _mock_status(self) # TODO: make things more organic/specific/less tabular: # # current git branch: xxx (implies type yyy) # changelog: xxx # so the next release would be: a.b.c (or: 'so the release we're # cutting/expecting is a.b.c') # version file: <status output including current value> # git tag: <status output saying found/not found> (maybe including # latest that is found? that's extra logic...) # etc... parts = dict( changelog=Changelog.NEEDS_RELEASE.value, version=VersionFile.NEEDS_BUMP.value, tag=Tag.NEEDS_CUTTING.value, ) for part in parts: parts[part] = re.escape(parts[part]) parts['header_footer'] = r'-+ +-+' # NOTE: forces impl to follow specific order, which is good regex = r""" {header_footer} Changelog +{changelog} Version +{version} Tag +{tag} {header_footer} """.format(**parts).strip() output = sys.stdout.getvalue() err = "Expected:\n\n{0}\n\nGot:\n\n{1}".format(regex, output) err += "\n\nRepr edition...\n\nExpected:\n\n{0!r}\n\nGot:\n\n{1!r}".format(regex, output) # noqa ok_(re.match(regex, output), err)
def optional_prevents_bool_defaults_from_affecting_kind(self):
    # Re #416: an arg flagged optional= must keep kind=str even when its
    # Python default is a bool. See the function under test for rationale.
    @task(optional=['myarg'])
    def mytask(c, myarg=False):
        pass

    first_arg = mytask.get_arguments()[0]
    ok_(first_arg.kind is str)  # not bool!
def modifications_on_clone_do_not_alter_original(self):
    # Build an original Call with task, invocation name, args/kwargs and
    # an attached config context.
    conf = Context()
    conf['setting'] = 'value'
    original = Call(
        self.task, called_as='foo', args=[1, 2, 3], kwargs={'key': 'val'}
    )
    original.context = conf
    # Clone, then mutate every member of the clone we can reach.
    copy = original.clone()
    copy.task = Task(Mock(__name__='meh'))
    copy.called_as = 'notfoo'
    copy.args[0] = 7
    copy.kwargs['key'] = 'notval'
    copy.context['setting'] = 'notvalue'
    # The original must remain untouched in every respect.
    ok_(copy.task is not original.task)
    eq_(original.called_as, 'foo')
    eq_(copy.called_as, 'notfoo')
    eq_(original.args, [1, 2, 3])
    eq_(copy.args, [7, 2, 3])
    eq_(original.kwargs['key'], 'val')
    eq_(copy.kwargs['key'], 'notval')
    eq_(original.context['setting'], 'value')
    eq_(copy.context['setting'], 'notvalue')
def base_case(self):
    # The worker pushes 7 onto the queue; run it to completion and check
    # exactly that one value arrived.
    results = Queue()
    worker_thread = EHThread(target=self.worker, args=[results])
    worker_thread.start()
    worker_thread.join()
    eq_(results.get(block=False), 7)
    ok_(results.empty())
def base_case(self):
    # Subclass-style usage: the thread body pushes 7 onto the given queue.
    results = Queue()
    thread = self.klass(queue=results)
    thread.start()
    thread.join()
    eq_(results.get(block=False), 7)
    ok_(results.empty())
def preserves_basic_members(self):
    c1 = Config(
        defaults={'key': 'default'},
        overrides={'key': 'override'},
        system_prefix='global',
        user_prefix='user',
        project_home='project',
        runtime_path='runtime.yaml',
    )
    c2 = c1.clone()
    # NOTE: identical defaults also implicitly proves clone() passes
    # defaults= instead of doing an empty init + copy. (Otherwise
    # global_defaults() gets rerun and re-added to _defaults...)
    # Dict-valued members: equal but not the same object.
    for attr in ('_defaults', '_overrides'):
        eq_(getattr(c2, attr), getattr(c1, attr))
        ok_(getattr(c2, attr) is not getattr(c1, attr))
    # Scalar members: simple equality is enough.
    for attr in (
        '_system_prefix',
        '_user_prefix',
        '_project_home',
        'prefix',
        'file_prefix',
        'env_prefix',
        '_runtime_path',
    ):
        eq_(getattr(c2, attr), getattr(c1, attr))
def prefixes_command_with_sudo(self, Local):
    # sudo() should wrap the command with sudo -S plus the default prompt.
    mock_runner = Local.return_value
    Context().sudo('whoami')
    # NOTE: implicitly tests default sudo.prompt conf value
    expected = "sudo -S -p '[sudo] password: ' whoami"
    ok_(mock_runner.run.called, "sudo() never called runner.run()!")
    eq_(mock_runner.run.call_args[0][0], expected)
def can_be_pickled(self):
    # A pickle round-trip yields an equal but fully independent Context,
    # down to nested container members.
    original = Context()
    original.foo = {'bar': {'biz': ['baz', 'buzz']}}
    restored = pickle.loads(pickle.dumps(original))
    eq_(original, restored)
    ok_(original is not restored)
    ok_(original.foo.bar.biz is not restored.foo.bar.biz)
def is_exception_when_WatcherError_raised_internally(self):
    # The Failure's .reason should carry the internal WatcherError.
    try:
        self._watcher_error()
    except Failure as e:
        reason = e.reason
        ok_(isinstance(reason, WatcherError))
    else:
        assert False, "Failed to raise Failure!"
def dict_value_merges_are_not_references(self):
    core = {}
    coll = {'foo': {'bar': {'biz': 'coll value'}}}
    proj = {'foo': {'bar': {'biz': 'proj value'}}}
    # First merge. With the bug present, core['foo'] would simply alias
    # proj['foo'], so anything merged into core later would leak into
    # proj as well.
    merge_dicts(core, proj)
    eq_(core, {'foo': {'bar': {'biz': 'proj value'}}})
    eq_(proj['foo']['bar']['biz'], 'proj value')
    # Identity check exposes the aliasing directly, before any mutation.
    ok_(core['foo'] is not proj['foo'], "Core foo is literally proj foo!")
    # Second merge of the same data: no visible change, but this mirrors
    # what real config merge code does, so why not.
    merge_dicts(core, proj)
    eq_(core, {'foo': {'bar': {'biz': 'proj value'}}})
    eq_(proj['foo']['bar']['biz'], 'proj value')
    # The dangerous merge: updating core from coll must not reach back
    # and modify proj.
    merge_dicts(core, coll)
    # core picked up the new leaf value from coll...
    eq_(core, {'foo': {'bar': {'biz': 'coll value'}}})
    # ...BUT proj remains untouched.
    eq_(proj['foo']['bar']['biz'], 'proj value')
def file_like_objects(self):
    # put() should accept a file-like local object, write its contents to
    # the remote path, and report both sides back on the result.
    fd = BytesIO()
    fd.write(b"yup\n")
    result = self.c.put(local=fd, remote=self.remote)
    # Fix: the bare open(...).read() leaked the file handle; use a
    # context manager so it is closed deterministically.
    with open(self.remote) as fobj:
        eq_(fobj.read(), "yup\n")
    eq_(result.remote, self.remote)
    ok_(result.local is fd)
def can_clone_into_a_subclass(self):
    # clone(into=...) should yield an equal object of the given subclass.
    original = Call(self.task)

    class MyCall(Call):
        pass

    subclassed = original.clone(into=MyCall)
    eq_(subclassed, original)
    ok_(isinstance(subclassed, MyCall))
def comparison_looks_at_merged_config(self):
    # Same merged view built from different layers must compare equal.
    via_defaults = Config(defaults={'foo': {'bar': 'biz'}})
    # Empty defaults to suppress global_defaults
    via_overrides = Config(defaults={}, overrides={'foo': {'bar': 'biz'}})
    ok_(via_defaults is not via_overrides)
    ok_(via_defaults._defaults != via_overrides._defaults)
    eq_(via_defaults, via_overrides)
def task_has_no_help_shows_per_task_help(self):
    # --help given after a task lacking its own help flag: the value lands
    # on the core context, and the task context gains no 'help' arg.
    mytask = Context('mytask')
    core = Context(args=[Argument('help', optional=True)])
    parser = Parser(initial=core, contexts=[mytask])
    result = parser.parse_argv(['mytask', '--help'])
    eq_(len(result), 2)
    eq_(result[0].args.help.value, 'mytask')
    ok_('help' not in result[1].args)
def hide_unknown_vals_mention_value_given_in_error(self):
    # A bad hide= value should be echoed back inside the ValueError text
    # so users can see what they typed wrong.
    value = "penguinmints"
    try:
        run("command", hide=value)
    # Fix: `except ValueError, e` is Python 2-only syntax and a
    # SyntaxError on Python 3; use the `as` form.
    except ValueError as e:
        msg = "Error from run(hide=xxx) did not tell user what the bad value was!"
        msg += "\nException msg: %s" % e
        ok_(value in str(e), msg)
    else:
        # Fix: previously a silently-accepted bad value passed the test.
        assert False, "run() did not raise ValueError for bad hide= value!"
def kwargs(self):
    # Kwargs tupled with the task name get forwarded to the task body,
    # alongside the sole positional Context arg.
    expected = {'foo': 'bar'}
    self.executor.execute(('task1', expected))
    call_args = self.task1.body.call_args[0]
    call_kwargs = self.task1.body.call_args[1]
    ok_(isinstance(call_args[0], Context))
    eq_(len(call_args), 1)
    eq_(call_kwargs['foo'], 'bar')
def returns_run_result(self, Local):
    # Whatever runner.run() returns must come back from sudo() verbatim.
    mock_runner = Local.return_value
    expected = mock_runner.run.return_value
    actual = Context().sudo('whoami')
    ok_(
        actual is expected,
        "sudo() did not return run()'s return value!",
    )
def returns_deep_copy_of_given_dict(self):
    # NOTE: not an actual deepcopy -- but nested dicts are recreated, so
    # mutating the copy must leave the source untouched.
    source = {'foo': {'bar': {'biz': 'baz'}}}
    duplicate = copy_dict(source)
    eq_(duplicate['foo']['bar'], source['foo']['bar'])
    ok_(duplicate['foo']['bar'] is not source['foo']['bar'])
    duplicate['foo']['bar']['biz'] = 'notbaz'
    eq_(source['foo']['bar']['biz'], 'baz')
def works_correctly_when_subclassed(self):
    # Because sometimes, implementation #1 is really naive!
    class MyConfig(Config):
        pass

    original = MyConfig()
    ok_(isinstance(original, MyConfig))  # sanity
    cloned = original.clone()
    ok_(isinstance(cloned, MyConfig))  # actual test
def returns_core_args_list(self):
    # Mostly to encode the explicitly doc'd public API member in tests.
    # Spot checks are good enough; --help tests include the full deal.
    args = Program().core_args()
    names = [arg.names[0] for arg in args]
    for expected in ('complete', 'help', 'pty', 'version'):
        ok_(expected in names)
    # Also make sure it's a list for easier tweaking/appending
    ok_(isinstance(args, list))
def prefixes_should_apply_to_run(self, Local):
    # Commands run inside ctx.prefix() get the prefix joined via '&&'.
    mock_runner = Local.return_value
    context = Context()
    with context.prefix('cd foo'):
        context.run('whoami')
    expected = "cd foo && whoami"
    ok_(mock_runner.run.called, "run() never called runner.run()!")
    eq_(mock_runner.run.call_args[0][0], expected)
def python_modules_dont_load_special_vars(self):
    "Python modules don't load special vars"
    # Borrow another test's Python module.
    config = _load('system_prefix', 'python')
    # Sanity: ordinary lowercase names do load.
    eq_(config.hooray, 'python')
    # Real test: dunders like __builtins__ etc must be stripped out.
    for special in ('builtins', 'file', 'package', 'name', 'doc'):
        ok_('__{0}__'.format(special) not in config)
def prefixes_should_apply_to_sudo(self, Local):
    # ctx.prefix() applies to sudo() too: prefix goes inside the sudo'd
    # command string.
    mock_runner = Local.return_value
    context = Context()
    with context.prefix('cd foo'):
        context.sudo('whoami')
    expected = "sudo -S -p '[sudo] password: ' cd foo && whoami"
    ok_(mock_runner.run.called, "sudo() never called runner.run()!")
    eq_(mock_runner.run.call_args[0][0], expected)
def exhibits_is_dead_flag(self):
    # A thread whose body errored (queue=None) reports is_dead=True...
    dead = self.klass(queue=None)
    dead.start()
    dead.join()
    ok_(dead.is_dead)
    # ...while a clean run reports is_dead=False.
    alive = self.klass(queue=Queue())
    alive.start()
    alive.join()
    ok_(not alive.is_dead)
def excepted_command(self):
    # Unresolvable hostnames should surface as per-host gaierror values
    # inside the GroupException's result mapping.
    group = Group('nopebadhost1', 'nopebadhost2')
    try:
        group.run('lolnope', hide=True)
    except GroupException as e:
        for host_result in e.result.values():
            ok_(isinstance(host_result, gaierror))
    else:
        assert False, "Did not raise GroupException!"
def nothing_is_written_to_stdin_by_default(self):
    # NOTE: technically if some goofus ran the tests by hand and mashed
    # keys while doing so...this would fail. LOL?
    # NOTE: seems not too useful but is a) a sanity test and b) guards
    # against e.g. breaking the autoresponder such that it responds to
    # "" or "\n" or etc.
    writer_klass = self._mock_stdin_writer()
    self._runner(klass=writer_klass).run(_)
    ok_(not writer_klass.write_stdin.called)
def inline_config_overrides_via_merge_not_replacement(self):
    # Merging must keep keys the inline config did not mention.
    merged = self.changed.configuration()
    ok_('otherkey' in merged)
def missing_default_task_prints_help(self):
    # With no default task available, dispatch exits after printing the
    # core help text.
    with expect_exit():
        _dispatch("inv -c foo")
    output = sys.stdout.getvalue()
    ok_("Core options:" in output)
def is_first_posarg(self):
    # The first positional argument to Call() becomes its .task.
    call = Call(_)
    ok_(call.task is _)
def per_task_help(self):
    # Task-specific --help should include the task's own help text.
    output = run("invoke -c _explicit foo --help").stdout
    ok_("Frobazz" in output)
def enabled_via_config(self):
    # run.echo=True in config causes the command to be echoed to stdout.
    self._run("yup", settings={'run': {'echo': True}})
    captured = sys.stdout.getvalue()
    ok_("yup" in captured)
def core_arg_parse_result_defaults_to_None(self):
    # Without an explicit core=, Executor exposes .core as None.
    executor = Executor(collection=Collection())
    ok_(executor.core is None)
def non_one_return_codes_still_act_as_False(self):
    # Any nonzero exit (not just 1) must be falsey under warn=True.
    result = run("goobypls", warn=True, hide='both')
    ok_(not result)
def may_specify_namespace(self):
    # A namespace given at init is stored verbatim on the Program.
    ns = load('foo')
    program = Program(namespace=ns)
    ok_(program.namespace is ns)
def loader_class_defaults_to_FilesystemLoader(self):
    # Unconfigured Programs use the filesystem-based collection loader.
    program = Program()
    ok_(program.loader_class is FilesystemLoader)
def is_normalized_to_a_tuple(self):
    # names= supplied as a tuple stays a tuple on the Argument.
    arg = Argument(names=('a', 'b'))
    ok_(isinstance(arg.names, tuple))
def json_prevents_python(self):
    # When both .json and .py config files exist, only JSON is honored.
    prefix = join(CONFIGS_PATH, 'json-and-python', 'invoke')
    config = Config(system_prefix=prefix)
    ok_('python_only' not in config)
    ok_('json-only' in config)
    eq_(config.shared, 'json-value')
def yml_prevents_json_or_python(self):
    # .yml outranks both .json and .py when all three exist.
    prefix = join(CONFIGS_PATH, 'three-of-em/')
    config = Config(system_prefix=prefix)
    ok_('json-only' not in config)
    ok_('python_only' not in config)
    ok_('yml-only' in config)
    eq_(config.shared, 'yml-value')
def strings_replaced_with_env_value(self):
    # A matching env var replaces the predeclared string default, and the
    # result is a text-type value.
    os.environ['FOO'] = six.u('myvalue')
    config = Config(defaults={'foo': 'myoldvalue'})
    config.load_shell_env()
    eq_(config.foo, six.u('myvalue'))
    ok_(isinstance(config.foo, six.text_type))
def non_predeclared_settings_do_not_get_consumed(self):
    # Env vars without a matching predeclared key must not leak into the
    # config under either case variant.
    os.environ['HELLO'] = "is it me you're looking for?"
    config = Config()
    config.load_shell_env()
    ok_('HELLO' not in config)
    ok_('hello' not in config)
def executor_class_defaults_to_Executor(self):
    # Programs run tasks via the stock Executor unless told otherwise.
    program = Program()
    ok_(program.executor_class is Executor)
def run_acts_as_success_boolean(self):
    # Failed commands (under warn) are falsey; successes are truthy.
    failure = run("false", warn=True)
    ok_(not failure)
    success = run("true")
    ok_(success)
def can_grant_access_to_core_arg_parse_result(self):
    # core= given at init is exposed verbatim via .core.
    parse_result = ParserContext()
    executor = Executor(collection=Collection(), core=parse_result)
    ok_(executor.core is parse_result)
def config_class_defaults_to_Config(self):
    # Programs build per-session configuration from the stock Config.
    program = Program()
    ok_(program.config_class is Config)
def kwarg_beats_config(self):
    # echo=True as a kwarg wins over run.echo=False in the config.
    self._run("yup", echo=True, settings={'run': {'echo': False}})
    captured = sys.stdout.getvalue()
    ok_("yup" in captured)
def help_output(self):
    # Top-level --help prints the usage banner.
    output = run("invoke --help").stdout
    ok_("Usage: inv[oke] " in output)
def enabled_via_kwarg(self):
    # echo=True mirrors the command string itself to stdout.
    self._run("my command", echo=True)
    captured = sys.stdout.getvalue()
    ok_("my command" in captured)
def can_defer_post_init_step(self, post_init):
    # Default init triggers post_init exactly once...
    Config()
    post_init.assert_called_once_with()
    post_init.reset_mock()
    # ...while defer_post_init=True skips it entirely.
    Config(defer_post_init=True)
    ok_(not post_init.called)
def returns_new_but_equivalent_object(self):
    # clone() yields a distinct yet equal Call.
    original = Call(self.task)
    duplicate = original.clone()
    ok_(duplicate is not original)
    ok_(duplicate == original)
def nonexistent_attr_setting_works_nested_too(self):
    # Attribute-style creation of nested structures must be visible via
    # dict-style access as well.
    config = Config()
    config.a_nest = {}
    eq_(config['a_nest'], {})
    config.a_nest.an_egg = True
    ok_(config['a_nest']['an_egg'] is True)
def creates_a_new_Context_from_given_config(self):
    # make_context(config) hands back a Context wrapping that config.
    conf = Config(defaults={'foo': 'bar'})
    context = Call(_).make_context(conf)
    ok_(isinstance(context, Context))
    eq_(context.foo, 'bar')
def can_be_pickled(self):
    # Pickle round-trip yields an equal but fully independent Config,
    # including deeply nested container members.
    original = Config(overrides={'foo': {'bar': {'biz': ['baz', 'buzz']}}})
    restored = pickle.loads(pickle.dumps(original))
    eq_(original, restored)
    ok_(original is not restored)
    ok_(original.foo.bar.biz is not restored.foo.bar.biz)
def _expect_attr(self, attr):
    # Helper: a fresh Context must expose a callable member named `attr`.
    context = Context()
    member = getattr(context, attr, None)
    ok_(member is not None and callable(member))
def contextualized_tasks_are_given_parser_context_arg(self):
    # A contextualized task's body gets exactly one positional argument,
    # and it is a Context.
    self.executor.execute('contextualized')
    positional = self.contextualized.body.call_args[0]
    eq_(len(positional), 1)
    ok_(isinstance(positional[0], Context))
def yaml_prevents_json_or_python(self):
    # .yaml outranks .json and .py when all three are present.
    prefix = join(CONFIGS_PATH, 'all-three', 'invoke')
    config = Config(system_prefix=prefix)
    ok_('json-only' not in config)
    ok_('python_only' not in config)
    ok_('yaml-only' in config)
    eq_(config.shared, 'yaml-value')
def does_not_deepcopy(self):
    config = Config(
        defaults={
            # Nested dicts get merged/recreated happily
            'oh': {'dear': {'god': object()}},
            # Compound values are shallow-copied
            'shallow': {'objects': ['copy', 'okay']},
            # References inside innermost non-dict containers are
            # preserved, sadly. Not much to be done without incurring
            # deepcopy problems (or reimplementing it entirely)
            'welp': {'cannot': ['have', {'everything': 'we want'}]},
        })
    clone = config.clone()
    # Basic identity: the clone is its own object
    ok_(config is not clone, "Clone had same identity as original!")
    # Each dict level is a fresh object
    ok_(config.oh is not clone.oh, "Top level key had same identity!")
    ok_(config.oh.dear is not clone.oh.dear,
        "Midlevel key had same identity!")
    # Basic leaf values get copied too
    ok_(config.oh.dear.god is not clone.oh.dear.god,
        "Leaf object() had same identity!")
    eq_(config.shallow.objects, clone.shallow.objects)
    ok_(config.shallow.objects is not clone.shallow.objects,
        "Shallow list had same identity!")
    # Deeply nested non-dict objects remain shared, oh well
    ok_(config.welp.cannot[1] is clone.welp.cannot[1],
        "Huh, a deeply nested dict-in-a-list had different identity?")
    ok_(
        config.welp.cannot[1]['everything'] is clone.welp.cannot[1]['everything'],  # noqa
        "Huh, a deeply nested dict-in-a-list value had different identity?"  # noqa
    )