def subcollection_config_works_with_default_tasks(self):
    # Config attached to a subcollection must reach a task invoked via
    # the collection's own name, thanks to default=True.
    @task(default=True)
    def mytask(c):
        assert c.my_key == "value"

    # "sub.mytask" is also reachable as plain "sub" because it is the
    # default task of its collection.
    inner = Collection("sub", mytask=mytask)
    inner.configure({"my_key": "value"})
    root = Collection(sub=inner)
    # Invoke by collection name only; the default task should fire.
    Executor(collection=root).execute("sub")
def hands_task_specific_configuration_to_context(self):
    # Each task must see its own collection's config value, with no
    # bleed-over from the sibling collection.
    @task
    def mytask(c):
        assert c.my_key == "value"

    @task
    def othertask(c):
        assert c.my_key == "othervalue"

    first = Collection("inner1", mytask)
    first.configure({"my_key": "value"})
    second = Collection("inner2", othertask)
    second.configure({"my_key": "othervalue"})
    root = Collection(first, second)
    executor = Executor(collection=root)
    executor.execute("inner1.mytask", "inner2.othertask")
def can_grant_access_to_core_arg_parse_result(self):
    # The ParseResult given at construction time must be exposed,
    # unmodified, as .core.
    core_args = ParseResult([ParserContext(name="mytask")])
    executor = Executor(collection=Collection(), core=core_args)
    assert executor.core is core_args
    # Sanity test of real-world access/usage
    assert len(executor.core) == 1
    assert executor.core[0].name == "mytask"
    assert len(executor.core[0].args) == 0
def hands_collection_configuration_to_context(self):
    # Values configured on the collection should surface on the Context
    # handed to its tasks.
    @task
    def mytask(c):
        assert c.my_key == "value"

    coll = Collection(mytask)
    coll.configure({"my_key": "value"})
    Executor(collection=coll).execute("mytask")
def default_tasks_called_when_no_tasks_specified(self):
    # NOTE: when no tasks AND no default, Program will print global
    # help. We just won't do anything at all, which is fine for now.
    default_task = Task(Mock("default-task"))
    ns = Collection()
    ns.add_task(default_task, name="mytask", default=True)
    Executor(collection=ns).execute()
    # The default task's body should receive exactly one positional
    # argument: the Context.
    positional = default_task.body.call_args[0]
    assert len(positional) == 1
    assert isinstance(positional[0], Context)
def calls_default_to_empty_args_always(self):
    # Even when the primary task receives kwargs, its pre/post tasks
    # should be invoked with nothing but the Context.
    before, after = Mock(), Mock()
    t1 = Task(before)
    t2 = Task(after)
    t3 = Task(Mock(), pre=[t1], post=[t2])
    executor = Executor(collection=Collection(t1=t1, t2=t2, t3=t3))
    executor.execute(("t3", {"something": "meh"}))
    for mock_body in (before, after):
        positional = mock_body.call_args[0]
        assert len(positional) == 1
        assert isinstance(positional[0], Context)
def setup(self):
    # Shared fixtures: a small dependency graph of tasks plus an
    # Executor wired to a collection holding them all.
    self.task1 = Task(Mock(return_value=7))
    self.task2 = Task(Mock(return_value=10), pre=[self.task1])
    self.task3 = Task(Mock(), pre=[self.task1])
    self.task4 = Task(Mock(return_value=15), post=[self.task1])
    self.contextualized = Task(Mock())
    collection = Collection()
    for name in ("task1", "task2", "task3", "task4", "contextualized"):
        collection.add_task(getattr(self, name), name=name)
    self.executor = Executor(collection=collection)
def deduping_treats_different_calls_to_same_task_differently(self):
    """
    Identical (task, args) calls are deduped; differing args are not.
    """
    body = Mock()
    t1 = Task(body)
    # t1(5) appears twice; only one of those should survive deduping,
    # while t1(7) is a distinct parameterization and must also run.
    pre = [call(t1, 5), call(t1, 7), call(t1, 5)]
    t2 = Task(Mock(), pre=pre)
    c = Collection(t1=t1, t2=t2)
    e = Executor(collection=c)
    e.execute("t2")
    # Does not call the second t1(5)
    param_list = []
    for body_call in body.call_args_list:
        assert isinstance(body_call[0][0], Context)
        param_list.append(body_call[0][1])
    # BUGFIX: the old set comparison passed even if the duplicate t1(5)
    # call had run; comparing the sorted list pins the call COUNT too.
    assert sorted(param_list) == [5, 7]
def _site(name, help_part):
    """
    Build a Sphinx-oriented Collection for the site directory ``name``.

    :param name: Directory under sites/ holding the site's source.
    :param help_part: Human-readable phrase spliced into help strings.
    """
    site_path = join("sites", name)
    # TODO: turn part of from_module into .clone(), heh.
    this_module = sys.modules[__name__]
    coll = Collection.from_module(
        this_module,
        name=name,
        config={
            "sphinx": {
                "source": site_path,
                "target": join(site_path, "_build"),
            }
        },
    )
    coll.__doc__ = "Tasks for building {}".format(help_part)
    coll["build"].__doc__ = "Build {}".format(help_part)
    return coll
def config_deletion_is_preserved_between_tasks(self):
    # A config key deleted by one task should remain absent for tasks
    # executed later in the same run.
    @task
    def task1(c):
        del c.config.run.echo
        # NOTE: returned for test inspection, not as mechanism of
        # sharing data!
        return c

    @task
    def task2(c):
        return c

    coll = Collection(task1, task2)
    results = Executor(collection=coll).execute("task1", "task2")
    second_context = results[task2]
    assert "echo" not in second_context.config.run
def context_is_new_but_config_is_same(self):
    # Each task receives a fresh Context object, yet all of them share
    # a single underlying Config instance.
    @task
    def task1(c):
        return c

    @task
    def task2(c):
        return c

    coll = Collection(task1, task2)
    results = Executor(collection=coll).execute("task1", "task2")
    first = results[task1]
    second = results[task2]
    assert first is not second
    # TODO: eventually we may want to change this again, as long as the
    # effective values within the config are still matching...? Ehh
    assert first.config is second.config
def new_config_data_is_preserved_between_tasks(self):
    # Config data set by an earlier task must be visible to later tasks
    # within the same run.
    @task
    def task1(c):
        c.foo = "bar"
        # NOTE: returned for test inspection, not as mechanism of
        # sharing data!
        return c

    @task
    def task2(c):
        return c

    coll = Collection(task1, task2)
    results = Executor(collection=coll).execute("task1", "task2")
    second_context = results[task2]
    assert "foo" in second_context.config
    assert second_context.foo == "bar"
def _call_objs(self):
    # Shared body for tests exercising pre/post tasks supplied as call()
    # objects carrying their own positional and keyword arguments.
    pre_body, post_body = Mock(), Mock()
    t1 = Task(pre_body)
    t2 = Task(post_body)
    t3 = Task(
        Mock(),
        pre=[call(t1, 5, foo="bar")],
        post=[call(t2, 7, biz="baz")],
    )
    executor = Executor(collection=Collection(t1=t1, t2=t2, t3=t3))
    executor.execute("t3")
    # Pre-task: Context first, then the call()'s own args/kwargs.
    args, kwargs = pre_body.call_args
    assert kwargs == {"foo": "bar"}
    assert isinstance(args[0], Context)
    assert args[1] == 5
    # Post-task: same contract.
    args, kwargs = post_body.call_args
    assert kwargs == {"biz": "baz"}
    assert isinstance(args[0], Context)
    assert args[1] == 7
from raft import task, Collection


@task
def z_toplevel(c):
    pass


@task
def subtask(c):
    pass


# Nested namespace: subtask ends up addressable as a.b.subtask.
_innermost = Collection("b", subtask)
ns = Collection(z_toplevel, Collection("a", _innermost))
"installed to GPG-sign!") for archive in archives: cmd = "{0} --detach-sign -a --passphrase-fd 0 {{0}}".format( gpg_bin) # noqa c.run(cmd.format(archive), in_stream=input_) input_.seek(0) # So it can be replayed by subsequent iterations # Upload parts = ["twine", "upload"] if index: index_arg = "--repository {0}".format(index) if index: parts.append(index_arg) paths = archives[:] if sign: paths.append(os.path.join(directory, "dist", "*.asc")) parts.extend(paths) cmd = " ".join(parts) if dry_run: print("Would publish via: {0}".format(cmd)) print("Files that would be published:") c.run("ls -l {0}".format(" ".join(paths))) else: c.run(cmd) # Stitch together current partway-rewritten stuff into public namespace. # TODO: reconsider once fully done; may end up looking a lot like this anyways. ns = Collection("release", all_, status, prepare, build, publish) # Hide stdout by default, preferring to explicitly enable it when necessary. ns.configure({"run": {"hide": "stdout"}})
from raft import ctask, Collection


@ctask
def go(c):
    # Ensures a kaboom if mocking fails
    c.run("false")


ns = Collection(go)
ns.configure({"run": {"echo": True}})
from raft import task, Collection


@task
def dummy(c):
    pass


# One task plus an empty named subcollection.
ns = Collection(dummy, Collection("subcollection"))
from raft import task, Collection


@task
def toplevel(c):
    pass


@task
def subtask(c):
    pass


# The same task object is mounted at two depths:
# a.subtask and a.nother.subtask.
_deeper = Collection("nother", subtask)
ns = Collection(toplevel, Collection("a", subtask, _deeper))
from raft import Collection, task, call

from package import module


@task
def top_pre(c):
    pass


# toplevel always triggers top_pre first, via a pre-call object.
@task(call(top_pre))
def toplevel(c):
    pass


ns = Collection(module, toplevel)
def core_arg_parse_result_defaults_to_None(self):
    # Without an explicit core= argument, .core stays None.
    executor = Executor(collection=Collection())
    assert executor.core is None
def allows_collection_and_config(self):
    # Both constructor arguments must be stored verbatim.
    coll = Collection()
    conf = Config()
    executor = Executor(collection=coll, config=conf)
    assert executor.collection is coll
    assert executor.config is conf
build(c, clean=True, target=target, opts=opts) finally: rmtree(tmpdir) @task def tree(c): """ Display documentation contents with the 'tree' program. """ ignore = ".git|*.pyc|*.swp|dist|*.egg-info|_static|_build|_templates" c.run('tree -Ca -I "{0}" {1}'.format(ignore, c.sphinx.source)) # Vanilla/default/parameterized collection for normal use ns = Collection(_clean, _browse, build, tree, doctest) ns.configure({ "sphinx": { "source": "docs", # TODO: allow lazy eval so one attr can refer to another? "target": join("docs", "_build"), "target_file": "index.html", } }) # Multi-site variants, used by various projects (fabric, raft, paramiko) # Expects a tree like sites/www/<sphinx> + sites/docs/<sphinx>, # and that you want 'inline' html build dirs, e.g. sites/www/_build/index.html. def _site(name, help_part):
from raft import task, Collection


@task
def mytask(c):
    # Value is expected to arrive via a YAML config file.
    assert c.outer.inner.hooray == "yml"


ns = Collection(mytask)
from raft import task, Collection

from . import build, deploy, provision


@task(aliases=["ipython"])
def shell(c):
    "Load a REPL with project state already set up."
    pass


@task(aliases=["run_tests"], default=True)
def test(c):
    "Run the test suite with baked-in args."
    pass


# NOTE: using build's internal collection directly as a way of ensuring a
# corner case (collection 'named' via local kwarg) gets tested for --list.
# NOTE: Docstring cloning in effect to preserve the final organic looking
# result...
localbuild = build.ns
localbuild.__doc__ = build.__doc__
ns = Collection(shell, test, deploy, provision, build=localbuild)
"Tasks for compiling static code and assets." from raft import task, Collection from . import docs, python @task(name="all", aliases=["everything"], default=True) def all_(c): "Build all necessary artifacts." pass @task(aliases=["ext"]) def c_ext(c): "Build our internal C extension." pass @task def zap(c): "A silly way to clean." pass ns = Collection(all_, c_ext, zap, docs, python)
def uses_blank_config_by_default(self):
    # No config= given -> a fresh default Config is created.
    executor = Executor(collection=Collection())
    assert isinstance(executor.config, Config)
def _load(self, name):
    """Load module ``name`` via our loader and wrap it as a Collection."""
    module, _ = self.loader.load(name)
    return Collection.from_module(module)
Run an expensive, hard-to-test-in-pytest run() regression checker. :param int jobs: Number of jobs to run, in total. Ideally num of CPUs. """ os.chdir("integration/_support") cmd = "seq {} | parallel -n0 --halt=now,fail=1 inv -c regression check" c.run(cmd.format(jobs)) ns = Collection( test, coverage, integration, regression, vendorize, release, www, docs, sites, watch_docs, travis, checks.blacken, ) ns.configure({ "blacken": { # Skip the vendor directory and the (Travis-only) alt venv when # blackening. # TODO: this is making it seem like I really do want an explicit # arg/conf-opt in the blacken task for "excluded paths"...ha "find_opts": "-and -not \( -path './raft/vendor*' -or -path './alt_env*' -or -path './build*' \)" # noqa },