def test_recompute_till():
    def by2(n):
        return 2 * n

    pipe = compose(
        ...,
        operation(by2, "f0", "a0", "a1"),
        operation(by2, "f1", "a1", "a2"),
        operation(by2, "f2", "a2", "a3"),
        operation(by2, "f3", "a3", "a4"),
    )
    sol = pipe(a0=1)
    assert exe_ops(sol) == ["f0", "f1", "f2", "f3"]
    assert sol == {"a0": 1, "a1": 2, "a2": 4, "a3": 8, "a4": 16}

    inp = dict(sol)
    inp["a1"] = 3
    sol = pipe.compute(inp, outputs="a3", recompute_from="a1")
    assert exe_ops(sol) == ["f1", "f2"]
    assert sol == {"a3": 12}

    with evictions_skipped(True):
        sol = pipe.compute(inp, outputs="a3", recompute_from="a1")
        assert exe_ops(sol) == ["f1", "f2"]
        assert sol == {"a0": 1, "a1": 3, "a2": 6, "a3": 12, "a4": 16}

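# A minimal standalone sketch of the `recompute_from=` behaviour exercised above,
# assuming only graphtik's public `compose`/`operation` API; pipeline and data
# names here are invented for illustration.
from graphtik import compose, operation

inc = operation(lambda n: n + 1, name="inc1", needs="x0", provides="x1")
dbl = operation(lambda n: 2 * n, name="dbl", needs="x1", provides="x2")
chain = compose("recompute_demo", inc, dbl)

full = chain(x0=1)              # {"x0": 1, "x1": 2, "x2": 4}
patched = {**full, "x1": 10}    # pretend "x1" changed upstream
# Re-run only the ops downstream of "x1"; "inc1" should not execute again.
sol = chain.compute(patched, outputs="x2", recompute_from="x1")
print(sol)                      # expected: {"x2": 20}
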
def test_sideffect_not_canceled_if_not_resched(exemethod):
    # Check op without any provides
    #
    an_sfx = sfx("b")
    op1 = operation(
        lambda: {an_sfx: False}, name="op1", provides=an_sfx, returns_dict=True
    )
    op2 = operation(lambda: 1, name="op2", needs=an_sfx, provides="b")
    pipeline = compose("t", op1, op2, parallel=exemethod)
    # sol = pipeline.compute()
    # assert sol == {an_sfx: False, "b": 1}
    sol = pipeline.compute(outputs="b")
    assert sol == {"b": 1}

    # Check also op with some provides
    #
    an_sfx = sfx("b")
    op1 = operation(
        lambda: {"a": 1, an_sfx: False},
        name="op1",
        provides=["a", an_sfx],
        returns_dict=True,
    )
    op2 = operation(lambda: 1, name="op2", needs=an_sfx, provides="b")
    pipeline = compose("t", op1, op2, parallel=exemethod)
    sol = pipeline.compute()
    assert sol == {"a": 1, an_sfx: False, "b": 1}
    sol = pipeline.compute(outputs="b")
    assert sol == {"b": 1}

def test_jsonp_and_conveyor_fn_complex_NOT_LAYERED(solution_layered_false):
    pipe = compose(
        "t",
        operation(
            name="op1",
            needs=["i/a", "i/a"],  # dupe jsonp needs
            provides=["r/a", modify("a")],
        )(),
        operation(
            lambda x: (x, 2 * x), name="op2", needs=["r/a"], provides=["r/A", "r/AA"]
        ),
    )
    inp = {"i": {"a": 1}}

    sol = pipe.compute(inp, layered_solution=solution_layered_false)
    assert sol == {**inp, "r": {"a": 1, "A": 1, "AA": 2}, "a": 1}

    sol = pipe.compute(inp, outputs="r", layered_solution=solution_layered_false)
    assert sol == {"r": {"a": 1, "A": 1, "AA": 2}}

    sol = pipe.compute(
        inp, outputs=["r/A", "r/AA"], layered_solution=solution_layered_false
    )
    assert sol == {"r": {"a": 1, "A": 1, "AA": 2}}  ## FIXME: should have evicted r/a!

    sol = pipe.compute(inp, outputs="r/AA", layered_solution=solution_layered_false)
    assert sol == {"r": {"a": 1, "AA": 2}}  ## FIXME: should have evicted r/a!

def test_combine_clusters():
    p1 = compose(
        "op1",
        operation(lambda a, b: None, name="op1", needs=["a", "b"], provides=["ab"]),
        operation(lambda a, b: None, name="op2", needs=["a", "ab"], provides="c"),
        operation(lambda a: None, name="op3", needs="c", provides="C"),
    )
    p2 = compose(
        "op2",
        operation(lambda a, b: None, name="op1", needs=["a", "b"], provides=["ab"]),
        operation(lambda a, b: None, name="op2", needs=["c", "ab"], provides=["cab"]),
    )
    merged_graph = compose("m", p1, p2, nest=True)
    dot: pydot.Dot = merged_graph.plot()

    assert dot.get_subgraph(f"cluster_{p1.name}")
    assert dot.get_subgraph(f"cluster_{p2.name}")

def test_aliases_pipeline(exemethod):
    provides = ("a", sfxed("s", "foo"))
    aliased = operation(
        lambda: ("A", "B"),
        name="op1",
        provides=provides,
        aliases={"a": "b", "s": "S1"},
    )
    assert aliased._user_provides == provides
    assert tuple(aliased.provides) == (
        "a",
        sfxed("s", "foo"),
        "b",
        "S1",
    )

    pipe = compose(
        "test_net",
        aliased,
        operation(lambda x: x * 2, name="op2", needs="b", provides="c"),
        parallel=exemethod,
    )

    assert pipe() == {"a": "A", "s": "B", "b": "A", "S1": "B", "c": "AA"}
    assert list(pipe.provides) == [*aliased.provides, "c"]

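# A hedged sketch of `aliases=`: the op below publishes its result under an
# extra name so downstream ops can depend on either; the names are invented,
# only the `operation(..., aliases=...)` usage from the test above is assumed.
from graphtik import compose, operation

src = operation(lambda: 42, name="src", provides="raw", aliases={"raw": "answer"})
use = operation(lambda answer: answer + 1, name="use", needs="answer", provides="next")
print(compose("alias_demo", src, use)())
# expected: {"raw": 42, "answer": 42, "next": 43}
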
def test_sideffect_real_input(reverse, exemethod):
    sidefx_fail = is_marshal_tasks() and not isinstance(
        get_execution_pool(), types.FunctionType  # mp_dummy.Pool
    )
    ops = [
        operation(name="extend", needs=["box", "a"], provides=[sfx("b")])(_box_extend),
        operation(name="increment", needs=["box", sfx("b")], provides="c")(
            _box_increment
        ),
    ]
    if reverse:
        ops = reversed(ops)
    # Designate `a`, `b` as sideffect inp/out arguments.
    graph = compose("mygraph", *ops, parallel=exemethod)

    box_orig = [0]
    assert graph(**{"box": [0], "a": True}) == {
        "a": True,
        "box": box_orig if sidefx_fail else [1, 2, 3],
        "c": None,
    }
    assert graph.compute({"box": [0], "a": True}, ["box", "c"]) == {
        "box": box_orig if sidefx_fail else [1, 2, 3],
        "c": None,
    }

def test_pandas_input():
    ser = pd.Series([1, 2])

    sol = operation(fn=None, name="pandas", needs="a", provides="A")(a=ser)
    assert (sol["A"] == ser).all()

    sol = operation(fn=None, name="pandas", needs="a", provides="A").compute({"a": ser})
    assert (sol["A"] == ser).all()

def test_compose_rename_dict(caplog):
    pip = compose(
        "t",
        operation(str, "op1", provides=["a", "aa"]),
        operation(
            str,
            "op2",
            needs="a",
            provides=["b", sfx("c")],
            aliases=[("b", "B"), ("b", "p")],
        ),
        nest={"op1": "OP1", "op2": lambda n: "OP2", "a": "A", "b": "bb"},
    )
    print(str(pip))
    assert str(pip) == (
        "Pipeline('t', needs=['A'], "
        "provides=['A', 'aa', 'bb', sfx('c'), 'B', 'p'], x2 ops: OP1, OP2)"
    )
    print(str(pip.ops))
    assert (
        str(pip.ops)
        == dedent(
            """
            [FnOp(name='OP1', provides=['A', 'aa'], fn='str'),
             FnOp(name='OP2', needs=['A'], provides=['bb', sfx('c'), 'B', 'p'],
             aliases=[('bb', 'B'), ('bb', 'p')], fn='str')]
            """
        ).replace("\n", "")
    )

def test_pipeline_node_props():
    op1 = operation(lambda: None, name="a", node_props={"a": 11, "b": 0, "bb": 2})
    op2 = operation(lambda: None, name="b", node_props={"a": 3, "c": 4})
    pipeline = compose("n", op1, op2, node_props={"bb": 22, "c": 44})

    exp = {
        "a": {"typ": 1, "a": 11, "b": 0, "bb": 22, "c": 44},
        "b": {"typ": 1, "a": 3, "bb": 22, "c": 44},
    }
    node_props = _collect_op_props(pipeline)
    assert node_props == exp

    # Check node-prop sideffects are not modified
    #
    assert op1.node_props == {"a": 11, "b": 0, "bb": 2}
    assert op2.node_props == {"a": 3, "c": 4}

def test_unsatisfied_operations(exemethod):
    # Test that operations with partial inputs are culled and not failing.
    pipeline = compose(
        "pipeline",
        operation(name="add", needs=["a", "b1"], provides=["a+b1"])(add),
        operation(name="sub", needs=["a", "b2"], provides=["a-b2"])(sub),
        parallel=exemethod,
    )

    exp = {"a": 10, "b1": 2, "a+b1": 12}
    assert pipeline(**{"a": 10, "b1": 2}) == exp
    assert pipeline.compute({"a": 10, "b1": 2}, ["a+b1"]) == filtdict(exp, "a+b1")
    assert pipeline.withset(outputs=["a+b1"])(**{"a": 10, "b1": 2}) == filtdict(
        exp, "a+b1"
    )

    exp = {"a": 10, "b2": 2, "a-b2": 8}
    assert pipeline(**{"a": 10, "b2": 2}) == exp
    assert pipeline.compute({"a": 10, "b2": 2}, ["a-b2"]) == filtdict(exp, "a-b2")

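# A small sketch of the culling behaviour above (invented names): with only "p"
# given, the op needing the missing "q" is silently dropped rather than failing,
# and the rest of the pipeline still runs.
from graphtik import compose, operation

culling_demo = compose(
    "culling_demo",
    operation(lambda p: p * 2, name="use_p", needs="p", provides="pp"),
    operation(lambda q: q * 3, name="use_q", needs="q", provides="qq"),
)
print(culling_demo(p=4))  # expected: {'p': 4, 'pp': 8}; "use_q" culled, no error
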
def test_unsatisfied_operations_same_out(exemethod):
    # Test unsatisfied pairs of operations providing the same output.
    pipeline = compose(
        "pipeline",
        operation(name="mul", needs=["a", "b1"], provides=["ab"])(mul),
        operation(name="div", needs=["a", "b2"], provides=["ab"])(floordiv),
        operation(name="add", needs=["ab", "c"], provides=["ab_plus_c"])(add),
        parallel=exemethod,
    )

    # Parallel FAIL! in #26
    exp = {"a": 10, "b1": 2, "c": 1, "ab": 20, "ab_plus_c": 21}
    assert pipeline(**{"a": 10, "b1": 2, "c": 1}) == exp
    assert pipeline.compute({"a": 10, "b1": 2, "c": 1}, ["ab_plus_c"]) == filtdict(
        exp, "ab_plus_c"
    )

    # Parallel FAIL! in #26
    exp = {"a": 10, "b2": 2, "c": 1, "ab": 5, "ab_plus_c": 6}
    assert pipeline(**{"a": 10, "b2": 2, "c": 1}) == exp
    assert pipeline.compute({"a": 10, "b2": 2, "c": 1}, ["ab_plus_c"]) == filtdict(
        exp, "ab_plus_c"
    )

def test_pruning_multiouts_not_override_intermediates2(exemethod):
    pipeline = compose(
        "pipeline",
        operation(name="must run", needs=["a"], provides=["overridden", "e"])(
            lambda x: (x, 2 * x)
        ),
        operation(name="op1", needs=["overridden", "c"], provides=["d"])(add),
        operation(name="op2", needs=["d", "e"], provides=["asked"])(mul),
        parallel=exemethod,
    )

    inputs = {"a": 5, "overridden": 1, "c": 2}
    exp = {"a": 5, "overridden": 5, "c": 2, "e": 10, "d": 7, "asked": 70}
    assert pipeline(**inputs) == exp
    assert pipeline.compute(inputs, "asked") == filtdict(exp, "asked")

    ## Test OVERWRITES
    #
    solution = pipeline.compute(inputs)
    assert solution == exp
    assert solution.overwrites == {"overridden": [5, 1]}

    # No overwrites when evicted.
    #
    solution = pipeline.compute(inputs, "asked")
    assert solution == filtdict(exp, "asked")
    assert solution.overwrites == {}

    # ... but overwrites collected if asked.
    #
    solution = pipeline.compute(inputs, ["asked", "overridden"])
    assert solution == filtdict(exp, "asked", "overridden")
    assert solution.overwrites == {"overridden": [5, 1]}

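# A hedged mini-example of `solution.overwrites` (invented names): "split" also
# provides "extra", which is still needed downstream, so it must run and its
# computed "mid" overwrites the given one; per the assertions above, the
# computed value is listed before the given one.
from graphtik import compose, operation

split = operation(
    lambda x: (x, 10 * x), name="split", needs="x", provides=["mid", "extra"]
)
use = operation(
    lambda mid, extra: mid + extra, name="use", needs=["mid", "extra"], provides="out"
)
overwrites_demo = compose("overwrites_demo", split, use)

sol = overwrites_demo.compute({"x": 3, "mid": 999})  # "mid" given *and* recomputed
print(sol["out"], sol.overwrites)                    # expected: 33 {'mid': [3, 999]}
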
def test_pruning_with_given_intermediate_and_asked_out(exemethod):
    # Test #24: v1.2.4 does not prune before given intermediate data when
    # outputs not asked, but does so when output asked.
    pipeline = compose(
        "pipeline",
        operation(name="unjustly pruned", needs=["given-1"], provides=["a"])(identity),
        operation(name="shortcut-ed", needs=["a", "b"], provides=["given-2"])(add),
        operation(name="good_op", needs=["a", "given-2"], provides=["asked"])(add),
        parallel=exemethod,
    )

    inps = {"given-1": 5, "b": 2, "given-2": 2}
    exp = {"given-1": 5, "given-2": 2, "a": 5, "b": 2, "asked": 7}

    # v1.2.4 is ok
    assert pipeline(**inps) == exp
    # FAILS
    # - on v1.2.4 with KeyError: 'a',
    # - on #18 (unsatisfied) with no result.
    # FIXED on #18+#26 (new dag solver).
    assert pipeline.compute(inps, "asked") == filtdict(exp, "asked")

    ## Test OVERWRITES
    #
    solution = pipeline.compute(inps)
    assert solution == exp
    assert solution.overwrites == {}

    solution = pipeline.compute(inps, "asked")
    assert solution == filtdict(exp, "asked")
    assert solution.overwrites == {}

def test_pruning_not_overrides_given_intermediate(exemethod):
    # Test #25: v1.2.4 overwrites intermediate data when no output asked
    pipeline = compose(
        "pipeline",
        operation(name="not run", needs=["a"], provides=["overridden"])(scream),
        operation(name="op", needs=["overridden", "c"], provides=["asked"])(add),
        parallel=exemethod,
    )

    inputs = {"a": 5, "overridden": 1, "c": 2}
    exp = {"a": 5, "overridden": 1, "c": 2, "asked": 3}
    # v1.2.4.ok
    assert pipeline.compute(inputs, "asked") == filtdict(exp, "asked")
    # FAILs
    # - on v1.2.4 with (overridden, asked): = (5, 7) instead of (1, 3)
    # - on #18(unsatisfied) + #23(ordered-sets) with (overridden, asked) = (5, 7) instead of (1, 3)
    # FIXED on #26
    assert pipeline(**inputs) == exp

    ## Test OVERWRITES
    #
    solution = pipeline.compute(inputs, ["asked"])
    assert solution == filtdict(exp, "asked")
    assert solution.overwrites == {}  # unjust must have been pruned

    solution = pipeline(**inputs)
    assert solution == exp
    assert solution.overwrites == {}  # unjust must have been pruned

def test_step_badge():
    p = compose(
        "",
        operation(str, "0", provides="b"),
        operation(str, "1", needs="b", provides="c"),
    )
    sol = p.compute(outputs="c")
    dot_str = str(sol.plot())
    print(dot_str)

    exp = r"""
        digraph solution_x4_nodes {
        fontname=italic;
        node [fillcolor=white, style=filled];
        <0> [label=<<TABLE CELLBORDER="0" CELLSPACING="0" STYLE="rounded" BGCOLOR="wheat">
            <TR>
                <TD BORDER="1" SIDES="b" ALIGN="left" TOOLTIP="FnOp(name='0', provides=['b'], fn='str')" TARGET="_top"
                ><B>OP:</B> <I>0</I></TD>
                <TD BORDER="1" SIDES="b" ALIGN="right"><TABLE BORDER="0" CELLBORDER="0" CELLSPACING="1" CELLPADDING="2" ALIGN="right">
                    <TR>
                        <TD STYLE="rounded" HEIGHT="22" WIDTH="14" FIXEDSIZE="true" VALIGN="BOTTOM" BGCOLOR="#00bbbb" TITLE="computation order" HREF="https://graphtik.readthedocs.io/en/latest/arch.html#term-steps" TARGET="_top"><FONT FACE="monospace" COLOR="white"><B>0</B></FONT></TD>
                    </TR>
                </TABLE></TD>
            </TR>
            <TR>
                <TD COLSPAN="2" ALIGN="left" TARGET="_top"
                ><B>FN:</B> builtins.str</TD>
            </TR>
        </TABLE>>, shape=plain, tooltip=<0>];
        <b> [color="#006666", fixedsize=shape, label=<<TABLE CELLBORDER="0" CELLSPACING="0" BORDER="0">
            <TR>
                <TD STYLE="rounded" CELLSPACING="2" CELLPADDING="4" BGCOLOR="#00bbbb" TITLE="computation order" HREF="https://graphtik.readthedocs.io/en/latest/arch.html#term-steps" TARGET="_top"
                ><FONT FACE="monospace" COLOR="white"><B>2</B></FONT></TD><TD>b</TD>
            </TR>
        </TABLE>>, penwidth=3, shape=rect, style="filled,dashed", tooltip="(to evict)\n(evicted)"];
        <1> [label=<<TABLE CELLBORDER="0" CELLSPACING="0" STYLE="rounded" BGCOLOR="wheat">
            <TR>
                <TD BORDER="1" SIDES="b" ALIGN="left" TOOLTIP="FnOp(name='1', needs=['b'], provides=['c'], fn='str')" TARGET="_top"
                ><B>OP:</B> <I>1</I></TD>
                <TD BORDER="1" SIDES="b" ALIGN="right"><TABLE BORDER="0" CELLBORDER="0" CELLSPACING="1" CELLPADDING="2" ALIGN="right">
                    <TR>
                        <TD STYLE="rounded" HEIGHT="22" WIDTH="14" FIXEDSIZE="true" VALIGN="BOTTOM" BGCOLOR="#00bbbb" TITLE="computation order" HREF="https://graphtik.readthedocs.io/en/latest/arch.html#term-steps" TARGET="_top"><FONT FACE="monospace" COLOR="white"><B>1</B></FONT></TD>
                    </TR>
                </TABLE></TD>
            </TR>
            <TR>
                <TD COLSPAN="2" ALIGN="left" TARGET="_top"
                ><B>FN:</B> builtins.str</TD>
            </TR>
        </TABLE>>, shape=plain, tooltip=<1>];
        <c> [fillcolor=wheat, fixedsize=shape, label=<<TABLE CELLBORDER="0" CELLSPACING="0" BORDER="0">
            <TR><TD>c</TD>
            </TR>
        </TABLE>>, shape=house, style=filled, tooltip="(output)\n(str)"];
        <0> -> <b> [headport=n, tailport=s];
        <b> -> <1> [arrowtail=inv, dir=back, headport=n, tailport=s];
        <1> -> <c> [headport=n, tailport=s];
        legend [URL="https://graphtik.readthedocs.io/en/latest/_images/GraphtikLegend.svg", fillcolor=yellow, shape=component, style=filled, target=_blank];
        }
        """
    assert _striplines(dot_str) == _striplines(exp)

def test_node_predicate_based_prune():
    pipeline = compose(
        "N",
        operation(name="A", needs=["a"], provides=["aa"], node_props={"color": "red"})(
            identity
        ),
        operation(
            name="B", needs=["b"], provides=["bb"], node_props={"color": "green"}
        )(identity),
        operation(name="C", needs=["c"], provides=["cc"])(identity),
        operation(
            name="SUM",
            needs=[optional(i) for i in ("aa", "bb", "cc")],
            provides=["sum"],
        )(addall),
    )
    inp = {"a": 1, "b": 2, "c": 3}
    assert pipeline(**inp)["sum"] == 6
    assert len(pipeline.net.graph.nodes) == 11

    pred = lambda n, d: d.get("color", None) != "red"
    assert pipeline.withset(predicate=pred)(**inp)["sum"] == 5
    assert len(pipeline.withset(predicate=pred).compile().dag.nodes) == 9

    pred = lambda n, d: "color" not in d
    assert pipeline.withset(predicate=pred)(**inp)["sum"] == 3
    assert len(pipeline.withset(predicate=pred).compile().dag.nodes) == 7

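# A small sketch of predicate-based pruning (invented op/data names), reusing
# the `node_props=` / `withset(predicate=...)` API exercised above: the
# predicate receives each graph node and its attribute dict and may veto it.
from graphtik import compose, operation, optional

fast = operation(lambda a: a, name="fast", needs="a", provides="fa",
                 node_props={"tier": "cheap"})
slow = operation(lambda a: a * 100, name="slow", needs="a", provides="sa",
                 node_props={"tier": "costly"})
merge = operation(lambda fa=0, sa=0: fa + sa, name="merge",
                  needs=[optional("fa"), optional("sa")], provides="total")
pruning_demo = compose("pruning_demo", fast, slow, merge)

cheap_only = pruning_demo.withset(
    predicate=lambda node, data: data.get("tier") != "costly"
)
print(pruning_demo(a=2)["total"], cheap_only(a=2)["total"])  # expected: 202 2
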
def test_op_node_props():
    op_factory = operation(lambda: None, name="a", node_props=())
    assert op_factory.node_props == {}

    np = {"a": 1}
    op = operation(lambda: None, name="a", node_props=np)
    assert op.node_props == np

def samplenet():
    """sum1 = (a + b), sum2 = (c + d), sum3 = c + (c + d)"""
    sum_op1 = operation(name="sum_op1", needs=["a", "b"], provides="sum1")(add)
    sum_op2 = operation(name="sum_op2", needs=["c", "d"], provides="sum2")(add)
    sum_op3 = operation(name="sum_op3", needs=["c", "sum2"], provides="sum3")(add)
    return compose("test_net", sum_op1, sum_op2, sum_op3)

def pipeline():
    return compose(
        "pipeline",
        operation(name="add", needs=["a", "b1"], provides=["ab1"])(add),
        operation(name="sub", needs=["a", optional("b2")], provides=["ab2"])(
            lambda a, b=1: a - b
        ),
        operation(name="abb", needs=["ab1", "ab2"], provides=["asked"])(add),
    )

def test_compose_rename_preserve_ops(caplog):
    pip = compose(
        "t",
        operation(str, "op1"),
        operation(str, "op2"),
        nest=lambda na: f"aa.{na.name}",
    )
    assert str(pip) == "Pipeline('t', x2 ops: aa.op1, aa.op2)"

def test_narrow_and_optionality(reverse):
    op1 = operation(name="op1", needs=[optional("a"), optional("bb")], provides="sum1")(
        addall
    )
    op2 = operation(name="op2", needs=["a", optional("bb")], provides="sum2")(addall)
    ops = [op1, op2]
    provides = "'sum1', 'sum2'"
    if reverse:
        ops = list(reversed(ops))
        provides = "'sum2', 'sum1'"
    pipeline_str = f"Pipeline('t', needs=['a', 'bb'(?)], provides=[{provides}], x2 ops"

    pipeline = compose("t", *ops)
    assert repr(pipeline).startswith(pipeline_str)

    ## IO & predicate do not affect network, but solution.

    ## Compose with `inputs`
    #
    pipeline = compose("t", *ops)
    assert repr(pipeline).startswith(pipeline_str)
    assert repr(pipeline.compile("a")).startswith(
        f"ExecutionPlan(needs=['a'], provides=[{provides}], x2 steps:"
    )
    #
    pipeline = compose("t", *ops)
    assert repr(pipeline).startswith(pipeline_str)
    assert repr(pipeline.compile(["bb"])).startswith(
        "ExecutionPlan(needs=['bb'(?)], provides=['sum1'], x1 steps:"
    )

    ## Narrow by `provides`
    #
    pipeline = compose("t", *ops, outputs="sum1")
    assert repr(pipeline).startswith(pipeline_str)
    assert repr(pipeline.compile("bb")).startswith(
        "ExecutionPlan(needs=['bb'(?)], provides=['sum1'], x3 steps:"
    )
    assert repr(pipeline.compile("bb")) == repr(pipeline.compute({"bb": 1}).plan)

    pipeline = compose("t", *ops, outputs=["sum2"])
    assert repr(pipeline).startswith(pipeline_str)
    assert not pipeline.compile("bb").steps
    assert len(pipeline.compile("a").steps) == 3
    assert repr(pipeline.compile("a")).startswith(
        "ExecutionPlan(needs=['a'], provides=['sum2'], x3 steps:"
    )

    ## Narrow by BOTH
    #
    pipeline = compose("t", *ops, outputs=["sum1"])
    assert repr(pipeline.compile("a")).startswith(
        "ExecutionPlan(needs=['a'(?)], provides=['sum1'], x3 steps:"
    )

    pipeline = compose("t", *ops, outputs=["sum2"])
    with pytest.raises(ValueError, match="Unsolvable graph:"):
        pipeline.compute({"bb": 11})

def test_skip_eviction_flag():
    graph = compose(
        "graph",
        operation(name="add1", needs=["a", "b"], provides=["ab"])(add),
        operation(name="add2", needs=["a", "ab"], provides=["aab"])(add),
    )
    with evictions_skipped(True):
        exp = {"a": 1, "b": 3, "ab": 4, "aab": 5}
        assert graph.compute({"a": 1, "b": 3}, "aab") == exp

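# A hedged usage sketch for the eviction flag checked above; it assumes the
# `evictions_skipped` context-manager is importable from `graphtik.config`
# (the test module imports it from somewhere equivalent) and uses invented names.
from graphtik import compose, operation
from graphtik.config import evictions_skipped

evict_demo = compose(
    "evict_demo",
    operation(lambda a: a + 1, name="step1", needs="a", provides="mid"),
    operation(lambda mid: mid * 2, name="step2", needs="mid", provides="out"),
)
print(evict_demo.compute({"a": 1}, "out"))      # non-outputs evicted: {'out': 4}
with evictions_skipped(True):
    print(evict_demo.compute({"a": 1}, "out"))  # all kept: {'a': 1, 'mid': 2, 'out': 4}
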
def test_returns_dict(result, dictres):
    if dictres is ...:
        dictres = result

    op = operation(lambda: result, provides=dictres.keys(), returns_dict=True)
    assert op.compute({}) == dictres

    op = operation(lambda: result, provides="a", returns_dict=False)
    assert op.compute({})["a"] == result

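# A minimal sketch of `returns_dict=True` (invented names): the fn's own dict
# keys are matched against `provides`, instead of zipping a returned tuple
# positionally as in the plain case above.
from graphtik import compose, operation

stats = operation(
    lambda nums: {"lo": min(nums), "hi": max(nums)},
    name="stats",
    needs="nums",
    provides=["lo", "hi"],
    returns_dict=True,
)
print(compose("returns_dict_demo", stats)(nums=[3, 1, 4]))
# expected: {'nums': [3, 1, 4], 'lo': 1, 'hi': 4}
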
def test_cycle_tip():
    pipe = compose(..., operation(str, "cyclic1", "a", "a"))
    with pytest.raises(nx.NetworkXUnfeasible, match="TIP:"):
        pipe.compute()

    pipe = compose(
        ..., operation(str, "cyclic1", "a", "b"), operation(str, "cyclic2", "b", "a")
    )
    with evictions_skipped(True), pytest.raises(nx.NetworkXUnfeasible, match="TIP:"):
        pipe.compute()

def test_combine_networks(exemethod, bools):
    # Code from `compose.rst` examples
    if not exemethod:
        return

    parallel1 = bools >> 0 & 1
    parallel2 = bools >> 1 & 1

    graphop = compose(
        "graphop",
        operation(name="mul1", needs=["a", "b"], provides=["ab"])(mul),
        operation(name="sub1", needs=["a", "ab"], provides=["a-ab"])(sub),
        operation(name="abspow1", needs=["a-ab"], provides=["|a-ab|³"])(
            partial(abspow, p=3)
        ),
        parallel=parallel1,
    )

    assert graphop.compute({"a-ab": -8}) == {"a-ab": -8, "|a-ab|³": 512}

    bigger_graph = compose(
        "bigger_graph",
        graphop,
        operation(name="sub2", needs=["a-ab", "c"], provides="a-ab_minus_c")(sub),
        parallel=parallel2,
        nest=lambda ren_args: ren_args.typ == "op",
    )
    ## Ensure all old-nodes were prefixed.
    #
    old_nodes = graphop.net.graph.nodes
    new_nodes = bigger_graph.net.graph.nodes
    for old_node in old_nodes:
        if isinstance(old_node, Operation):
            assert old_node not in new_nodes
        else:
            assert old_node in new_nodes

    sol = bigger_graph.compute({"a": 2, "b": 5, "c": 5}, ["a-ab_minus_c"])
    assert sol == {"a-ab_minus_c": -13}

    ## Test Plots

    ## Ensure all old-nodes were prefixed.
    #
    # Access all nodes from Network, where no "after pruning" cluster exists.
    old_nodes = [n for n in graphop.net.plot().get_nodes()]
    new_node_names = [n.get_name() for n in bigger_graph.net.plot().get_nodes()]

    for old_node in old_nodes:
        if old_node.get_shape() == "plain":  # Operation
            assert old_node.get_name() not in new_node_names
        else:
            # legend-node included here.
            assert old_node.get_name() in new_node_names

def dot_str_pipeline():
    return compose(
        "graph",
        operation(name="node", needs=["edge", "digraph: strict"], provides=["<graph>"])(
            add
        ),
        operation(
            name="cu:sto:m", needs=["edge", "digraph: strict"], provides=["<graph>"]
        )(func),
    )

def test_returns_dict_keyword_renames():
    res = {"1": 1, "2": 2}
    op = operation(lambda: res, provides=keyword("11", "1"), returns_dict=True)
    assert op.compute({}) == {"11": 1}
    assert len(res) == 2  # check it did not mutate results

    res = {"1": 1, "11": 11}
    op = operation(lambda: res, provides=keyword("11", "1"), returns_dict=True)
    assert op.compute({}) == {"11": 1}  # original '11' was discarded
    assert len(res) == 2  # check it did not mutate results

def test_node_edge_defaults():
    dot = compose("defs", operation(str, "a", "b")).plot(
        theme={"node_defaults": {"color": "red"}}
    )
    assert "color=red" in str(dot)

    dot = compose("defs", operation(str, "a", "b")).plot(
        theme={"edge_defaults": {"color": "red"}}
    )
    assert "color=red" in str(dot)

def test_impossible_outputs():
    pipeline = compose(
        "test_net",
        operation(name="op1", needs=["a"], provides="aa")(identity),
        operation(name="op2", needs=["aa", "bb"], provides="aabb")(identity),
    )
    with pytest.raises(ValueError, match="Unreachable outputs"):
        pipeline.compute({"a": 1}, ["aabb"])

    with pytest.raises(ValueError, match="Unreachable outputs"):
        pipeline.compute({"a": 1}, ["aa", "aabb"])

def test_non_op_given():
    with pytest.raises(
        TypeError, match=r"(?s)Received x1 non-Operation instances: \[1\]"
    ):
        compose("", operation(None, name="b"), 1, operation(None, name="a"))

    with pytest.raises(
        TypeError,
        match=r"(?s)Received x2 non-Operation instances: \['noop', <function .+>\]",
    ):
        compose("", "noop", operation(None, name="b"), lambda: None)
