Example #1
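# A keyword given to `compose()` should be conveyed to every operation
# stored in the resulting pipeline's graph.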
def test_pipeline_conveys_attr_to_ops(attr, value):
    def _opsattrs(ops, attr, value):
        vals = [getattr(op, attr) for op in ops if isinstance(op, Operation)]
        assert all(v == value for v in vals)

    kw = {attr: value}
    _opsattrs(compose("1", operation(str), **kw).net.graph, attr, value)
    _opsattrs(
        compose("2", operation(str, name="1"), operation(str, name="2"),
                **kw).net.graph,
        attr,
        value,
    )
Example #2
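# A `nest` callback that raises should abort `compose()` with the original error,
# while a WARNING log records the rename-arguments being processed.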
def test_compose_rename_bad_screamy(caplog):
    def screamy_nester(ren_args):
        raise RuntimeError("Bluff")

    with pytest.raises(RuntimeError, match="Bluff"):
        compose(
            "test_nest_err",
            operation(str, "op1"),
            operation(str, "op2"),
            nest=screamy_nester,
        )
    for record in caplog.records:
        if record.levelname == "WARNING":
            assert "name='op1', parent=None)" in record.message
Example #3
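# Exercise the value-types a `nest` dict accepts: truthy → prefix the node with
# its sub-pipeline's name, falsy → leave it unchanged, callable → return the new name.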
def test_compose_nest_dict(caplog):
    pipe = compose(
        "t",
        compose(
            "p1",
            operation(
                str,
                name="op1",
                needs=[sfx("a"), "aa"],
                provides=[sfxed("S1", "g"), sfxed("S2", "h")],
            ),
        ),
        compose(
            "p2",
            operation(
                str,
                name="op2",
                needs=sfx("a"),
                provides=["a", sfx("b")],
                aliases=[("a", "b")],
            ),
        ),
        nest={
            "op1": True,
            "op2": lambda n: "p2.op2",
            "aa": False,
            sfx("a"): True,
            "b": lambda n: f"PP.{n}",
            sfxed("S1", "g"): True,
            sfxed("S2", "h"): lambda n: dep_renamed(n, "ss2"),
            sfx("b"): True,
        },
    )
    got = str(pipe.ops)
    print(got)
    assert got == re.sub(
        r"[\n ]{2,}",  # collapse all space-chars into a single space
        " ",
        """
        [FnOp(name='p1.op1', needs=[sfx('p1.a'), 'aa'],
         provides=[sfxed('p1.S1', 'g'), sfxed('ss2', 'h')], fn='str'),
        FnOp(name='p2.op2', needs=[sfx('p2.a')],
         provides=['a', sfx('p2.b'), 'PP.b'], aliases=[('a', 'PP.b')], fn='str')]

        """.strip(),
    )
    for record in caplog.records:
        assert record.levelname != "WARNING"
Example #4
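# jsonp dependencies such as `i/a` / `r/a` read and write values nested
# inside dicts of the solution.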
def test_jsonp_and_conveyor_fn_complex_NOT_LAYERED(solution_layered_false):
    pipe = compose(
        "t",
        operation(
            name="op1",
            needs=["i/a", "i/a"],  # dupe jsonp needs
            provides=["r/a", modify("a")],
        )(),
        operation(lambda x: (x, 2 * x),
                  name="op2",
                  needs=["r/a"],
                  provides=["r/A", "r/AA"]),
    )
    inp = {"i": {"a": 1}}
    sol = pipe.compute(inp, layered_solution=solution_layered_false)
    assert sol == {**inp, "r": {"a": 1, "A": 1, "AA": 2}, "a": 1}
    sol = pipe.compute(inp,
                       outputs="r",
                       layered_solution=solution_layered_false)
    assert sol == {"r": {"a": 1, "A": 1, "AA": 2}}
    sol = pipe.compute(inp,
                       outputs=["r/A", "r/AA"],
                       layered_solution=solution_layered_false)
    assert sol == {"r": {"a": 1, "A": 1, "AA": 2}}  ## FIXME: should have evicted r/a!
    sol = pipe.compute(inp,
                       outputs="r/AA",
                       layered_solution=solution_layered_false)
    assert sol == {"r": {"a": 1, "AA": 2}}  ## FIXME: should have evicted r/a!
Example #5
def test_unsatisfied_operations_same_out(exemethod):
    # Test unsatisfied pairs of operations providing the same output.
    pipeline = compose(
        "pipeline",
        operation(name="mul", needs=["a", "b1"], provides=["ab"])(mul),
        operation(name="div", needs=["a", "b2"], provides=["ab"])(floordiv),
        operation(name="add", needs=["ab", "c"], provides=["ab_plus_c"])(add),
        parallel=exemethod,
    )

    #  Parallel FAIL! in #26
    exp = {"a": 10, "b1": 2, "c": 1, "ab": 20, "ab_plus_c": 21}
    assert pipeline(**{"a": 10, "b1": 2, "c": 1}) == exp
    assert pipeline.compute({"a": 10, "b1": 2, "c": 1},
                            ["ab_plus_c"]) == filtdict(exp, "ab_plus_c")

    #  Parallel FAIL! in #26
    exp = {"a": 10, "b2": 2, "c": 1, "ab": 5, "ab_plus_c": 6}
    assert pipeline(**{"a": 10, "b2": 2, "c": 1}) == exp
    assert pipeline.compute({"a": 10, "b2": 2, "c": 1},
                            ["ab_plus_c"]) == filtdict(exp, "ab_plus_c")
Example #6
def test_unsatisfied_operations(exemethod):
    # Test that operations with partial inputs are culled instead of failing.
    pipeline = compose(
        "pipeline",
        operation(name="add", needs=["a", "b1"], provides=["a+b1"])(add),
        operation(name="sub", needs=["a", "b2"], provides=["a-b2"])(sub),
        parallel=exemethod,
    )

    exp = {"a": 10, "b1": 2, "a+b1": 12}
    assert pipeline(**{"a": 10, "b1": 2}) == exp
    assert pipeline.compute({"a": 10, "b1": 2},
                            ["a+b1"]) == filtdict(exp, "a+b1")
    assert pipeline.withset(outputs=["a+b1"])(
        **{"a": 10, "b1": 2}) == filtdict(exp, "a+b1")

    exp = {"a": 10, "b2": 2, "a-b2": 8}
    assert pipeline(**{"a": 10, "b2": 2}) == exp
    assert pipeline.compute({"a": 10, "b2": 2},
                            ["a-b2"]) == filtdict(exp, "a-b2")
Example #7
def test_pruning_with_given_intermediate_and_asked_out(exemethod):
    # Test #24: v1.2.4 does not prune before given intermediate data when
    # outputs not asked, but does so when output asked.
    pipeline = compose(
        "pipeline",
        operation(name="unjustly pruned", needs=["given-1"],
                  provides=["a"])(identity),
        operation(name="shortcut-ed", needs=["a", "b"],
                  provides=["given-2"])(add),
        operation(name="good_op", needs=["a", "given-2"],
                  provides=["asked"])(add),
        parallel=exemethod,
    )

    inps = {"given-1": 5, "b": 2, "given-2": 2}
    exp = {"given-1": 5, "given-2": 2, "a": 5, "b": 2, "asked": 7}

    # v1.2.4 is ok
    assert pipeline(**inps) == exp
    # FAILS
    # - on v1.2.4 with KeyError: 'a',
    # - on #18 (unsatisfied) with no result.
    # FIXED on #18+#26 (new dag solver).
    assert pipeline.compute(inps, "asked") == filtdict(exp, "asked")

    ## Test OVERWRITES
    #
    solution = pipeline.compute(inps)
    assert solution == exp
    assert solution.overwrites == {}

    solution = pipeline.compute(inps, "asked")
    assert solution == filtdict(exp, "asked")
    assert solution.overwrites == {}
Example #8
def test_pruning_multiouts_not_override_intermediates2(exemethod):
    pipeline = compose(
        "pipeline",
        operation(name="must run", needs=["a"],
                  provides=["overridden", "e"])(lambda x: (x, 2 * x)),
        operation(name="op1", needs=["overridden", "c"], provides=["d"])(add),
        operation(name="op2", needs=["d", "e"], provides=["asked"])(mul),
        parallel=exemethod,
    )

    inputs = {"a": 5, "overridden": 1, "c": 2}
    exp = {"a": 5, "overridden": 5, "c": 2, "e": 10, "d": 7, "asked": 70}

    assert pipeline(**inputs) == exp
    assert pipeline.compute(inputs, "asked") == filtdict(exp, "asked")

    ## Test OVERWRITES
    #
    solution = pipeline.compute(inputs)
    assert solution == exp
    assert solution.overwrites == {"overridden": [5, 1]}
    # No overwrites when evicted.
    #
    solution = pipeline.compute(inputs, "asked")
    assert solution == filtdict(exp, "asked")
    assert solution.overwrites == {}
    # ... but overwrites collected if asked.
    #
    solution = pipeline.compute(inputs, ["asked", "overridden"])
    assert solution == filtdict(exp, "asked", "overridden")
    assert solution.overwrites == {"overridden": [5, 1]}
Example #9
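# Each op sleeps briefly and then asserts that `task_context` points to the op
# currently executing; marshalling to process pools is expected to break this.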
def test_task_context(exemethod, request):
    def check_task_context():
        sleep(0.15)
        assert task_context.get().op == next(iop), "Corrupted task-context"

    n_ops = 10
    pipe = compose(
        "t",
        *(operation(check_task_context, f"op{i}", provides=f"{i}")
          for i in range(n_ops)),
        parallel=exemethod,
    )
    iop = iter(pipe.ops)

    print(exe_params, cpu_count())
    err = None
    if exe_params.proc and exe_params.marshal:
        err = Exception("^Error sending result")
    elif exe_params.parallel and exe_params.marshal:
        err = AssertionError("^Corrupted task-context")
    elif exe_params.parallel and not os.environ.get("TRAVIS"):
        # Travis has low parallelism and error does not surface
        err = AssertionError("^Corrupted task-context")

    if err:
        with pytest.raises(type(err), match=str(err)):
            pipe.compute()
        raise pytest.xfail(
            "Cannot marshal parallel processes with `task_context` :-(.")
    else:
        pipe.compute()
        with pytest.raises(StopIteration):
            next(iop)
Example #10
def test_aliases_pipeline(exemethod):
    provides = ("a", sfxed("s", "foo"))
    aliased = operation(
        lambda: ("A", "B"),
        name="op1",
        provides=provides,
        aliases={"a": "b", "s": "S1"},
    )
    assert aliased._user_provides == provides
    assert tuple(aliased.provides) == (
        "a",
        sfxed("s", "foo"),
        "b",
        "S1",
    )

    pipe = compose(
        "test_net",
        aliased,
        operation(lambda x: x * 2, name="op2", needs="b", provides="c"),
        parallel=exemethod,
    )
    assert pipe() == {"a": "A", "s": "B", "b": "A", "S1": "B", "c": "AA"}
    assert list(pipe.provides) == [*aliased.provides, "c"]
Example #11
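# `recompute_from` re-executes the operations downstream of the named datum,
# even though values for their outputs already exist in the inputs.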
def test_recompute_till():
    def by2(n):
        return 2 * n

    pipe = compose(
        ...,
        operation(by2, "f0", "a0", "a1"),
        operation(by2, "f1", "a1", "a2"),
        operation(by2, "f2", "a2", "a3"),
        operation(by2, "f3", "a3", "a4"),
    )
    sol = pipe(a0=1)
    assert exe_ops(sol) == ["f0", "f1", "f2", "f3"]
    assert sol == {"a0": 1, "a1": 2, "a2": 4, "a3": 8, "a4": 16}

    inp = dict(sol)
    inp["a1"] = 3

    sol = pipe.compute(inp, outputs="a3", recompute_from="a1")
    assert exe_ops(sol) == ["f1", "f2"]
    assert sol == {"a3": 12}

    with evictions_skipped(True):
        sol = pipe.compute(inp, outputs="a3", recompute_from="a1")
        assert exe_ops(sol) == ["f1", "f2"]
        assert sol == {"a0": 1, "a1": 3, "a2": 6, "a3": 12, "a4": 16}
Example #12
def recompute_sol(samplenet):
    pipe = compose(..., samplenet, excludes="sum_op1")
    sol = pipe.compute({"c": 3, "d": 4}, recompute_from=())
    assert sol == {"c": 3, "d": 4, "sum2": 7, "sum3": 10}
    assert exe_ops(sol) == ["sum_op2", "sum_op3"]

    return pipe, sol
Example #13
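# With `endured=True`, the raised ValueError does not abort the pipeline;
# the downstream `vat owed` simply ends up None.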
def test_sideffecteds_endured(calc_prices_pipeline):
    ## Break `fill_in_vat_ratios()`.
    #
    @operation(
        needs=[sfxed("ORDER", "Items"), "vat rate"],
        provides=sfxed("ORDER", "VAT rates"),
        endured=True,
    )
    def fill_in_vat_ratios(order: DataFrame, base_vat: float) -> DataFrame:
        raise ValueError("EC transactions have no VAT!")

    calc_prices_pipeline = compose(calc_prices_pipeline.name,
                                   fill_in_vat_ratios,
                                   calc_prices_pipeline,
                                   nest=False)

    sol = calc_prices_pipeline.compute({
        "order_items": "milk babylino toilet-paper".split(),
        "vat rate": 0.18,
    })

    print(sol)
    assert sol == {
        "order_items": ["milk", "babylino", "toilet-paper"],
        "vat rate": 0.18,
        "ORDER": {
            "items": ["milk", "babylino", "toilet-paper"],
            "prices": [1, 2, 3],
            "totals": [1, 2, 3],
        },
        "vat owed": None,
    }
Example #14
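# `sfx('b')` merely orders the two operations; the real data travels through `box`,
# mutated in place (marshalling to a real process pool loses the mutation,
# hence `sidefx_fail`).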
def test_sideffect_real_input(reverse, exemethod):
    sidefx_fail = is_marshal_tasks() and not isinstance(
        get_execution_pool(),
        types.FunctionType  # mp_dummy.Pool
    )

    ops = [
        operation(name="extend", needs=["box", "a"],
                  provides=[sfx("b")])(_box_extend),
        operation(name="increment", needs=["box", sfx("b")],
                  provides="c")(_box_increment),
    ]
    if reverse:
        ops = reversed(ops)
    # `sfx('b')` designates the side-effect output/input linking the two operations.
    graph = compose("mygraph", *ops, parallel=exemethod)

    box_orig = [0]
    assert graph(**{"box": [0], "a": True}) == {
        "a": True,
        "box": box_orig if sidefx_fail else [1, 2, 3],
        "c": None,
    }
    assert graph.compute({"box": [0], "a": True}, ["box", "c"]) == {
        "box": box_orig if sidefx_fail else [1, 2, 3],
        "c": None,
    }
Example #15
def samplenet():
    """sum1 = (a + b), sum2 = (c + d), sum3 = c + (c + d)"""
    sum_op1 = operation(name="sum_op1", needs=["a", "b"], provides="sum1")(add)
    sum_op2 = operation(name="sum_op2", needs=["c", "d"], provides="sum2")(add)
    sum_op3 = operation(name="sum_op3", needs=["c", "sum2"],
                        provides="sum3")(add)
    return compose("test_net", sum_op1, sum_op2, sum_op3)
Example #16
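# `cwd` roots every relative jsonp dependency under '/root'; absolute ones
# ('/r/b', '/R') are left untouched.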
def test_cwd_pipeline():
    op = compose(
        ...,
        operation(
            str,
            None,
            needs=[
                "a",
                "a/b",
                "/r/b",
            ],
            provides=["A/B", "C", "/R"],
            aliases=[("A/B", "aa"), ("C", "CC"), ("/R", "RR")],
        ),
        cwd="/root",
    )
    exp = """
    Pipeline('test_cwd_pipeline',
        needs=['/root/a'($),
            '/root/a/b'($),
            '/r/b'($)],
        provides=['/root/A/B'($),
            '/root/C'($),
            '/R'($),
            '/root/aa'($),
            '/root/CC'($),
            '/root/RR'($)],
        x1 ops: str)
    """
    assert oneliner(op) == oneliner(exp)
Example #17
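# A node `predicate(name, data)` set via `withset()` keeps only matching graph
# nodes when compiling, shrinking both the computed sum and the dag.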
def test_node_predicate_based_prune():
    pipeline = compose(
        "N",
        operation(name="A",
                  needs=["a"],
                  provides=["aa"],
                  node_props={"color": "red"})(identity),
        operation(name="B",
                  needs=["b"],
                  provides=["bb"],
                  node_props={"color": "green"})(identity),
        operation(name="C", needs=["c"], provides=["cc"])(identity),
        operation(
            name="SUM",
            needs=[optional(i) for i in ("aa", "bb", "cc")],
            provides=["sum"],
        )(addall),
    )
    inp = {"a": 1, "b": 2, "c": 3}
    assert pipeline(**inp)["sum"] == 6
    assert len(pipeline.net.graph.nodes) == 11

    pred = lambda n, d: d.get("color", None) != "red"
    assert pipeline.withset(predicate=pred)(**inp)["sum"] == 5
    assert len(pipeline.withset(predicate=pred).compile().dag.nodes) == 9

    pred = lambda n, d: "color" not in d
    assert pipeline.withset(predicate=pred)(**inp)["sum"] == 3
    assert len(pipeline.withset(predicate=pred).compile().dag.nodes) == 7
Example #18
def test_pruning_not_overrides_given_intermediate(exemethod):
    # Test #25: v1.2.4 overwrites intermediate data when no output asked
    pipeline = compose(
        "pipeline",
        operation(name="not run", needs=["a"],
                  provides=["overridden"])(scream),
        operation(name="op", needs=["overridden", "c"],
                  provides=["asked"])(add),
        parallel=exemethod,
    )

    inputs = {"a": 5, "overridden": 1, "c": 2}
    exp = {"a": 5, "overridden": 1, "c": 2, "asked": 3}
    # v1.2.4.ok
    assert pipeline.compute(inputs, "asked") == filtdict(exp, "asked")
    # FAILs
    # - on v1.2.4 with (overridden, asked): = (5, 7) instead of (1, 3)
    # - on #18(unsatisfied) + #23(ordered-sets) with (overridden, asked) = (5, 7) instead of (1, 3)
    # FIXED on #26
    assert pipeline(**inputs) == exp

    ## Test OVERWRITES
    #
    solution = pipeline.compute(inputs, ["asked"])
    assert solution == filtdict(exp, "asked")
    assert solution.overwrites == {}  # unjust must have been pruned

    solution = pipeline(**inputs)
    assert solution == exp
    assert solution.overwrites == {}  # unjust must have been pruned
Example #19
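# Pipeline-level `node_props` override same-named op-level ones (`bb`: 2 → 22)
# without mutating the original operations.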
def test_pipeline_node_props():
    op1 = operation(lambda: None, name="a",
                    node_props={"a": 11, "b": 0, "bb": 2})
    op2 = operation(lambda: None, name="b", node_props={"a": 3, "c": 4})
    pipeline = compose("n", op1, op2, node_props={"bb": 22, "c": 44})

    exp = {
        "a": {"typ": 1, "a": 11, "b": 0, "bb": 22, "c": 44},
        "b": {"typ": 1, "a": 3, "bb": 22, "c": 44},
    }
    node_props = _collect_op_props(pipeline)
    assert node_props == exp

    # Check the ops' own node_props were not modified as a side-effect
    #
    assert op1.node_props == {"a": 11, "b": 0, "bb": 2}
    assert op2.node_props == {"a": 3, "c": 4}
Example #20
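# The solution plot should badge each executed op and the evicted datum with its
# computation-order step number.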
def test_step_badge():
    p = compose(
        "",
        operation(str, "0", provides="b"),
        operation(str, "1", needs="b", provides="c"),
    )
    sol = p.compute(outputs="c")
    dot_str = str(sol.plot())
    print(dot_str)
    exp = r"""
    digraph solution_x4_nodes {
    fontname=italic;
    node [fillcolor=white, style=filled];
    <0> [label=<<TABLE CELLBORDER="0" CELLSPACING="0" STYLE="rounded" BGCOLOR="wheat">
        <TR>
            <TD BORDER="1" SIDES="b" ALIGN="left" TOOLTIP="FnOp(name=&#x27;0&#x27;, provides=[&#x27;b&#x27;], fn=&#x27;str&#x27;)" TARGET="_top"
            ><B>OP:</B> <I>0</I></TD>
            <TD BORDER="1" SIDES="b" ALIGN="right"><TABLE BORDER="0" CELLBORDER="0" CELLSPACING="1" CELLPADDING="2" ALIGN="right">
                    <TR>
                        <TD STYLE="rounded" HEIGHT="22" WIDTH="14" FIXEDSIZE="true" VALIGN="BOTTOM" BGCOLOR="#00bbbb" TITLE="computation order" HREF="https://graphtik.readthedocs.io/en/latest/arch.html#term-steps" TARGET="_top"><FONT FACE="monospace" COLOR="white"><B>0</B></FONT></TD>
                    </TR>
                </TABLE></TD>
        </TR>
        <TR>
            <TD COLSPAN="2" ALIGN="left" TARGET="_top"
            ><B>FN:</B> builtins.str</TD>
        </TR>
    </TABLE>>, shape=plain, tooltip=<0>];
    <b> [color="#006666", fixedsize=shape, label=<<TABLE CELLBORDER="0" CELLSPACING="0" BORDER="0">
                <TR>
                    <TD STYLE="rounded" CELLSPACING="2" CELLPADDING="4" BGCOLOR="#00bbbb" TITLE="computation order" HREF="https://graphtik.readthedocs.io/en/latest/arch.html#term-steps" TARGET="_top"
                    ><FONT FACE="monospace" COLOR="white"><B>2</B></FONT></TD><TD>b</TD>
                </TR>
            </TABLE>>, penwidth=3, shape=rect, style="filled,dashed", tooltip="(to evict)\n(evicted)"];
    <1> [label=<<TABLE CELLBORDER="0" CELLSPACING="0" STYLE="rounded" BGCOLOR="wheat">
        <TR>
            <TD BORDER="1" SIDES="b" ALIGN="left" TOOLTIP="FnOp(name=&#x27;1&#x27;, needs=[&#x27;b&#x27;], provides=[&#x27;c&#x27;], fn=&#x27;str&#x27;)" TARGET="_top"
            ><B>OP:</B> <I>1</I></TD>
            <TD BORDER="1" SIDES="b" ALIGN="right"><TABLE BORDER="0" CELLBORDER="0" CELLSPACING="1" CELLPADDING="2" ALIGN="right">
                    <TR>
                        <TD STYLE="rounded" HEIGHT="22" WIDTH="14" FIXEDSIZE="true" VALIGN="BOTTOM" BGCOLOR="#00bbbb" TITLE="computation order" HREF="https://graphtik.readthedocs.io/en/latest/arch.html#term-steps" TARGET="_top"><FONT FACE="monospace" COLOR="white"><B>1</B></FONT></TD>
                    </TR>
                </TABLE></TD>
        </TR>
        <TR>
            <TD COLSPAN="2" ALIGN="left" TARGET="_top"
            ><B>FN:</B> builtins.str</TD>
        </TR>
    </TABLE>>, shape=plain, tooltip=<1>];
    <c> [fillcolor=wheat, fixedsize=shape, label=<<TABLE CELLBORDER="0" CELLSPACING="0" BORDER="0">
                <TR><TD>c</TD>
                </TR>
            </TABLE>>, shape=house, style=filled, tooltip="(output)\n(str)"];
    <0> -> <b>  [headport=n, tailport=s];
    <b> -> <1>  [arrowtail=inv, dir=back, headport=n, tailport=s];
    <1> -> <c>  [headport=n, tailport=s];
    legend [URL="https://graphtik.readthedocs.io/en/latest/_images/GraphtikLegend.svg", fillcolor=yellow, shape=component, style=filled, target=_blank];
    }
    """
    assert _striplines(dot_str) == _striplines(exp)
Example #21
def cycler_pipeline(
    aug: autog.Autograph = None, domain=("cycle", None), **pipeline_kw
) -> Pipeline:
    """
    Main pipeline to "run" the cycle.

    .. graphtik::
        :height: 600
        :hide:
        :name: cycler_pipeline

        >>> pipe = cycler_pipeline()
    """
    aug = aug or wio.make_autograph(domain=domain)
    ops = aug.wrap_funcs(
        [
            cycles.get_wltc_class_data,
            cycler.get_forced_cycle,
            cycler.init_cycle_velocity,
            cycler.calc_acceleration,
            cycles.get_class_phase_boundaries,
            cycler.attach_class_v_phase_markers,
            cycler.calc_class_va_phase_markers,
            *gwots_pipeline(aug).ops,
            *p_req_pipeline(aug).ops,
            *n_max_pipeline(aug).ops,
            wio.GearMultiIndexer.from_df,
            cycler.attach_wots,
        ]
    )
    pipe = compose(..., *ops, **pipeline_kw)

    return pipe
Example #22
def scale_trace_pipeline(aug: autog.Autograph = None,
                         **pipeline_kw) -> Pipeline:
    """
    Main pipeline to scale the Velocity trace:

    .. graphtik::
        :height: 800
        :hide:
        :name: scale_trace_pipeline

        >>> netop = scale_trace_pipeline()

    **Example:**

        >>> mdl = {"n_idle": 500, "n_rated": 3000, "p_rated": 80, "t_cold_end": 470}
    """
    aug = aug or wio.make_autograph()
    ops = aug.wrap_funcs([
        *wltc_class_pipeline(aug).ops,
        *vmax_pipeline(aug).ops,
        *downscale_pipeline(aug).ops,
        *compensate_capped_pipeline(aug).ops,
        *v_distances_pipeline(aug).ops,
    ])
    pipe = compose(..., *ops, **pipeline_kw)

    return pipe
Example #23
def test_compose_rename_dict(caplog):
    pip = compose(
        "t",
        operation(str, "op1", provides=["a", "aa"]),
        operation(
            str,
            "op2",
            needs="a",
            provides=["b", sfx("c")],
            aliases=[("b", "B"), ("b", "p")],
        ),
        nest={
            "op1": "OP1",
            "op2": lambda n: "OP2",
            "a": "A",
            "b": "bb"
        },
    )
    print(str(pip))
    assert str(pip) == (
        "Pipeline('t', needs=['A'], "
        "provides=['A', 'aa', 'bb', sfx('c'), 'B', 'p'], x2 ops: OP1, OP2)")
    print(str(pip.ops))
    assert (str(pip.ops) == dedent("""
        [FnOp(name='OP1', provides=['A', 'aa'], fn='str'),
         FnOp(name='OP2', needs=['A'], provides=['bb', sfx('c'), 'B', 'p'],
         aliases=[('bb', 'B'), ('bb', 'p')], fn='str')]
    """).replace("\n", ""))
Example #24
def compensate_capped_pipeline(aug: autog.Autograph = None,
                               **pipeline_kw) -> Pipeline:
    """
    Pipeline to provide `V_compensated` from `V_capped` trace (Annex 1, 9).

    .. graphtik::
        :hide:
        :name: compensate_capped_pipeline

        >>> pipe = compensate_capped_pipeline()
    """
    aug = aug or wio.make_autograph()
    ops = aug.wrap_funcs(
        [
            cycles.get_wltc_class_data,
            cycles.get_class_phase_boundaries,
            cycles.make_class_phases_grouper,
            downscale.calc_V_capped,
            downscale.calc_compensate_phases_t_extra_raw,
            downscale.round_compensate_phases_t_extra,
            downscale.calc_V_compensated,
        ],
        exclude=[
            "calc_compensated_distances", "make_compensated_phases_grouper"
        ],
    )
    pipe = compose(..., *ops, **pipeline_kw)

    return pipe
Example #25
def pipeline():
    return compose(
        "pipeline",
        operation(name="add", needs=["a", "b1"], provides=["ab1"])(add),
        operation(name="sub", needs=["a", optional("b2")],
                  provides=["ab2"])(lambda a, b=1: a - b),
        operation(name="abb", needs=["ab1", "ab2"], provides=["asked"])(add),
    )
Example #26
def test_compose_rename_preserve_ops(caplog):
    pip = compose(
        "t",
        operation(str, "op1"),
        operation(str, "op2"),
        nest=lambda na: f"aa.{na.name}",
    )
    assert str(pip) == "Pipeline('t', x2 ops: aa.op1, aa.op2)"
Example #27
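# `implicit('a')` declares a dependency the function neither receives nor returns;
# note it does not appear in the computed solution here.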
def test_implicit_out():
    op = operation(str, "hh", provides=["A", implicit("a")])

    pipe = compose(..., op)
    got = pipe.compute()
    assert got == {"A": ""}

    assert "(implicit)" in str(op.plot())
Example #28
def test_compose_rename_dict_non_str(caplog):
    pip = compose(
        "t",
        operation(str, "op1"),
        operation(str, "op2"),
        nest={"op1": 1},
    )
    exp = "Pipeline('t', x2 ops: op1, op2)"
    print(pip)
    assert str(pip) == exp
    exp = "Pipeline('t', x2 ops: t.op1, op2)"
    pip = compose("t", pip, nest={"op1": 1, "op2": 0})
    assert str(pip) == exp
    pip = compose("t", pip, nest={"op1": 1, "op2": ""})
    assert str(pip) == exp
    for record in caplog.records:
        assert "Failed to nest-rename" not in record.message
Example #29
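# Combining pipelines merges same-named ops (hence x4, not x5, ops in 'merged'),
# and the composition order decides which provider of a shared output wins.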
def test_network_combine():
    sum_op1 = operation(name="sum_op1",
                        needs=[vararg("a"), vararg("b")],
                        provides="sum1")(addall)
    sum_op2 = operation(name="sum_op2",
                        needs=[vararg("a"), "b"],
                        provides="sum2")(addall)
    sum_op3 = operation(name="sum_op3", needs=["sum1", "c"],
                        provides="sum3")(add)
    net1 = compose("my network 1", sum_op1, sum_op2, sum_op3)
    exp = {"a": 1, "b": 2, "c": 4, "sum1": 3, "sum2": 3, "sum3": 7}
    assert net1(a=1, b=2, c=4) == exp
    assert repr(net1).startswith(
        "Pipeline('my network 1', needs=['a'(?), 'b', 'sum1', 'c'], "
        "provides=['sum1', 'sum2', 'sum3'], x3 ops")

    sum_op4 = operation(name="sum_op1",
                        needs=[vararg("a"), "b"],
                        provides="sum1")(addall)
    sum_op5 = operation(name="sum_op4", needs=["sum1", "b"],
                        provides="sum2")(add)
    net2 = compose("my network 2", sum_op4, sum_op5)
    exp = {"a": 1, "b": 2, "sum1": 3, "sum2": 5}
    assert net2(**{"a": 1, "b": 2}) == exp
    assert repr(net2).startswith(
        "Pipeline('my network 2', needs=['a'(?), 'b', 'sum1'], provides=['sum1', 'sum2'], x2 ops"
    )

    net3 = compose("merged", net1, net2)
    exp = {"a": 1, "b": 2, "c": 4, "sum1": 3, "sum2": 5, "sum3": 7}
    assert net3(a=1, b=2, c=4) == exp

    assert repr(net3).startswith(
        "Pipeline('merged', needs=['a'(?), 'b', 'sum1', 'c'], provides=['sum1', 'sum2', 'sum3'], x4 ops"
    )

    ## Reverse ops, change results and `needs` optionality.
    #
    net3 = compose("merged", net2, net1)
    exp = {"a": 1, "b": 2, "c": 4, "sum1": 3, "sum2": 3, "sum3": 7}
    assert net3(**{"a": 1, "b": 2, "c": 4}) == exp

    assert repr(net3).startswith(
        "Pipeline('merged', needs=['a'(?), 'b', 'sum1', 'c'], provides=['sum1', 'sum2', 'sum3'], x4 ops"
    )
Example #30
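# Nesting two pipelines (`nest=True`) should emit one pydot cluster per sub-pipeline.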
def test_combine_clusters():
    p1 = compose(
        "op1",
        operation(lambda a, b: None, name="op1", needs=["a", "b"], provides=["ab"]),
        operation(lambda a, b: None, name="op2", needs=["a", "ab"], provides="c"),
        operation(lambda a: None, name="op3", needs="c", provides="C"),
    )

    p2 = compose(
        "op2",
        operation(lambda a, b: None, name="op1", needs=["a", "b"], provides=["ab"]),
        operation(lambda a, b: None, name="op2", needs=["c", "ab"], provides=["cab"]),
    )

    merged_graph = compose("m", p1, p2, nest=True)
    dot: pydot.Dot = merged_graph.plot()
    assert dot.get_subgraph(f"cluster_{p1.name}")
    assert dot.get_subgraph(f"cluster_{p2.name}")