Example #1
def test_step_failure(workflow_start_regular_shared, tmp_path):
    (tmp_path / "test").write_text("0")

    @ray.remote
    def unstable_step():
        v = int((tmp_path / "test").read_text())
        (tmp_path / "test").write_text(f"{v + 1}")
        if v < 10:
            raise ValueError("Invalid")
        return v

    # A negative max_retries should be rejected when the workflow is created.
    with pytest.raises(Exception):
        workflow.create(
            update_workflow_options(unstable_step, max_retries=-2).bind())

    with pytest.raises(Exception):
        workflow.create(
            update_workflow_options(unstable_step,
                                    max_retries=2).bind()).run()
    assert (10 == workflow.create(
        update_workflow_options(unstable_step, max_retries=7).bind()).run())
    (tmp_path / "test").write_text("0")
    (ret, err) = workflow.create(
        update_workflow_options(unstable_step,
                                max_retries=2,
                                catch_exceptions=True).bind()).run()
    assert ret is None
    assert isinstance(err, ValueError)
    (ret, err) = workflow.create(
        update_workflow_options(unstable_step,
                                max_retries=7,
                                catch_exceptions=True).bind()).run()
    assert ret == 10
    assert err is None
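
Note: update_workflow_options is a test helper rather than part of the public ray.workflow API. A minimal sketch of what such a helper could look like, assuming it simply layers the @workflow.options decorator (used directly in Example #7) on top of an existing @ray.remote function; the real helper in the test suite may differ:

from ray import workflow

def update_workflow_options(f, **options):
    # Hypothetical sketch: re-apply workflow-specific options
    # (name, metadata, max_retries, allow_inplace, ...) to a remote function.
    return workflow.options(**options)(f)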
Example #2
def checkpoint_dag(checkpoint):
    x = utils.update_workflow_options(
        large_input, name="large_input", checkpoint=checkpoint
    ).bind()
    y = utils.update_workflow_options(
        identity, name="identity", checkpoint=checkpoint
    ).bind(x)
    return workflow.continuation(
        utils.update_workflow_options(average, name="average").bind(y)
    )
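
Assuming checkpoint_dag is itself declared as a @ray.remote workflow step (the decorator is outside this fragment), a driver for the DAG could look like the sketch below; the workflow ids are arbitrary illustrations:

# Run the same DAG once with checkpointing enabled and once with it skipped.
out_full = workflow.create(checkpoint_dag.bind(True)).run(
    workflow_id="checkpoint_dag_full")
out_skip = workflow.create(checkpoint_dag.bind(False)).run(
    workflow_id="checkpoint_dag_skip")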
Example #3
def inplace_test():
    from ray.worker import global_worker

    worker_id = global_worker.worker_id
    x = update_workflow_options(check_and_update,
                                allow_inplace=True).bind("@", worker_id)
    y = check_and_update.bind(x, worker_id)
    z = update_workflow_options(check_and_update,
                                allow_inplace=True).bind(y, worker_id)
    return workflow.continuation(z)
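
check_and_update is defined elsewhere in the test file; a plausible, purely hypothetical sketch, assuming it appends "1" when it runs in the same worker process as the caller and "0" otherwise, so inplace_test can tell which steps were inlined:

@ray.remote
def check_and_update(x, worker_id):
    # Hypothetical sketch of the step used by inplace_test above.
    from ray.worker import global_worker
    if global_worker.worker_id == worker_id:
        return x + "1"  # executed inplace, in the caller's worker
    return x + "0"      # executed as a normal remote task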
Example #4
def test_get_named_step_output_finished(workflow_start_regular, tmp_path):
    @ray.remote
    def double(v):
        return 2 * v

    # Get the result from a named step after the workflow has finished.
    assert 4 == workflow.create(
        update_workflow_options(double, name="outer").bind(
            update_workflow_options(double,
                                    name="inner").bind(1))).run("double")
    assert ray.get(workflow.get_output("double", name="inner")) == 2
    assert ray.get(workflow.get_output("double", name="outer")) == 4
Example #5
def test_get_named_step_output_running(workflow_start_regular, tmp_path):
    @ray.remote
    def double(v, lock=None):
        if lock is not None:
            # Block on the file lock before returning, so the test can hold
            # the step open while it inspects intermediate outputs.
            with FileLock(lock):
                return 2 * v
        else:
            return 2 * v

    # Get the result from a named step while the workflow is still running.
    lock_path = str(tmp_path / "lock")
    lock = FileLock(lock_path)
    lock.acquire()
    output = workflow.create(
        update_workflow_options(double, name="outer").bind(
            update_workflow_options(double, name="inner").bind(1, lock_path),
            lock_path)).run_async("double-2")

    inner = workflow.get_output("double-2", name="inner")
    outer = workflow.get_output("double-2", name="outer")

    @ray.remote
    def wait(obj_ref):
        return ray.get(obj_ref[0])

    # Make sure nothing is finished.
    ready, waiting = ray.wait(
        [wait.remote([output]),
         wait.remote([inner]),
         wait.remote([outer])],
        timeout=1)
    assert 0 == len(ready)
    assert 3 == len(waiting)

    # Once the job finishes, we'll be able to get the result.
    lock.release()
    assert 4 == ray.get(output)

    # Sometimes the "inner" step has not been generated yet when we call
    # run_async, so there is a race condition here.
    try:
        v = ray.get(inner)
    except Exception:
        v = None
    if v is not None:
        assert 2 == v
    assert 4 == ray.get(outer)

    inner = workflow.get_output("double-2", name="inner")
    outer = workflow.get_output("double-2", name="outer")
    assert 2 == ray.get(inner)
    assert 4 == ray.get(outer)
Example #6
def test_get_output_3(workflow_start_regular, tmp_path):
    cnt_file = tmp_path / "counter"
    cnt_file.write_text("0")
    error_flag = tmp_path / "error"
    error_flag.touch()

    @ray.remote
    def incr():
        v = int(cnt_file.read_text())
        cnt_file.write_text(str(v + 1))
        if error_flag.exists():
            raise ValueError()
        return 10

    # With the error flag present and max_retries=0, the failing step
    # aborts the whole workflow run.
    with pytest.raises(ray.exceptions.RaySystemError):
        workflow.create(update_workflow_options(
            incr, max_retries=0).bind()).run("incr")

    assert cnt_file.read_text() == "1"

    with pytest.raises(ray.exceptions.RaySystemError):
        ray.get(workflow.get_output("incr"))

    assert cnt_file.read_text() == "1"
    error_flag.unlink()
    # The stored output is still the failure until the workflow is resumed.
    with pytest.raises(ray.exceptions.RaySystemError):
        ray.get(workflow.get_output("incr"))
    assert ray.get(workflow.resume("incr")) == 10
Example #7
def test_options_update(workflow_start_regular_shared):
    from ray.workflow.common import WORKFLOW_OPTIONS

    # Options are given in the decorator first and then updated by
    # update_workflow_options().
    @workflow.options(name="old_name", metadata={"k": "v"}, max_retries=1)
    @ray.remote(num_cpus=2)
    def f():
        return

    # "name" is updated from the old name in the decorator to the new name
    # given to update_workflow_options().
    # "metadata" and the other options are updated with the new values.
    # "max_retries" is defined only in the decorator, so it is preserved.
    new_f = update_workflow_options(f,
                                    name="new_name",
                                    num_returns=2,
                                    metadata={"extra_k2": "extra_v2"})
    options = new_f.bind().get_options()
    assert options == {
        "num_cpus": 2,
        "_metadata": {
            WORKFLOW_OPTIONS: {
                "name": "new_name",
                "metadata": {
                    "extra_k2": "extra_v2"
                },
                "max_retries": 1,
                "num_returns": 2,
            }
        },
    }
Example #8
def exponential_fail(k, n):
    if n > 0:
        if n < 3:
            raise Exception("Failed intentionally")
        return workflow.continuation(
            update_workflow_options(exponential_fail,
                                    name=f"step_{n}").bind(k * 2, n - 1))
    return k
Example #9
    def tail_recursion(n):
        import inspect

        # check that the stack is not growing
        assert len(inspect.stack(0)) < 20
        if n <= 0:
            return "ok"
        return workflow.continuation(
            update_workflow_options(tail_recursion,
                                    allow_inplace=True).bind(n - 1))
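
A minimal driver sketch for the recursion above, assuming tail_recursion is declared as a @ray.remote workflow step (the decorator sits outside this fragment); with allow_inplace=True every continuation runs in the same worker, so the stack-depth assert holds even for a deep recursion:

# Hypothetical driver; 30 is an arbitrary recursion depth.
assert "ok" == workflow.create(
    update_workflow_options(tail_recursion, allow_inplace=True).bind(30)).run()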
Example #10
def test_get_named_step_output_error(workflow_start_regular, tmp_path):
    @ray.remote
    def double(v, error):
        if error:
            raise Exception()
        return v + v

    # Force it to fail for the outer step
    with pytest.raises(Exception):
        workflow.create(
            update_workflow_options(double, name="outer").bind(
                update_workflow_options(double, name="inner").bind(1, False),
                True)).run("double")

    # The inner step should already have been executed successfully.
    assert 2 == ray.get(workflow.get_output("double", name="inner"))
    outer = workflow.get_output("double", name="outer")
    with pytest.raises(Exception):
        ray.get(outer)
Example #11
def test_user_metadata_not_dict(workflow_start_regular):
    @ray.remote
    def simple():
        return 0

    with pytest.raises(ValueError):
        workflow.create(update_workflow_options(simple, metadata="x").bind())

    with pytest.raises(ValueError):
        workflow.create(simple.bind()).run(metadata="x")
Example #12
def test_nested_catch_exception(workflow_start_regular_shared, tmp_path):
    @ray.remote
    def f2():
        return 10

    @ray.remote
    def f1():
        return workflow.continuation(f2.bind())

    assert (10, None) == workflow.create(
        update_workflow_options(f1, catch_exceptions=True).bind()).run()
Example #13
def test_nested_catch_exception_2(workflow_start_regular_shared, tmp_path):
    @ray.remote
    def f1(n):
        if n == 0:
            raise ValueError()
        else:
            return workflow.continuation(f1.bind(n - 1))

    ret, err = workflow.create(
        update_workflow_options(f1, catch_exceptions=True).bind(5)).run()
    assert ret is None
    assert isinstance(err, ValueError)
Example #14
def test_user_metadata_not_json_serializable(workflow_start_regular):
    @ray.remote
    def simple():
        return 0

    class X:
        pass

    with pytest.raises(ValueError):
        workflow.create(update_workflow_options(simple, metadata={"x": X()}).bind())

    with pytest.raises(ValueError):
        workflow.create(simple.bind()).run(metadata={"x": X()})
Example #15
def exp_inplace(k, n, worker_id=None):
    from ray.worker import global_worker

    _worker_id = global_worker.worker_id
    if worker_id is not None:
        # sub-workflows run inplace, i.e. in the same worker process
        assert _worker_id == worker_id
    worker_id = _worker_id

    if n == 0:
        return k
    return workflow.continuation(
        update_workflow_options(exp_inplace, allow_inplace=True).bind(
            2 * k, n - 1, worker_id))
Example #16
def test_dynamic_output(workflow_start_regular_shared):
    @ray.remote
    def exponential_fail(k, n):
        if n > 0:
            if n < 3:
                raise Exception("Failed intentionally")
            return workflow.continuation(
                update_workflow_options(exponential_fail,
                                        name=f"step_{n}").bind(k * 2, n - 1))
        return k

    # When the workflow fails, the dynamic output should point to the
    # latest successful step.
    try:
        workflow.create(
            update_workflow_options(exponential_fail, name="step_0").bind(
                3, 10)).run(workflow_id="dynamic_output")
    except Exception:
        pass
    from ray.workflow.workflow_storage import get_workflow_storage

    wf_storage = get_workflow_storage(workflow_id="dynamic_output")
    result = wf_storage.inspect_step("step_0")
    assert result.output_step_id == "step_3"
Example #17
def checkpoint_dag2(checkpoint):
    x = utils.update_workflow_options(large_input, checkpoint=checkpoint).bind()
    y = utils.update_workflow_options(identity2, checkpoint=checkpoint).bind(x)
    return workflow.continuation(average.bind(y))