Example no. 1
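This flow-run test maps `div` over `[0, 1]`; the division by zero fails and, because `div` has retries configured, the flow run stays running while the downstream reduce task `take_sum` remains pending.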
def test_reduce_task_properly_applies_trigger_across_all_mapped_states_for_deep_pipelines(
    executor,
):
    @prefect.task
    def ll():
        return [0, 1]

    @prefect.task(max_retries=5, retry_delay=datetime.timedelta(hours=1))
    def div(x):
        return 1 / x

    @prefect.task
    def take_sum(x):
        return sum(x)

    with Flow(name="test") as f:
        d = div.map(ll)
        s = take_sum(d)

    state = FlowRunner(flow=f).run(executor=executor, return_tasks=[s])
    assert state.is_running()
    assert state.result[s].is_pending()
Example no. 2
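A task gated by the `manual_only` trigger pauses on the first run; feeding a `Resume` state built from its `cached_inputs` into a second run yields `11 + 1 == 12` even though the `x` parameter is changed to `1`, showing that the cached inputs are what get used.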
    def test_manual_only_trigger_caches_inputs(self, executor):
        with Flow(name="test") as f:
            x = Parameter("x")
            inp = SuccessTask()
            t = AddTask(trigger=manual_only)
            res = t(x, inp)

        first_state = FlowRunner(flow=f).run(executor=executor,
                                             parameters=dict(x=11),
                                             return_tasks=f.tasks)
        assert first_state.is_running()

        first_state.result.update(
            {res: Resume(cached_inputs=first_state.result[res].cached_inputs)})
        second_state = FlowRunner(flow=f).run(
            executor=executor,
            parameters=dict(x=1),
            return_tasks=[res],
            task_states=first_state.result,
        )
        assert isinstance(second_state, Success)
        assert second_state.result[res].result == 12
Example no. 3
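After the first run leaves `b` retrying, both the upstream result (set to `100`) and `b`'s `cached_inputs` (set to `x=2`) are tampered with; as the test name indicates, the retry uses the freshly available upstream result rather than the cached input, and the asserted `1 / 99` matches the modified upstream value.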
    def test_retries_ignore_cached_inputs_if_upstream_results_are_available(
        self, executor
    ):
        with Flow(name="test") as f:
            a = CountTask()
            b = ReturnTask(max_retries=1, retry_delay=datetime.timedelta(0))
            a_res = a()
            b_res = b(a_res)

        first_state = FlowRunner(flow=f).run(executor=executor, return_tasks=f.tasks)
        assert first_state.is_running()

        a_state = first_state.result[a_res]
        a_state.result = 100  # modify the result
        b_state = first_state.result[b_res]
        b_state.cached_inputs = dict(x=Result(2))  # artificially alter state

        with raise_on_exception():  # without caching we'd expect a KeyError
            second_state = FlowRunner(flow=f).run(
                executor=executor, return_tasks=[b_res], task_states=first_state.result
            )
        assert isinstance(second_state, Success)
        assert second_state.result[b_res].result == 1 / 99
Example no. 4
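The complementary case to the previous example: the upstream result is cleared to `NoResult`, so the retry falls back on the cached input `x=2`, and the asserted result of `1` is consistent with that cached value being used.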
    def test_retries_use_cached_inputs(self, executor):
        with Flow(name="test") as f:
            a = CountTask()
            b = ReturnTask(max_retries=1, retry_delay=datetime.timedelta(0))
            a_res = a()
            b_res = b(a_res)

        first_state = FlowRunner(flow=f).run(executor=executor, return_tasks=f.tasks)
        assert first_state.is_running()

        a_state = first_state.result[a_res]
        a_state.result = NoResult  # remove the result to see if the cached results are picked up
        b_state = first_state.result[b_res]
        b_state.cached_inputs = dict(x=Result(2))  # artificially alter state

        with raise_on_exception():  # without caching we'd expect a KeyError
            second_state = FlowRunner(flow=f).run(
                executor=executor, return_tasks=[b_res], task_states=first_state.result
            )
        assert isinstance(second_state, Success)
        assert second_state.result[b_res].result == 1
Example no. 5
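A unit test of `FlowRunner.set_flow_to_running`: a flow state that is already `Running` stays running.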
    def test_running_stays_running(self):
        state = Running()
        flow = Flow(name="test", tasks=[Task()])
        new_state = FlowRunner(flow=flow).set_flow_to_running(state=state)
        assert new_state.is_running()
Example no. 6
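The same pipeline step driven by a `state` argument (presumably a pytest parametrization over pending-type states): `set_flow_to_running` promotes it to `Running`.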
    def test_pending_becomes_running(self, state):
        flow = Flow(name="test", tasks=[Task()])
        new_state = FlowRunner(flow=flow).set_flow_to_running(state=state)
        assert new_state.is_running()